Columns: repo_name (string, length 6-130) · hexsha (list) · file_path (list) · code (list) · apis (list) · possible_versions (list)
Attrezzi/vision
[ "1aef87d01eec2c0989458387fa04baebcc86ea7b" ]
[ "torchvision/transforms/transforms.py" ]
[ "import math\nimport numbers\nimport random\nimport warnings\nfrom collections.abc import Sequence\nfrom typing import Tuple, List, Optional\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch import Tensor\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\nfrom . import functional as F\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"PILToTensor\", \"ConvertImageDtype\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\",\n \"CenterCrop\", \"Pad\", \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\",\n \"RandomHorizontalFlip\", \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\",\n \"LinearTransformation\", \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\", \"RandomErasing\"]\n\n_pil_interpolation_to_str = {\n Image.NEAREST: 'PIL.Image.NEAREST',\n Image.BILINEAR: 'PIL.Image.BILINEAR',\n Image.BICUBIC: 'PIL.Image.BICUBIC',\n Image.LANCZOS: 'PIL.Image.LANCZOS',\n Image.HAMMING: 'PIL.Image.HAMMING',\n Image.BOX: 'PIL.Image.BOX',\n}\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PILToTensor(object):\n \"\"\"Convert a ``PIL Image`` to a tensor of the same type.\n\n Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.pil_to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ConvertImageDtype(object):\n \"\"\"Convert a tensor image to the given ``dtype`` and scale the values accordingly\n\n Args:\n dtype (torch.dtype): Desired data type of the output\n\n .. note::\n\n When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.\n If converted back and forth, this mismatch has no effect.\n\n Raises:\n RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as\n well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. 
These conversions might lead to\n overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range\n of the integer ``dtype``.\n \"\"\"\n\n def __init__(self, dtype: torch.dtype) -> None:\n self.dtype = dtype\n\n def __call__(self, image: torch.Tensor) -> torch.Tensor:\n return F.convert_image_dtype(image, self.dtype)\n\n\nclass ToPILImage(object):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``\n channels, this transform will normalize each channel of the input\n ``torch.*Tensor`` i.e.,\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. note::\n This transform acts out of place, i.e., it does not mutate the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation in-place.\n\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(torch.nn.Module):\n \"\"\"Resize the input image to the given size.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. 
If size is an int,\n smaller edge of the image will be matched to this number.\n i.e., if height > width, then image will be rescaled to\n (size * height / width, size).\n In torchscript mode size as single int is not supported, use a tuple or\n list of length 1: ``[size, ]``.\n interpolation (int, optional): Desired interpolation enum defined by `filters`_.\n Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``\n and ``PIL.Image.BICUBIC`` are supported.\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n super().__init__()\n if not isinstance(size, (int, Sequence)):\n raise TypeError(\"Size should be int or sequence. Got {}\".format(type(size)))\n if isinstance(size, Sequence) and len(size) not in (1, 2):\n raise ValueError(\"If size is a sequence, it should have 1 or 2 values\")\n self.size = size\n self.interpolation = interpolation\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(torch.nn.Module):\n \"\"\"Crops the given image at the center.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n elif isinstance(size, Sequence) and len(size) == 1:\n self.size = (size[0], size[0])\n else:\n if len(size) != 2:\n raise ValueError(\"Please provide only two dimensions (h, w) for size.\")\n\n self.size = size\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(torch.nn.Module):\n \"\"\"Pad the given image on all sides with the given \"pad\" value.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n padding (int or tuple or list): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n fill (int or tuple): Pixel fill value for constant fill. 
Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.\n Default is constant. Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if not isinstance(padding, (numbers.Number, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError(\"Got inappropriate fill arg\")\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be padded.\n\n Returns:\n PIL Image or Tensor: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda(object):\n \"\"\"Apply a user-defined lambda as a transform.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n assert callable(lambd), repr(type(lambd).__name__) + \" object is not callable\"\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms(object):\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (list or tuple): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n assert isinstance(transforms, (list, tuple))\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(RandomTransforms):\n \"\"\"Apply randomly a list of transformations with a given probability\n\n Args:\n transforms (list or tuple): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super(RandomApply, self).__init__(transforms)\n self.p = p\n\n def __call__(self, img):\n if self.p < random.random():\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string 
+= '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(torch.nn.Module):\n \"\"\"Crop the given image at a random location.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n @staticmethod\n def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = F._get_image_size(img)\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = torch.randint(0, h - th + 1, size=(1, )).item()\n j = torch.randint(0, w - tw + 1, size=(1, )).item()\n return i, j, th, tw\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n elif isinstance(size, Sequence) and len(size) == 1:\n self.size = (size[0], size[0])\n else:\n if len(size) != 2:\n raise ValueError(\"Please provide only two dimensions (h, w) for size.\")\n\n # cast to tuple for torchscript\n self.size = tuple(size)\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n width, height = F._get_image_size(img)\n # pad the width if needed\n if self.pad_if_needed and width < self.size[1]:\n padding = [self.size[1] - width, 0]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and height < self.size[0]:\n padding = [0, self.size[0] - height]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(size={0}, padding={1})\".format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(torch.nn.Module):\n \"\"\"Horizontally flip the given image randomly with a given probability.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(torch.nn.Module):\n \"\"\"Vertically flip the given image randomly with a given probability.\n The image can be a PIL Image or a torch Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(object):\n \"\"\"Performs a perspective transformation of the given PIL Image randomly with a given probability.\n\n Args:\n interpolation : Default- Image.BICUBIC\n\n p (float): probability of the image being perspectively transformed. Default value is 0.5\n\n distortion_scale(float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.\n\n fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.\n If int, it is used for all channels respectively. Default value is 0.\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC, fill=0):\n self.p = p\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n self.fill = fill\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be perspectively transformed.\n\n Returns:\n PIL Image: Randomly perspectively transformed image.\n \"\"\"\n if not F._is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if random.random() < self.p:\n width, height = img.size\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation, self.fill)\n return img\n\n @staticmethod\n def get_params(width, height, distortion_scale):\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width : width of the image.\n height : height of the image.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = int(height / 2)\n half_width = int(width / 2)\n topleft = (random.randint(0, int(distortion_scale * half_width)),\n random.randint(0, int(distortion_scale * half_height)))\n topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n random.randint(0, int(distortion_scale * half_height)))\n botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n botleft = (random.randint(0, int(distortion_scale * half_width)),\n random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(torch.nn.Module):\n \"\"\"Crop the given image to random size and aspect ratio.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size (int or sequence): expected output size of each edge. If size is an\n int instead of sequence like (h, w), a square output size ``(size, size)`` is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n scale (tuple of float): range of size of the origin size cropped\n ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped.\n interpolation (int): Desired interpolation enum defined by `filters`_.\n Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``\n and ``PIL.Image.BICUBIC`` are supported.\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.), interpolation=Image.BILINEAR):\n super().__init__()\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n elif isinstance(size, Sequence) and len(size) == 1:\n self.size = (size[0], size[0])\n else:\n if len(size) != 2:\n raise ValueError(\"Please provide only two dimensions (h, w) for size.\")\n self.size = size\n\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float]\n ) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image or Tensor): Input image.\n scale (tuple): range of scale of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n width, height = F._get_image_size(img)\n area = height * width\n\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(*scale).item()\n log_ratio = torch.log(torch.tensor(ratio))\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop.\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. 
Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n elif isinstance(size, Sequence) and len(size) == 1:\n self.size = (size[0], size[0])\n else:\n if len(size) != 2:\n raise ValueError(\"Please provide only two dimensions (h, w) for size.\")\n\n self.size = size\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 5 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default).\n The image can be a PIL Image or a Tensor, in which case it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n super().__init__()\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n elif isinstance(size, Sequence) and len(size) == 1:\n self.size = (size[0], size[0])\n else:\n if len(size) != 2:\n raise ValueError(\"Please provide only two dimensions (h, w) for size.\")\n\n self.size = size\n self.vertical_flip = vertical_flip\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 10 images. 
Image can be PIL Image or Tensor\n \"\"\"\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(object):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n\n Applications:\n whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n \" as any one of the dimensions of the transformation_matrix [{}]\"\n .format(tuple(transformation_matrix.size())))\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):\n raise ValueError(\"tensor and transformation matrix have incompatible shape.\" +\n \"[{} x {} x {}] != \".format(*tensor.size()) +\n \"{}\".format(self.transformation_matrix.size(0)))\n flat_tensor = tensor.view(1, -1) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(tensor.size())\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(transformation_matrix='\n format_string += (str(self.transformation_matrix.tolist()) + ')')\n format_string += (\", (mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(torch.nn.Module):\n \"\"\"Randomly change the brightness, contrast and saturation of an image.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. 
Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n super().__init__()\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n @torch.jit.unused\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with length 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n @torch.jit.unused\n def get_params(brightness, contrast, saturation, hue):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are same as that of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n\n if brightness is not None:\n brightness_factor = random.uniform(brightness[0], brightness[1])\n transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))\n\n if contrast is not None:\n contrast_factor = random.uniform(contrast[0], contrast[1])\n transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))\n\n if saturation is not None:\n saturation_factor = random.uniform(saturation[0], saturation[1])\n transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))\n\n if hue is not None:\n hue_factor = random.uniform(hue[0], hue[1])\n transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))\n\n random.shuffle(transforms)\n transform = Compose(transforms)\n\n return transform\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx = torch.randperm(4)\n for fn_id in fn_idx:\n if fn_id == 0 and self.brightness is not None:\n brightness = self.brightness\n brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()\n img = F.adjust_brightness(img, brightness_factor)\n\n if fn_id == 1 and self.contrast is not None:\n contrast = self.contrast\n contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()\n img = F.adjust_contrast(img, contrast_factor)\n\n if fn_id == 2 and self.saturation is not None:\n saturation = self.saturation\n saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()\n img = F.adjust_saturation(img, saturation_factor)\n\n if fn_id == 3 and self.hue is not None:\n hue = self.hue\n hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()\n img = 
F.adjust_hue(img, hue_factor)\n\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(object):\n \"\"\"Rotate the image by angle.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n fill (n-tuple or int or float): Pixel fill value for area outside the rotated\n image. If int or float, the value is used for all bands respectively.\n Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, resample=False, expand=False, center=None, fill=None):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n if len(degrees) != 2:\n raise ValueError(\"If degrees is a sequence, it must be of len 2.\")\n self.degrees = degrees\n\n self.resample = resample\n self.expand = expand\n self.center = center\n self.fill = fill\n\n @staticmethod\n def get_params(degrees):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n\n return angle\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be rotated.\n\n Returns:\n PIL Image: Rotated image.\n \"\"\"\n\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', resample={0}'.format(self.resample)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n if self.fill is not None:\n format_string += ', fill={0}'.format(self.fill)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(object):\n \"\"\"Random affine transformation of the image keeping center invariant\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. 
For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g. (a, b), then scale is\n randomly sampled from the range a <= scale <= b. Will keep original scale by default.\n shear (sequence or float or int, optional): Range of degrees to select from.\n If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)\n will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the\n range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,\n an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.\n Will not apply shear by default\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n fillcolor (tuple or int): Optional fill color (Tuple for RGB Image And int for grayscale) for the area\n outside the transform in the output image. (Pillow>=5.0.0)\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \\\n \"degrees should be a list or tuple and it must be of length 2.\"\n self.degrees = degrees\n\n if translate is not None:\n assert isinstance(translate, (tuple, list)) and len(translate) == 2, \\\n \"translate should be a list or tuple and it must be of length 2.\"\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n assert isinstance(scale, (tuple, list)) and len(scale) == 2, \\\n \"scale should be a list or tuple and it must be of length 2.\"\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n if isinstance(shear, numbers.Number):\n if shear < 0:\n raise ValueError(\"If shear is a single number, it must be positive.\")\n self.shear = (-shear, shear)\n else:\n assert isinstance(shear, (tuple, list)) and \\\n (len(shear) == 2 or len(shear) == 4), \\\n \"shear should be a list or tuple and it must be of length 2 or 4.\"\n # X-Axis shear with [min, max]\n if len(shear) == 2:\n self.shear = [shear[0], shear[1], 0., 0.]\n elif len(shear) == 4:\n self.shear = [s for s in shear]\n else:\n self.shear = shear\n\n self.resample = resample\n self.fillcolor = fillcolor\n\n @staticmethod\n def get_params(degrees, translate, scale_ranges, shears, img_size):\n \"\"\"Get parameters for affine transformation\n\n Returns:\n sequence: params to be passed to the affine transformation\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n if translate is not None:\n max_dx = translate[0] * img_size[0]\n max_dy = translate[1] * img_size[1]\n translations = (np.round(random.uniform(-max_dx, max_dx)),\n np.round(random.uniform(-max_dy, max_dy)))\n else:\n translations = (0, 
0)\n\n if scale_ranges is not None:\n scale = random.uniform(scale_ranges[0], scale_ranges[1])\n else:\n scale = 1.0\n\n if shears is not None:\n if len(shears) == 2:\n shear = [random.uniform(shears[0], shears[1]), 0.]\n elif len(shears) == 4:\n shear = [random.uniform(shears[0], shears[1]),\n random.uniform(shears[2], shears[3])]\n else:\n shear = 0.0\n\n return angle, translations, scale, shear\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)\n return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.resample > 0:\n s += ', resample={resample}'\n if self.fillcolor != 0:\n s += ', fillcolor={fillcolor}'\n s += ')'\n d = dict(self.__dict__)\n d['resample'] = _pil_interpolation_to_str[d['resample']]\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(object):\n \"\"\"Convert image to grayscale.\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If ``num_output_channels == 1`` : returned image is single channel\n - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n self.num_output_channels = num_output_channels\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n return F.to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(object):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n num_output_channels = 1 if img.mode == 'L' else 3\n if random.random() < self.p:\n return F.to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass RandomErasing(torch.nn.Module):\n \"\"\" Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf\n\n Args:\n p: probability that the random erasing operation will be performed.\n scale: range of proportion of erased area against input image.\n ratio: range of aspect ratio of erased area.\n value: erasing value. Default is 0. If a single int, it is used to\n erase all pixels. 
If a tuple of length 3, it is used to erase\n R, G, B channels respectively.\n If a str of 'random', erasing each pixel with random values.\n inplace: boolean to make this transform inplace. Default set to False.\n\n Returns:\n Erased Image.\n\n # Examples:\n >>> transform = transforms.Compose([\n >>> transforms.RandomHorizontalFlip(),\n >>> transforms.ToTensor(),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> transforms.RandomErasing(),\n >>> ])\n \"\"\"\n\n def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None\n ) -> Tuple[int, int, int, int, Tensor]:\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image of size (C, H, W) to be erased.\n scale (tuple or list): range of proportion of erased area against input image.\n ratio (tuple or list): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. 
``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_c, img_h, img_w = img.shape\n area = img_h * img_w\n\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1, )).item()\n j = torch.randint(0, img_w - w + 1, size=(1, )).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W) to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img.shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n \"{} (number of input channels)\".format(img.shape[-3])\n )\n\n x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)\n return F.erase(img, x, y, h, w, v, self.inplace)\n return img\n" ]
[ [ "torch.mm", "torch.randint", "torch.empty", "torch.randperm", "torch.tensor", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChristianCKKoch/Projektarbeit_Digethic
[ "80999e48de29106545398252bbc6cea2b8b953ce" ]
[ "src/models/model_library.py" ]
[ "from sklearn.model_selection import GridSearchCV\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.svm import SVC\nimport numpy as np\nimport pandas as pd\nimport pickle as pi\nfrom operator import itemgetter\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nimport sys\nsys.path.append(\"src/models/\")\nfrom early_stopping import EarlyStopping\n\nclass Classifier:\n def __init__(self, X_train, X_test, y_train, y_test):\n #Array für alle Ergebnisse\n self.ergebnis = []\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n \n def train_models(self, models):\n for model in models:\n #-----------------------\n #Knn-Classifier\n #-----------------------\n if model == 'knn':\n #Optimalen Knn-Classifier bestimmen\n error = []\n for i in range(1, 250):\n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(self.X_train, self.y_train)\n pred_i = knn.predict(self.X_test)\n error.append([i, np.mean(pred_i != self.y_test)])\n\n #Debug-Print\n print()\n print(\"Debug KNN-Classifier\")\n print(\"knn n: {}\".format(sorted(error, key=itemgetter(1), reverse=False)[0][0]))\n print(\"knn error: {}\".format(sorted(error, key=itemgetter(1), reverse=False)[0][1]))\n print()\n\n #Optimale Anzahl der n_neighbors übergeben\n optimal_n = sorted(error, key=itemgetter(1), reverse=False)[0][0]\n\n #Knn-Classifier trainieren\n knnclf = KNeighborsClassifier(n_neighbors=optimal_n)\n knnclf.fit(self.X_train, self.y_train)\n\n #Knn-Classifier Akkuranz bestimmen\n score = knnclf.score(self.X_test,self.y_test)\n self.ergebnis.append([knnclf.__class__.__name__, score, knnclf])\n #-----------------------\n \n #-----------------------\n #Decision Tree\n #-----------------------\n elif model == 'dt':\n #class_weight gebrauchen für DT und RF\n\n #Optimalen Decision Tree bestimmen\n #Zu testende Decision Tree Parameter\n dt = DecisionTreeClassifier()\n tree_para = {'criterion':['gini','entropy'],'max_depth':[i for i in range(1,20)]\n , 'min_samples_split':[i for i in range (2,10)]}\n\n #GridSearchCV \n grd_clf = GridSearchCV(dt, tree_para, cv=5)\n grd_clf.fit(self.X_train, self.y_train)\n\n #Besten gefundenen Decision Tree übergeben\n dt_clf = grd_clf.best_estimator_\n\n #Debug-Print\n print()\n print(\"Debug DecisionTreeClassifier\")\n print(\"dt best parameters: {}\".format(grd_clf.best_params_))\n print()\n\n score = dt_clf.score(self.X_test, self.y_test)\n self.ergebnis.append([dt_clf.__class__.__name__, score, dt_clf])\n #-----------------------\n\n #-----------------------\n #Random Forest\n #-----------------------\n elif model == 'rf':\n #rf = RandomForestClassifier(max_depth=8, criterion=\"entropy\", min_samples_split=9)\n rf = RandomForestClassifier(n_estimators=100)\n rf.fit(self.X_train, self.y_train)\n score = rf.score(self.X_test, self.y_test)\n self.ergebnis.append([rf.__class__.__name__, score, rf])\n #-----------------------\n\n #-----------------------\n #Support Vector Machine\n #-----------------------\n elif model == 'svm':\n svm = SVC(kernel = 'poly', probability=True)\n svm.fit(self.X_train, self.y_train)\n score = svm.score(self.X_test, self.y_test)\n self.ergebnis.append([svm.__class__.__name__, score, svm])\n\n #-----------------------\n #MLP\n #-----------------------\n elif model == 'mlp':\n mlp = 
MLPClassifier(hidden_layer_sizes=[100,100], max_iter=5000, solver='sgd'\n , learning_rate='adaptive', learning_rate_init=0.01, n_iter_no_change=200, early_stopping=True)\n mlp.fit(self.X_train, self.y_train)\n score = mlp.score(self.X_test, self.y_test)\n self.ergebnis.append([mlp.__class__.__name__, score, mlp])\n\n #Debug-Print\n print()\n print(\"Debug MLPClassifier\")\n print(\"iterations: {}; layers: {}; loss: {}\".format(mlp.n_iter_, mlp.n_layers_, mlp.loss_))\n print()\n #epochs = np.linspace(1,mlp.n_iter_, mlp.n_iter_)\n\n return self.ergebnis\n \n def ensemble_model(self):\n \n #Alle inkludierten Modelle werden in eine Liste geladen, die dann als Parameter\n #dem Voting Classifier übergeben wird.\n models = list()\n for model in self.ergebnis:\n models.append([model[0], model[2]])\n \n voting_clf = VotingClassifier(estimators=models, voting='soft')\n voting_clf.fit(self.X_train, self.y_train)\n score = voting_clf.score(self.X_test, self.y_test)\n self.ergebnis.append([voting_clf.__class__.__name__, score, voting_clf])\n\n return self.ergebnis\n\n def neuronal_network(self, epochs, patience_early_stopping, threshold_for_early_stopping):\n #Funktion für das Ansprechen und Ausführen des Neuronalen Netzes mittels Pytorch\n\n #Standardausgabe für Pytorch, auf welcher processing unit gerechnet wird\n #In meinem Falle nur CPU möglich\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print('This Computation is running on {}'.format(device))\n\n #Initialisierung des Neuronalen Netzwerks\n nn_model = NN_Model()\n #Als Fehlerfunktion wird CrossEntropyLoss verwendet\n loss_func = torch.nn.CrossEntropyLoss()\n #Als Optimizer der Adam-Optimizer mit einer learing rate von 0.001\n optimizer = torch.optim.Adam(nn_model.parameters(), lr=0.0001)\n\n #Leere Arrays für das Speichern von Fehler-/Akkuranzdaten über die Epochen hinweg\n epoch_errors = []\n epoch_train_accuracy = []\n epoch_test_accuracy = []\n\n #Initialisierung des Early Stopping\n early_stopping = EarlyStopping(patience=patience_early_stopping)\n\n #Umsetzen der Trainings- und Testdaten in die benötigten Tensoren-Formate\n X_Train = torch.from_numpy(self.X_train).float()\n y_Train = torch.tensor(self.y_train, dtype=torch.long)\n X_Test = torch.from_numpy(self.X_test).float()\n y_Test = torch.from_numpy(np.array(self.y_test)).long()\n \n #Trainieren des Neuronalen Netzwerks; maximale Anzahl der Epochen als Funktionsparameter übergeben\n for epoch in range(epochs):\n \n #Vorbereiten der Ergebnisse des Neuronalen Netzwerkes\n #LogSoftmax explizit hier, da diese in der Fehlerfunktion (CrossEntropyLoss) automatisch\n #angewandt wird!\n log_sm = torch.nn.LogSoftmax(dim=1)\n train_nn_model = log_sm(nn_model(X_Train))\n test_nn_model = log_sm(nn_model(X_Test))\n\n #Erstellen von leerem Array für das Speichern der einzelnen vom Modell berechneten Ergebnisse\n #Zusätzlich noch ein Zähler zum Aufsummieren der korrekt vorhergesagten Ergebnisse, mit 0 initialisiert\n train_pred_ergebnis = []\n train_running_correct = 0\n\n test_pred_ergebnis = []\n test_running_correct = 0\n\n #Autograd ausschalten für das Berechnen der Ergebnisse zu Validierungszwecken\n with torch.no_grad():\n #Trainings-Akkuranz\n # Leeren array füllen mit Ergebnissen aus Ergebnis-Tensor\n # Hierbei werden die probalistischen Werte verglichen und das wahrscheinlichste Ergebnis übergeben\n # als 0 - Heimsieg, 1 - Unentschieden, 2 - Auswärtssieg\n for i in range(train_nn_model.shape[0]):\n ergebnis = 0 if (train_nn_model[i][0] > train_nn_model[i][1] and 
train_nn_model[i][0] > train_nn_model[i][2]) else 1 if (train_nn_model[i][1] > train_nn_model[i][0] and train_nn_model[i][1] > train_nn_model[i][2]) else 2\n train_pred_ergebnis.append(ergebnis)\n #Test-Akkuranz\n # Leeren array füllen mit Ergebnissen aus Ergebnis-Tensor\n # Hierbei werden die probalistischen Werte verglichen und das wahrscheinlichste Ergebnis übergeben\n # als 0 - Heimsieg, 1 - Unentschieden, 2 - Auswärtssieg\n for i in range(test_nn_model.shape[0]):\n ergebnis = 0 if (test_nn_model[i][0] > test_nn_model[i][1] and test_nn_model[i][0] > test_nn_model[i][2]) else 1 if (test_nn_model[i][1] > test_nn_model[i][0] and test_nn_model[i][1] > test_nn_model[i][2]) else 2\n test_pred_ergebnis.append(ergebnis)\n\n #Arrays in tensor umwandeln\n train_pred_tensor = torch.tensor(train_pred_ergebnis, dtype=torch.float)\n test_pred_tensor = torch.tensor(test_pred_ergebnis, dtype=torch.float)\n\n #Die korrekten Ergebnisse aus dem Traininsdatensatz werden aufsummiert und\n #daraus die Akkuranz dieser Epoche berechnet und dem Array epoch_train_accuracy für spätere Auswertung übergeben\n train_running_correct += (train_pred_tensor == y_Train).sum().item()\n train_accuracy = train_running_correct*100./y_Train.shape[0]\n epoch_train_accuracy.append(train_accuracy)\n\n #Die korrekten Ergebnisse aus dem Testdatensatz werden aufsummiert und\n #daraus die Akkuranz dieser Epoche berechnet und dem Array epoch_test_accuracy für spätere Auswertung übergeben\n test_running_correct += (test_pred_tensor == y_Test).sum().item()\n test_accuracy = test_running_correct*100./y_Test.shape[0]\n epoch_test_accuracy.append(test_accuracy)\n \n #---------------------------------------------------------------------------------------\n #Hier werden nun die entscheidenden Schritte zum Trainineren des NN Modells durchgeführt\n #---------------------------------------------------------------------------------------\n error = loss_func(nn_model(X_Train),y_Train)\n optimizer.zero_grad()\n error.backward()\n epoch_errors.append(error.item())\n optimizer.step()\n #---------------------------------------------------------------------------------------\n \n #Debug-Print Ausgabe der Epoche mit Akkuranzen\n print(\"Epoche: {}/{} mit Train-Akkuranz: {} und Test-Akkuranz: {}\".format(epoch, epochs, train_accuracy, test_accuracy))\n\n #-----------------------------\n #Early Stopping\n #-----------------------------\n #Loss für Testdaten berechnen\n error_Test = loss_func(nn_model(X_Test),y_Test)\n\n #Aufruf der Early Stopping Funktion\n # Die Fehlerfunkion der Testdaten dient hier als zentrales Kriterium:\n # Sinkt diese mit der Rate \"delta\" eine bestimmte Anzahl Schritte \"patience\" \n # hintereinander NICHT MEHR, wird gestoppt. 
\n # Zusätzlich wird ein Threshold mit angegeben, sodass erst ab einer bestimmten erreichten\n # Akkuranz das Early Stopping aktiviert wird.\n early_stopping(error_Test, nn_model, train_accuracy > threshold_for_early_stopping)\n #Sollte ein Early Stop erreicht sein, wird das Durchlaufen der Epochen unterbrochen\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n #-----------------------------\n\n #Debug-Print finales Loss-Ergebnis\n #print('Loss nach {} Epochen: {}'.format(epoch+1,error.item()))\n\n #Übergabe der Ergebnisdaten and den zentralen Ergebnis-Array\n self.ergebnis.append([nn_model.__class__.__name__, test_accuracy/100, nn_model])\n\n #Rückgabewerte für weitere Verwendung (Ausgabe, Test) im Hauptprogramm\n return self.ergebnis, epoch_errors, epoch_train_accuracy, epoch_test_accuracy, test_pred_tensor\n\n\nclass NN_Model(torch.nn.Module):\n def __init__(self):\n super(NN_Model, self).__init__()\n self.fc1 = nn.Linear(75,120)\n self.fc2 = nn.Linear(120,180)\n self.fc3 = nn.Linear(180,100)\n self.fc4 = nn.Linear(100,40)\n self.output = nn.Linear(40,3)\n\n def forward(self,x):\n x = torch.relu(self.fc1(x))\n x = torch.sigmoid(self.fc2(x))\n x = torch.relu(self.fc3(x))\n x = torch.sigmoid(self.fc4(x))\n #Keine Softmax-Funktion benötigt bei output, da CrossEntropyLoss\n #als Fehlerfunktion dies automatisch tut\n #Bemerkung: softmax muss aber beim Validieren/Testen angewandt werden!\n x = self.output(x)\n\n return x" ]
[ [ "sklearn.neural_network.MLPClassifier", "torch.nn.CrossEntropyLoss", "torch.nn.LogSoftmax", "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.RandomForestClassifier", "torch.from_numpy", "sklearn.ensemble.VotingClassifier", "torch.tensor", "sklearn.neighbors.KNeighborsClassifier", "torch.nn.Linear", "sklearn.tree.DecisionTreeClassifier", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "sklearn.svm.SVC", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Comp-UFSCar/neural-networks-2
[ "e5e105c91bcd1d63b200f36b9e02dbcde54ae756" ]
[ "tasks/assignment-1/p2.py" ]
[ "\"\"\"Problem 2.\n\nAuthor: Lucas David -- <[email protected]>\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom algorithms.linear_estimator import Perceptron\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.io import loadmat\nfrom sklearn.metrics import accuracy_score\n\nAxes3D\n\n\ndef a(X, y):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)\n\n plt.grid()\n plt.tight_layout(0)\n plt.savefig('results/p2.a.png')\n\n\ndef b(X, y):\n clf = Perceptron(learning_rate=.1, n_epochs=200, random_state=0)\n\n clf.fit(X, y)\n s = clf.predict(X)\n print(y)\n print(s)\n print('score: %.2f' % accuracy_score(y, s))\n print('epochs needed: %i' % clf.n_epochs_)\n\n fig = plt.figure()\n ax = fig.add_subplot(121)\n ax.plot(range(len(clf.loss_)), clf.loss_)\n\n ax = fig.add_subplot(122, projection='3d')\n\n xx, yy = np.meshgrid(range(10), range(10))\n\n normal = clf.W_\n z = (-normal[0] * xx - normal[1] * yy - clf.b_) * 1. / normal[2]\n ax.plot_surface(xx, yy, z, alpha=0.2, color='green')\n\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)\n\n plt.grid()\n plt.tight_layout(0)\n plt.savefig('results/p2b.png')\n\n\ndef c(X, y):\n clf = Perceptron(learning_rate=.0001, n_epochs=2, random_state=0)\n\n clf.fit(X, y)\n s = clf.predict(X)\n print(y)\n print(s)\n print('score: %.2f' % accuracy_score(y, s))\n print('epochs needed: %i' % clf.n_epochs_)\n\n fig = plt.figure()\n ax = fig.add_subplot(121)\n ax.plot(range(len(clf.loss_)), clf.loss_)\n\n ax = fig.add_subplot(122, projection='3d')\n\n xx, yy = np.meshgrid(range(10), range(10))\n\n normal = clf.W_\n z = (-normal[0] * xx - normal[1] * yy - clf.b_) * 1. / normal[2]\n ax.plot_surface(xx, yy, z, alpha=0.2, color='green')\n\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)\n\n plt.grid()\n plt.tight_layout(0)\n plt.savefig('results/p2c.png')\n\n\ndef d(X, y):\n clf = Perceptron(learning_rate=.1, n_epochs=200, random_state=1)\n\n clf.fit(X, y)\n s = clf.predict(X)\n print(y)\n print(s)\n print('score: %.2f' % accuracy_score(y, s))\n print('epochs needed: %i' % clf.n_epochs_)\n\n fig = plt.figure()\n ax = fig.add_subplot(121)\n ax.plot(range(len(clf.loss_)), clf.loss_)\n\n ax = fig.add_subplot(122, projection='3d')\n\n xx, yy = np.meshgrid(range(10), range(10))\n\n normal = clf.W_\n z = (-normal[0] * xx - normal[1] * yy - clf.b_) * 1. / normal[2]\n ax.plot_surface(xx, yy, z, alpha=0.2, color='green')\n\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)\n\n plt.grid()\n plt.tight_layout(0)\n plt.savefig('results/p2d.png')\n\n\ndef main():\n print(__doc__)\n\n data = loadmat('./data/dados1.mat')\n X, y = data['x'], data['desejado'].flatten()\n # y[y == -1] = 0\n\n d(X, y)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "sklearn.metrics.accuracy_score", "scipy.io.loadmat", "matplotlib.pyplot.savefig", "matplotlib.pyplot.grid", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
MaxVanDeursen/pandas
[ "9821b77de692716d7c2b62db1a68cac9ffc456c3", "9821b77de692716d7c2b62db1a68cac9ffc456c3" ]
[ "pandas/core/arrays/base.py", "pandas/tests/groupby/test_transform.py" ]
[ "\"\"\"An interface for extending pandas with custom arrays.\n\n.. warning::\n\n This is an experimental API and subject to breaking changes\n without warning.\n\"\"\"\nimport operator\nfrom typing import Any, Callable, Optional, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom pandas.compat import PY3, set_function_name\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import ops\n\n_not_implemented_message = \"{} does not implement {}.\"\n\n_extension_array_shared_docs = dict()\n\n\nclass ExtensionArray(object):\n \"\"\"\n Abstract base class for custom 1-D array types.\n\n pandas will recognize instances of this class as proper arrays\n with a custom type and will not attempt to coerce them to objects. They\n may be stored directly inside a :class:`DataFrame` or :class:`Series`.\n\n .. versionadded:: 0.23.0\n\n Notes\n -----\n The interface includes the following abstract methods that must be\n implemented by subclasses:\n\n * _from_sequence\n * _from_factorized\n * __getitem__\n * __len__\n * dtype\n * nbytes\n * isna\n * take\n * copy\n * _concat_same_type\n\n A default repr displaying the type, (truncated) data, length,\n and dtype is provided. It can be customized or replaced by\n by overriding:\n\n * __repr__ : A default repr for the ExtensionArray.\n * _formatter : Print scalars inside a Series or DataFrame.\n\n Some methods require casting the ExtensionArray to an ndarray of Python\n objects with ``self.astype(object)``, which may be expensive. When\n performance is a concern, we highly recommend overriding the following\n methods:\n\n * fillna\n * dropna\n * unique\n * factorize / _values_for_factorize\n * argsort / _values_for_argsort\n * searchsorted\n\n The remaining methods implemented on this class should be performant,\n as they only compose abstract methods. Still, a more efficient\n implementation may be available, and these methods can be overridden.\n\n One can implement methods to handle array reductions.\n\n * _reduce\n\n One can implement methods to handle parsing from strings that will be used\n in methods such as ``pandas.io.parsers.read_csv``.\n\n * _from_sequence_of_strings\n\n This class does not inherit from 'abc.ABCMeta' for performance reasons.\n Methods and properties required by the interface raise\n ``pandas.errors.AbstractMethodError`` and no ``register`` method is\n provided for registering virtual subclasses.\n\n ExtensionArrays are limited to 1 dimension.\n\n They may be backed by none, one, or many NumPy arrays. For example,\n ``pandas.Categorical`` is an extension array backed by two arrays,\n one for codes and one for categories. An array of IPv6 address may\n be backed by a NumPy structured array with two fields, one for the\n lower 64 bits and one for the upper 64 bits. Or they may be backed\n by some other storage type, like Python lists. Pandas makes no\n assumptions on how the data are stored, just that it can be converted\n to a NumPy array.\n The ExtensionArray interface does not impose any rules on how this data\n is stored. However, currently, the backing data cannot be stored in\n attributes called ``.values`` or ``._values`` to ensure full compatibility\n with pandas internals. 
But other names as ``.data``, ``._data``,\n ``._items``, ... can be freely used.\n \"\"\"\n # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.\n # Don't override this.\n _typ = 'extension'\n\n # ------------------------------------------------------------------------\n # Constructors\n # ------------------------------------------------------------------------\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n \"\"\"\n Construct a new ExtensionArray from a sequence of scalars.\n\n Parameters\n ----------\n scalars : Sequence\n Each element will be an instance of the scalar type for this\n array, ``cls.dtype.type``.\n dtype : dtype, optional\n Construct for this particular dtype. This should be a Dtype\n compatible with the ExtensionArray.\n copy : boolean, default False\n If True, copy the underlying data.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(cls)\n\n @classmethod\n def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):\n \"\"\"Construct a new ExtensionArray from a sequence of strings.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n strings : Sequence\n Each element will be an instance of the scalar type for this\n array, ``cls.dtype.type``.\n dtype : dtype, optional\n Construct for this particular dtype. This should be a Dtype\n compatible with the ExtensionArray.\n copy : boolean, default False\n If True, copy the underlying data.\n\n Returns\n -------\n ExtensionArray\n\n \"\"\"\n raise AbstractMethodError(cls)\n\n @classmethod\n def _from_factorized(cls, values, original):\n \"\"\"\n Reconstruct an ExtensionArray after factorization.\n\n Parameters\n ----------\n values : ndarray\n An integer ndarray with the factorized values.\n original : ExtensionArray\n The original ExtensionArray that factorize was called on.\n\n See Also\n --------\n pandas.factorize\n ExtensionArray.factorize\n \"\"\"\n raise AbstractMethodError(cls)\n\n # ------------------------------------------------------------------------\n # Must be a Sequence\n # ------------------------------------------------------------------------\n\n def __getitem__(self, item):\n # type (Any) -> Any\n \"\"\"\n Select a subset of self.\n\n Parameters\n ----------\n item : int, slice, or ndarray\n * int: The position in 'self' to get.\n\n * slice: A slice object, where 'start', 'stop', and 'step' are\n integers or None\n\n * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'\n\n Returns\n -------\n item : scalar or ExtensionArray\n\n Notes\n -----\n For scalar ``item``, return a scalar value suitable for the array's\n type. This should be an instance of ``self.dtype.type``.\n\n For slice ``key``, return an instance of ``ExtensionArray``, even\n if the slice is length 0 or 1.\n\n For a boolean mask, return an instance of ``ExtensionArray``, filtered\n to the values where ``item`` is True.\n \"\"\"\n raise AbstractMethodError(self)\n\n def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n \"\"\"\n Set one or more values inplace.\n\n This method is not required to satisfy the pandas extension array\n interface.\n\n Parameters\n ----------\n key : int, ndarray, or slice\n When called from, e.g. 
``Series.__setitem__``, ``key`` will be\n one of\n\n * scalar int\n * ndarray of integers.\n * boolean ndarray\n * slice object\n\n value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object\n value or values to be set of ``key``.\n\n Returns\n -------\n None\n \"\"\"\n # Some notes to the ExtensionArray implementor who may have ended up\n # here. While this method is not required for the interface, if you\n # *do* choose to implement __setitem__, then some semantics should be\n # observed:\n #\n # * Setting multiple values : ExtensionArrays should support setting\n # multiple values at once, 'key' will be a sequence of integers and\n # 'value' will be a same-length sequence.\n #\n # * Broadcasting : For a sequence 'key' and a scalar 'value',\n # each position in 'key' should be set to 'value'.\n #\n # * Coercion : Most users will expect basic coercion to work. For\n # example, a string like '2018-01-01' is coerced to a datetime\n # when setting on a datetime64ns array. In general, if the\n # __init__ method coerces that value, then so should __setitem__\n # Note, also, that Series/DataFrame.where internally use __setitem__\n # on a copy of the data.\n raise NotImplementedError(_not_implemented_message.format(\n type(self), '__setitem__')\n )\n\n def __len__(self):\n # type: () -> int\n \"\"\"\n Length of this array\n\n Returns\n -------\n length : int\n \"\"\"\n raise AbstractMethodError(self)\n\n def __iter__(self):\n \"\"\"\n Iterate over elements of the array.\n \"\"\"\n # This needs to be implemented so that pandas recognizes extension\n # arrays as list-like. The default implementation makes successive\n # calls to ``__getitem__``, which may be slower than necessary.\n for i in range(len(self)):\n yield self[i]\n\n # ------------------------------------------------------------------------\n # Required attributes\n # ------------------------------------------------------------------------\n @property\n def dtype(self):\n # type: () -> ExtensionDtype\n \"\"\"\n An instance of 'ExtensionDtype'.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def shape(self):\n # type: () -> Tuple[int, ...]\n \"\"\"\n Return a tuple of the array dimensions.\n \"\"\"\n return (len(self),)\n\n @property\n def ndim(self):\n # type: () -> int\n \"\"\"\n Extension Arrays are only allowed to be 1-dimensional.\n \"\"\"\n return 1\n\n @property\n def nbytes(self):\n # type: () -> int\n \"\"\"\n The number of bytes needed to store this object in memory.\n \"\"\"\n # If this is expensive to compute, return an approximate lower bound\n # on the number of bytes needed.\n raise AbstractMethodError(self)\n\n # ------------------------------------------------------------------------\n # Additional Methods\n # ------------------------------------------------------------------------\n def astype(self, dtype, copy=True):\n \"\"\"\n Cast to a NumPy array with 'dtype'.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n copy : bool, default True\n Whether to copy the data, even if not necessary. 
If False,\n a copy is made only if the old dtype does not match the\n new dtype.\n\n Returns\n -------\n array : ndarray\n NumPy ndarray with 'dtype' for its dtype.\n \"\"\"\n return np.array(self, dtype=dtype, copy=copy)\n\n def isna(self):\n # type: () -> Union[ExtensionArray, np.ndarray]\n \"\"\"\n A 1-D array indicating if each value is missing.\n\n Returns\n -------\n na_values : Union[np.ndarray, ExtensionArray]\n In most cases, this should return a NumPy ndarray. For\n exceptional cases like ``SparseArray``, where returning\n an ndarray would be expensive, an ExtensionArray may be\n returned.\n\n Notes\n -----\n If returning an ExtensionArray, then\n\n * ``na_values._is_boolean`` should be True\n * `na_values` should implement :func:`ExtensionArray._reduce`\n * ``na_values.any`` and ``na_values.all`` should be implemented\n \"\"\"\n raise AbstractMethodError(self)\n\n def _values_for_argsort(self):\n # type: () -> np.ndarray\n \"\"\"\n Return values for sorting.\n\n Returns\n -------\n ndarray\n The transformed values should maintain the ordering between values\n within the array.\n\n See Also\n --------\n ExtensionArray.argsort\n \"\"\"\n # Note: this is used in `ExtensionArray.argsort`.\n return np.array(self)\n\n def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):\n \"\"\"\n Return the indices that would sort this array.\n\n Parameters\n ----------\n ascending : bool, default True\n Whether the indices should result in an ascending\n or descending sort.\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n Sorting algorithm.\n *args, **kwargs:\n passed through to :func:`numpy.argsort`.\n\n Returns\n -------\n index_array : ndarray\n Array of indices that sort ``self``.\n\n See Also\n --------\n numpy.argsort : Sorting implementation used internally.\n \"\"\"\n # Implementor note: You have two places to override the behavior of\n # argsort.\n # 1. _values_for_argsort : construct the values passed to np.argsort\n # 2. argsort : total control over sorting.\n ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)\n values = self._values_for_argsort()\n result = np.argsort(values, kind=kind, **kwargs)\n if not ascending:\n result = result[::-1]\n return result\n\n def fillna(self, value=None, method=None, limit=None):\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, array-like\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, an array-like 'value' can be given. It's expected\n that the array-like have the same length as 'self'.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. 
If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : ExtensionArray with NA/NaN filled\n \"\"\"\n from pandas.api.types import is_array_like\n from pandas.util._validators import validate_fillna_kwargs\n from pandas.core.missing import pad_1d, backfill_1d\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\"Length of 'value' does not match. Got ({}) \"\n \" expected {}\".format(len(value), len(self)))\n value = value[mask]\n\n if mask.any():\n if method is not None:\n func = pad_1d if method == 'pad' else backfill_1d\n new_values = func(self.astype(object), limit=limit,\n mask=mask)\n new_values = self._from_sequence(new_values, dtype=self.dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n def dropna(self):\n \"\"\"\n Return ExtensionArray without NA values\n\n Returns\n -------\n valid : ExtensionArray\n \"\"\"\n return self[~self.isna()]\n\n def shift(self, periods=1, fill_value=None):\n # type: (int, object) -> ExtensionArray\n \"\"\"\n Shift values by desired number.\n\n Newly introduced missing values are filled with\n ``self.dtype.na_value``.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n periods : int, default 1\n The number of periods to shift. Negative values are allowed\n for shifting backwards.\n\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default is ``self.dtype.na_value``\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n shifted : ExtensionArray\n\n Notes\n -----\n If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is\n returned.\n\n If ``periods > len(self)``, then an array of size\n len(self) is returned, with all values filled with\n ``self.dtype.na_value``.\n \"\"\"\n # Note: this implementation assumes that `self.dtype.na_value` can be\n # stored in an instance of your ExtensionArray with `self.dtype`.\n if not len(self) or periods == 0:\n return self.copy()\n\n if isna(fill_value):\n fill_value = self.dtype.na_value\n\n empty = self._from_sequence(\n [fill_value] * min(abs(periods), len(self)),\n dtype=self.dtype\n )\n if periods > 0:\n a = empty\n b = self[:-periods]\n else:\n a = self[abs(periods):]\n b = empty\n return self._concat_same_type([a, b])\n\n def unique(self):\n \"\"\"\n Compute the ExtensionArray of unique values.\n\n Returns\n -------\n uniques : ExtensionArray\n \"\"\"\n from pandas import unique\n\n uniques = unique(self.astype(object))\n return self._from_sequence(uniques, dtype=self.dtype)\n\n def searchsorted(self, value, side=\"left\", sorter=None):\n \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n .. 
versionadded:: 0.24.0\n\n Find the indices into a sorted array `self` (a) such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n Assuming that `self` is sorted:\n\n ====== ================================\n `side` returned index `i` satisfies\n ====== ================================\n left ``self[i-1] < value <= self[i]``\n right ``self[i-1] <= value < self[i]``\n ====== ================================\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort array a into ascending\n order. They are typically the result of argsort.\n\n Returns\n -------\n array of ints\n Array of insertion points with the same shape as `value`.\n\n See Also\n --------\n numpy.searchsorted : Similar method from NumPy.\n \"\"\"\n # Note: the base tests provided by pandas only test the basics.\n # We do not test\n # 1. Values outside the range of the `data_for_sorting` fixture\n # 2. Values between the values in the `data_for_sorting` fixture\n # 3. Missing values.\n arr = self.astype(object)\n return arr.searchsorted(value, side=side, sorter=sorter)\n\n def _values_for_factorize(self):\n # type: () -> Tuple[np.ndarray, Any]\n \"\"\"\n Return an array and missing value suitable for factorization.\n\n Returns\n -------\n values : ndarray\n\n An array suitable for factorization. This should maintain order\n and be a supported dtype (Float64, Int64, UInt64, String, Object).\n By default, the extension array is cast to object dtype.\n na_value : object\n The value in `values` to consider missing. This will be treated\n as NA in the factorization routines, so it will be coded as\n `na_sentinal` and not included in `uniques`. By default,\n ``np.nan`` is used.\n\n Notes\n -----\n The values returned by this method are also used in\n :func:`pandas.util.hash_pandas_object`.\n \"\"\"\n return self.astype(object), np.nan\n\n def factorize(self, na_sentinel=-1):\n # type: (int) -> Tuple[np.ndarray, ExtensionArray]\n \"\"\"\n Encode the extension array as an enumerated type.\n\n Parameters\n ----------\n na_sentinel : int, default -1\n Value to use in the `labels` array to indicate missing values.\n\n Returns\n -------\n labels : ndarray\n An integer NumPy array that's an indexer into the original\n ExtensionArray.\n uniques : ExtensionArray\n An ExtensionArray containing the unique values of `self`.\n\n .. note::\n\n uniques will *not* contain an entry for the NA value of\n the ExtensionArray if there are any missing values present\n in `self`.\n\n See Also\n --------\n pandas.factorize : Top-level factorize method that dispatches here.\n\n Notes\n -----\n :meth:`pandas.factorize` offers a `sort` keyword as well.\n \"\"\"\n # Impelmentor note: There are two ways to override the behavior of\n # pandas.factorize\n # 1. _values_for_factorize and _from_factorize.\n # Specify the values passed to pandas' internal factorization\n # routines, and how to convert from those values back to the\n # original ExtensionArray.\n # 2. 
ExtensionArray.factorize.\n # Complete control over factorization.\n from pandas.core.algorithms import _factorize_array\n\n arr, na_value = self._values_for_factorize()\n\n labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,\n na_value=na_value)\n\n uniques = self._from_factorized(uniques, self)\n return labels, uniques\n\n _extension_array_shared_docs['repeat'] = \"\"\"\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n repeated_array : %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n Index.repeat : Equivalent function for Index.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n ExtensionArray.take : Take arbitrary positions.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n [a, b, c]\n Categories (3, object): [a, b, c]\n >>> cat.repeat(2)\n [a, a, b, b, c, c]\n Categories (3, object): [a, b, c]\n >>> cat.repeat([1, 2, 3])\n [a, b, b, c, c, c]\n Categories (3, object): [a, b, c]\n \"\"\"\n\n @Substitution(klass='ExtensionArray')\n @Appender(_extension_array_shared_docs['repeat'])\n def repeat(self, repeats, axis=None):\n nv.validate_repeat(tuple(), dict(axis=axis))\n ind = np.arange(len(self)).repeat(repeats)\n return self.take(ind)\n\n # ------------------------------------------------------------------------\n # Indexing methods\n # ------------------------------------------------------------------------\n\n def take(self, indices, allow_fill=False, fill_value=None):\n # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray\n \"\"\"\n Take elements from an array.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n fill_value : any, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if necessary.\n\n Returns\n -------\n ExtensionArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n\n Notes\n -----\n ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,\n ``iloc``, when `indices` is a sequence of values. 
Additionally,\n it's called by :meth:`Series.reindex`, or any other method\n that causes realignment, with a `fill_value`.\n\n See Also\n --------\n numpy.take\n pandas.api.extensions.take\n\n Examples\n --------\n Here's an example implementation, which relies on casting the\n extension array to object dtype. This uses the helper method\n :func:`pandas.api.extensions.take`.\n\n .. code-block:: python\n\n def take(self, indices, allow_fill=False, fill_value=None):\n from pandas.core.algorithms import take\n\n # If the ExtensionArray is backed by an ndarray, then\n # just pass that here instead of coercing to object.\n data = self.astype(object)\n\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n # fill value should always be translated from the scalar\n # type for the array, to the physical storage type for\n # the data, before passing to take.\n\n result = take(data, indices, fill_value=fill_value,\n allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.dtype)\n \"\"\"\n # Implementer note: The `fill_value` parameter should be a user-facing\n # value, an instance of self.dtype.type. When passed `fill_value=None`,\n # the default of `self.dtype.na_value` should be used.\n # This may differ from the physical storage type your ExtensionArray\n # uses. In this case, your implementation is responsible for casting\n # the user-facing type to the storage type, before using\n # pandas.api.extensions.take\n raise AbstractMethodError(self)\n\n def copy(self, deep=False):\n # type: (bool) -> ExtensionArray\n \"\"\"\n Return a copy of the array.\n\n Parameters\n ----------\n deep : bool, default False\n Also copy the underlying data backing this array.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(self)\n\n # ------------------------------------------------------------------------\n # Printing\n # ------------------------------------------------------------------------\n def __repr__(self):\n from pandas.io.formats.printing import format_object_summary\n\n template = (\n u'{class_name}'\n u'{data}\\n'\n u'Length: {length}, dtype: {dtype}'\n )\n # the short repr has no trailing newline, while the truncated\n # repr does. So we include a newline in our template, and strip\n # any trailing newlines from format_object_summary\n data = format_object_summary(self, self._formatter(),\n indent_for_name=False).rstrip(', \\n')\n class_name = u'<{}>\\n'.format(self.__class__.__name__)\n return template.format(class_name=class_name, data=data,\n length=len(self),\n dtype=self.dtype)\n\n def _formatter(self, boxed=False):\n # type: (bool) -> Callable[[Any], Optional[str]]\n \"\"\"Formatting function for scalar values.\n\n This is used in the default '__repr__'. The returned formatting\n function receives instances of your scalar type.\n\n Parameters\n ----------\n boxed: bool, default False\n An indicated for whether or not your array is being printed\n within a Series, DataFrame, or Index (True), or just by\n itself (False). This may be useful if you want scalar values\n to appear differently within a Series versus on its own (e.g.\n quoted or not).\n\n Returns\n -------\n Callable[[Any], str]\n A callable that gets instances of the scalar type and\n returns a string. 
By default, :func:`repr` is used\n when ``boxed=False`` and :func:`str` is used when\n ``boxed=True``.\n \"\"\"\n if boxed:\n return str\n return repr\n\n def _formatting_values(self):\n # type: () -> np.ndarray\n # At the moment, this has to be an array since we use result.dtype\n \"\"\"\n An array of values to be printed in, e.g. the Series repr\n\n .. deprecated:: 0.24.0\n\n Use :meth:`ExtensionArray._formatter` instead.\n \"\"\"\n return np.array(self)\n\n # ------------------------------------------------------------------------\n # Reshaping\n # ------------------------------------------------------------------------\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n # type: (Sequence[ExtensionArray]) -> ExtensionArray\n \"\"\"\n Concatenate multiple array\n\n Parameters\n ----------\n to_concat : sequence of this type\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(cls)\n\n # The _can_hold_na attribute is set to True so that pandas internals\n # will use the ExtensionDtype.na_value as the NA value in operations\n # such as take(), reindex(), shift(), etc. In addition, those results\n # will then be of the ExtensionArray subclass rather than an array\n # of objects\n _can_hold_na = True\n\n @property\n def _ndarray_values(self):\n # type: () -> np.ndarray\n \"\"\"\n Internal pandas method for lossy conversion to a NumPy ndarray.\n\n This method is not part of the pandas interface.\n\n The expectation is that this is cheap to compute, and is primarily\n used for interacting with our indexers.\n \"\"\"\n return np.array(self)\n\n def _reduce(self, name, skipna=True, **kwargs):\n \"\"\"\n Return a scalar result of performing the reduction operation.\n\n Parameters\n ----------\n name : str\n Name of the function, supported values are:\n { any, all, min, max, sum, mean, median, prod,\n std, var, sem, kurt, skew }.\n skipna : bool, default True\n If True, skip NaN values.\n **kwargs\n Additional keyword arguments passed to the reduction function.\n Currently, `ddof` is the only supported kwarg.\n\n Returns\n -------\n scalar\n\n Raises\n ------\n TypeError : subclass does not define reductions\n \"\"\"\n raise TypeError(\"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype))\n\n\nclass ExtensionOpsMixin(object):\n \"\"\"\n A base class for linking the operators to their dunder names.\n\n .. 
note::\n\n You may want to set ``__array_priority__`` if you want your\n implementation to be called when involved in binary operations\n with NumPy arrays.\n \"\"\"\n\n @classmethod\n def _add_arithmetic_ops(cls):\n cls.__add__ = cls._create_arithmetic_method(operator.add)\n cls.__radd__ = cls._create_arithmetic_method(ops.radd)\n cls.__sub__ = cls._create_arithmetic_method(operator.sub)\n cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)\n cls.__mul__ = cls._create_arithmetic_method(operator.mul)\n cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)\n cls.__pow__ = cls._create_arithmetic_method(operator.pow)\n cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)\n cls.__mod__ = cls._create_arithmetic_method(operator.mod)\n cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)\n cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)\n cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)\n cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)\n cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)\n if not PY3:\n cls.__div__ = cls._create_arithmetic_method(operator.div)\n cls.__rdiv__ = cls._create_arithmetic_method(ops.rdiv)\n\n cls.__divmod__ = cls._create_arithmetic_method(divmod)\n cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)\n\n @classmethod\n def _add_comparison_ops(cls):\n cls.__eq__ = cls._create_comparison_method(operator.eq)\n cls.__ne__ = cls._create_comparison_method(operator.ne)\n cls.__lt__ = cls._create_comparison_method(operator.lt)\n cls.__gt__ = cls._create_comparison_method(operator.gt)\n cls.__le__ = cls._create_comparison_method(operator.le)\n cls.__ge__ = cls._create_comparison_method(operator.ge)\n\n\nclass ExtensionScalarOpsMixin(ExtensionOpsMixin):\n \"\"\"\n A mixin for defining ops on an ExtensionArray.\n\n It is assumed that the underlying scalar objects have the operators\n already defined.\n\n Notes\n -----\n If you have defined a subclass MyExtensionArray(ExtensionArray), then\n use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to\n get the arithmetic operators. After the definition of MyExtensionArray,\n insert the lines\n\n MyExtensionArray._add_arithmetic_ops()\n MyExtensionArray._add_comparison_ops()\n\n to link the operators to your class.\n\n .. note::\n\n You may want to set ``__array_priority__`` if you want your\n implementation to be called when involved in binary operations\n with NumPy arrays.\n \"\"\"\n\n @classmethod\n def _create_method(cls, op, coerce_to_dtype=True):\n \"\"\"\n A class method that returns a method that will correspond to an\n operator for an ExtensionArray subclass, by dispatching to the\n relevant operator defined on the individual elements of the\n ExtensionArray.\n\n Parameters\n ----------\n op : function\n An operator that takes arguments op(a, b)\n coerce_to_dtype : bool, default True\n boolean indicating whether to attempt to convert\n the result to the underlying ExtensionArray dtype.\n If it's not possible to create a new ExtensionArray with the\n values, an ndarray is returned instead.\n\n Returns\n -------\n Callable[[Any, Any], Union[ndarray, ExtensionArray]]\n A method that can be bound to a class. When used, the method\n receives the two arguments, one of which is the instance of\n this class, and should return an ExtensionArray or an ndarray.\n\n Returning an ndarray may be necessary when the result of the\n `op` cannot be stored in the ExtensionArray. 
The dtype of the\n ndarray uses NumPy's normal inference rules.\n\n Example\n -------\n Given an ExtensionArray subclass called MyExtensionArray, use\n\n >>> __add__ = cls._create_method(operator.add)\n\n in the class definition of MyExtensionArray to create the operator\n for addition, that will be based on the operator implementation\n of the underlying elements of the ExtensionArray\n \"\"\"\n\n def _binop(self, other):\n def convert_values(param):\n if isinstance(param, ExtensionArray) or is_list_like(param):\n ovalues = param\n else: # Assume its an object\n ovalues = [param] * len(self)\n return ovalues\n\n if isinstance(other, (ABCSeries, ABCIndexClass)):\n # rely on pandas to unbox and dispatch to us\n return NotImplemented\n\n lvalues = self\n rvalues = convert_values(other)\n\n # If the operator is not defined for the underlying objects,\n # a TypeError should be raised\n res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]\n\n def _maybe_convert(arr):\n if coerce_to_dtype:\n # https://github.com/pandas-dev/pandas/issues/22850\n # We catch all regular exceptions here, and fall back\n # to an ndarray.\n try:\n res = self._from_sequence(arr)\n except Exception:\n res = np.asarray(arr)\n else:\n res = np.asarray(arr)\n return res\n\n if op.__name__ in {'divmod', 'rdivmod'}:\n a, b = zip(*res)\n res = _maybe_convert(a), _maybe_convert(b)\n else:\n res = _maybe_convert(res)\n return res\n\n op_name = ops._get_op_name(op, True)\n return set_function_name(_binop, op_name, cls)\n\n @classmethod\n def _create_arithmetic_method(cls, op):\n return cls._create_method(op)\n\n @classmethod\n def _create_comparison_method(cls, op):\n return cls._create_method(op, coerce_to_dtype=False)\n", "\"\"\" test with the .transform \"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import groupby\nfrom pandas.compat import StringIO\n\nfrom pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype\n\nimport pandas as pd\nfrom pandas import DataFrame, MultiIndex, Series, Timestamp, concat, date_range\nfrom pandas.core.groupby.groupby import DataError\nfrom pandas.util import testing as tm\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\n\n\ndef assert_fp_equal(a, b):\n assert (np.abs(a - b) < 1e-12).all()\n\n\ndef test_transform():\n data = Series(np.arange(9) // 3, index=np.arange(9))\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n transformed = grouped.transform(lambda x: x * x.sum())\n assert transformed[7] == 12\n\n # GH 8046\n # make sure that we preserve the input order\n\n df = DataFrame(\n np.arange(6, dtype='int64').reshape(\n 3, 2), columns=[\"a\", \"b\"], index=[0, 2, 1])\n key = [0, 0, 1]\n expected = df.sort_index().groupby(key).transform(\n lambda x: x - x.mean()).groupby(key).mean()\n result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(\n key).mean()\n assert_frame_equal(result, expected)\n\n def demean(arr):\n return arr - arr.mean()\n\n people = DataFrame(np.random.randn(5, 5),\n columns=['a', 'b', 'c', 'd', 'e'],\n index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])\n key = ['one', 'two', 'one', 'two', 'one']\n result = people.groupby(key).transform(demean).groupby(key).mean()\n expected = people.groupby(key).apply(demean).groupby(key).mean()\n assert_frame_equal(result, expected)\n\n # GH 8430\n df = tm.makeTimeDataFrame()\n g = df.groupby(pd.Grouper(freq='M'))\n g.transform(lambda x: x - 1)\n\n # GH 9700\n df = DataFrame({'a': range(5, 10), 
'b': range(5)})\n result = df.groupby('a').transform(max)\n expected = DataFrame({'b': range(5)})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_fast():\n\n df = DataFrame({'id': np.arange(100000) / 3,\n 'val': np.random.randn(100000)})\n\n grp = df.groupby('id')['val']\n\n values = np.repeat(grp.mean().values,\n ensure_platform_int(grp.count().values))\n expected = pd.Series(values, index=df.index, name='val')\n\n result = grp.transform(np.mean)\n assert_series_equal(result, expected)\n\n result = grp.transform('mean')\n assert_series_equal(result, expected)\n\n # GH 12737\n df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],\n 'd': pd.date_range('2014-1-1', '2014-1-4'),\n 'i': [1, 2, 3, 4]},\n columns=['grouping', 'f', 'i', 'd'])\n result = df.groupby('grouping').transform('first')\n\n dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),\n pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]\n expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],\n 'd': dates,\n 'i': [1, 2, 2, 4]},\n columns=['f', 'i', 'd'])\n assert_frame_equal(result, expected)\n\n # selection\n result = df.groupby('grouping')[['f', 'i']].transform('first')\n expected = expected[['f', 'i']]\n assert_frame_equal(result, expected)\n\n # dup columns\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])\n result = df.groupby('g').transform('first')\n expected = df.drop('g', axis=1)\n assert_frame_equal(result, expected)\n\n\ndef test_transform_broadcast(tsframe, ts):\n grouped = ts.groupby(lambda x: x.month)\n result = grouped.transform(np.mean)\n\n tm.assert_index_equal(result.index, ts.index)\n for _, gp in grouped:\n assert_fp_equal(result.reindex(gp.index), gp.mean())\n\n grouped = tsframe.groupby(lambda x: x.month)\n result = grouped.transform(np.mean)\n tm.assert_index_equal(result.index, tsframe.index)\n for _, gp in grouped:\n agged = gp.mean()\n res = result.reindex(gp.index)\n for col in tsframe:\n assert_fp_equal(res[col], agged[col])\n\n # group columns\n grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},\n axis=1)\n result = grouped.transform(np.mean)\n tm.assert_index_equal(result.index, tsframe.index)\n tm.assert_index_equal(result.columns, tsframe.columns)\n for _, gp in grouped:\n agged = gp.mean(1)\n res = result.reindex(columns=gp.columns)\n for idx in gp.index:\n assert_fp_equal(res.xs(idx), agged[idx])\n\n\ndef test_transform_axis(tsframe):\n\n # make sure that we are setting the axes\n # correctly when on axis=0 or 1\n # in the presence of a non-monotonic indexer\n # GH12713\n\n base = tsframe.iloc[0:5]\n r = len(base.index)\n c = len(base.columns)\n tso = DataFrame(np.random.randn(r, c),\n index=base.index,\n columns=base.columns,\n dtype='float64')\n # monotonic\n ts = tso\n grouped = ts.groupby(lambda x: x.weekday())\n result = ts - grouped.transform('mean')\n expected = grouped.apply(lambda x: x - x.mean())\n assert_frame_equal(result, expected)\n\n ts = ts.T\n grouped = ts.groupby(lambda x: x.weekday(), axis=1)\n result = ts - grouped.transform('mean')\n expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)\n assert_frame_equal(result, expected)\n\n # non-monotonic\n ts = tso.iloc[[1, 0] + list(range(2, len(base)))]\n grouped = ts.groupby(lambda x: x.weekday())\n result = ts - grouped.transform('mean')\n expected = grouped.apply(lambda x: x - x.mean())\n assert_frame_equal(result, expected)\n\n ts = ts.T\n grouped = ts.groupby(lambda x: x.weekday(), axis=1)\n result = ts - grouped.transform('mean')\n expected = 
grouped.apply(lambda x: (x.T - x.mean(1)).T)\n assert_frame_equal(result, expected)\n\n\ndef test_transform_dtype():\n # GH 9807\n # Check transform dtype output is preserved\n df = DataFrame([[1, 3], [2, 3]])\n result = df.groupby(1).transform('mean')\n expected = DataFrame([[1.5], [1.5]])\n assert_frame_equal(result, expected)\n\n\ndef test_transform_bug():\n # GH 5712\n # transforming on a datetime column\n df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))\n result = df.groupby('A')['B'].transform(\n lambda x: x.rank(ascending=False))\n expected = Series(np.arange(5, 0, step=-1), name='B')\n assert_series_equal(result, expected)\n\n\ndef test_transform_numeric_to_boolean():\n # GH 16875\n # inconsistency in transforming boolean values\n expected = pd.Series([True, True], name='A')\n\n df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})\n result = df.groupby('B').A.transform(lambda x: True)\n assert_series_equal(result, expected)\n\n df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})\n result = df.groupby('B').A.transform(lambda x: True)\n assert_series_equal(result, expected)\n\n\ndef test_transform_datetime_to_timedelta():\n # GH 15429\n # transforming a datetime to timedelta\n df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))\n expected = pd.Series([\n Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')\n\n # this does date math without changing result type in transform\n base_time = df['A'][0]\n result = df.groupby('A')['A'].transform(\n lambda x: x.max() - x.min() + base_time) - base_time\n assert_series_equal(result, expected)\n\n # this does date math and causes the transform to return timedelta\n result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())\n assert_series_equal(result, expected)\n\n\ndef test_transform_datetime_to_numeric():\n # GH 10972\n # convert dt to float\n df = DataFrame({\n 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})\n result = df.groupby('a').b.transform(\n lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())\n\n expected = Series([-0.5, 0.5], name='b')\n assert_series_equal(result, expected)\n\n # convert dt to int\n df = DataFrame({\n 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})\n result = df.groupby('a').b.transform(\n lambda x: x.dt.dayofweek - x.dt.dayofweek.min())\n\n expected = Series([0, 1], name='b')\n assert_series_equal(result, expected)\n\n\ndef test_transform_casting():\n # 13046\n data = \"\"\"\n idx A ID3 DATETIME\n 0 B-028 b76cd912ff \"2014-10-08 13:43:27\"\n 1 B-054 4a57ed0b02 \"2014-10-08 14:26:19\"\n 2 B-076 1a682034f8 \"2014-10-08 14:29:01\"\n 3 B-023 b76cd912ff \"2014-10-08 18:39:34\"\n 4 B-023 f88g8d7sds \"2014-10-08 18:40:18\"\n 5 B-033 b76cd912ff \"2014-10-08 18:44:30\"\n 6 B-032 b76cd912ff \"2014-10-08 18:46:00\"\n 7 B-037 b76cd912ff \"2014-10-08 18:52:15\"\n 8 B-046 db959faf02 \"2014-10-08 18:59:59\"\n 9 B-053 b76cd912ff \"2014-10-08 19:17:48\"\n 10 B-065 b76cd912ff \"2014-10-08 19:21:38\"\n \"\"\"\n df = pd.read_csv(StringIO(data), sep=r'\\s+',\n index_col=[0], parse_dates=['DATETIME'])\n\n result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())\n assert is_timedelta64_dtype(result.dtype)\n\n result = df[['ID3', 'DATETIME']].groupby('ID3').transform(\n lambda x: x.diff())\n assert is_timedelta64_dtype(result.DATETIME.dtype)\n\n\ndef test_transform_multiple(ts):\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n\n grouped.transform(lambda x: x * 2)\n grouped.transform(np.mean)\n\n\ndef test_dispatch_transform(tsframe):\n df = 
tsframe[::5].reindex(tsframe.index)\n\n grouped = df.groupby(lambda x: x.month)\n\n filled = grouped.fillna(method='pad')\n fillit = lambda x: x.fillna(method='pad')\n expected = df.groupby(lambda x: x.month).transform(fillit)\n assert_frame_equal(filled, expected)\n\n\ndef test_transform_select_columns(df):\n f = lambda x: x.mean()\n result = df.groupby('A')['C', 'D'].transform(f)\n\n selection = df[['C', 'D']]\n expected = selection.groupby(df['A']).transform(f)\n\n assert_frame_equal(result, expected)\n\n\ndef test_transform_exclude_nuisance(df):\n\n # this also tests orderings in transform between\n # series/frame to make sure it's consistent\n expected = {}\n grouped = df.groupby('A')\n expected['C'] = grouped['C'].transform(np.mean)\n expected['D'] = grouped['D'].transform(np.mean)\n expected = DataFrame(expected)\n result = df.groupby('A').transform(np.mean)\n\n assert_frame_equal(result, expected)\n\n\ndef test_transform_function_aliases(df):\n result = df.groupby('A').transform('mean')\n expected = df.groupby('A').transform(np.mean)\n assert_frame_equal(result, expected)\n\n result = df.groupby('A')['C'].transform('mean')\n expected = df.groupby('A')['C'].transform(np.mean)\n assert_series_equal(result, expected)\n\n\ndef test_series_fast_transform_date():\n # GH 13191\n df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],\n 'd': pd.date_range('2014-1-1', '2014-1-4')})\n result = df.groupby('grouping')['d'].transform('first')\n dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),\n pd.Timestamp('2014-1-4')]\n expected = pd.Series(dates, name='d')\n assert_series_equal(result, expected)\n\n\ndef test_transform_length():\n # GH 9697\n df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})\n expected = pd.Series([3.0] * 4)\n\n def nsum(x):\n return np.nansum(x)\n\n results = [df.groupby('col1').transform(sum)['col2'],\n df.groupby('col1')['col2'].transform(sum),\n df.groupby('col1').transform(nsum)['col2'],\n df.groupby('col1')['col2'].transform(nsum)]\n for result in results:\n assert_series_equal(result, expected, check_names=False)\n\n\ndef test_transform_coercion():\n\n # 14457\n # when we are transforming be sure to not coerce\n # via assignment\n df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))\n g = df.groupby('A')\n\n expected = g.transform(np.mean)\n result = g.transform(lambda x: np.mean(x))\n assert_frame_equal(result, expected)\n\n\ndef test_groupby_transform_with_int():\n\n # GH 3740, make sure that we might upcast on item-by-item transform\n\n # floats\n df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),\n C=Series(\n [1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))\n with np.errstate(all='ignore'):\n result = df.groupby('A').transform(\n lambda x: (x - x.mean()) / x.std())\n expected = DataFrame(dict(B=np.nan, C=Series(\n [-1, 0, 1, -1, 0, 1], dtype='float64')))\n assert_frame_equal(result, expected)\n\n # int case\n df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,\n C=[1, 2, 3, 1, 2, 3], D='foo'))\n with np.errstate(all='ignore'):\n result = df.groupby('A').transform(\n lambda x: (x - x.mean()) / x.std())\n expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))\n assert_frame_equal(result, expected)\n\n # int that needs float conversion\n s = Series([2, 3, 4, 10, 5, -1])\n df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))\n with np.errstate(all='ignore'):\n result = df.groupby('A').transform(\n lambda x: (x - x.mean()) / x.std())\n\n s1 = s.iloc[0:3]\n s1 = (s1 - s1.mean()) / s1.std()\n s2 = s.iloc[3:6]\n 
s2 = (s2 - s2.mean()) / s2.std()\n expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))\n assert_frame_equal(result, expected)\n\n # int downcasting\n result = df.groupby('A').transform(lambda x: x * 2 / 2)\n expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))\n assert_frame_equal(result, expected)\n\n\ndef test_groupby_transform_with_nan_group():\n # GH 9941\n df = pd.DataFrame({'a': range(10),\n 'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})\n result = df.groupby(df.b)['a'].transform(max)\n expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],\n name='a')\n assert_series_equal(result, expected)\n\n\ndef test_transform_mixed_type():\n index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]\n ])\n df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],\n 'c': np.tile(['a', 'b', 'c'], 2),\n 'v': np.arange(1., 7.)}, index=index)\n\n def f(group):\n group['g'] = group['d'] * 2\n return group[:1]\n\n grouped = df.groupby('c')\n result = grouped.apply(f)\n\n assert result['d'].dtype == np.float64\n\n # this is by definition a mutating operation!\n with pd.option_context('mode.chained_assignment', None):\n for key, group in grouped:\n res = f(group)\n assert_frame_equal(res, result.loc[key])\n\n\ndef _check_cython_group_transform_cumulative(pd_op, np_op, dtype):\n \"\"\"\n Check a group transform that executes a cumulative function.\n\n Parameters\n ----------\n pd_op : callable\n The pandas cumulative function.\n np_op : callable\n The analogous one in NumPy.\n dtype : type\n The specified dtype of the data.\n \"\"\"\n\n is_datetimelike = False\n\n data = np.array([[1], [2], [3], [4]], dtype=dtype)\n ans = np.zeros_like(data)\n\n labels = np.array([0, 0, 0, 0], dtype=np.int64)\n pd_op(ans, data, labels, is_datetimelike)\n\n tm.assert_numpy_array_equal(np_op(data), ans[:, 0],\n check_dtype=False)\n\n\ndef test_cython_group_transform_cumsum(any_real_dtype):\n # see gh-4095\n dtype = np.dtype(any_real_dtype).type\n pd_op, np_op = groupby.group_cumsum, np.cumsum\n _check_cython_group_transform_cumulative(pd_op, np_op, dtype)\n\n\ndef test_cython_group_transform_cumprod():\n # see gh-4095\n dtype = np.float64\n pd_op, np_op = groupby.group_cumprod_float64, np.cumproduct\n _check_cython_group_transform_cumulative(pd_op, np_op, dtype)\n\n\ndef test_cython_group_transform_algos():\n # see gh-4095\n is_datetimelike = False\n\n # with nans\n labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)\n\n data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')\n actual = np.zeros_like(data)\n actual.fill(np.nan)\n groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)\n expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')\n tm.assert_numpy_array_equal(actual[:, 0], expected)\n\n actual = np.zeros_like(data)\n actual.fill(np.nan)\n groupby.group_cumsum(actual, data, labels, is_datetimelike)\n expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')\n tm.assert_numpy_array_equal(actual[:, 0], expected)\n\n # timedelta\n is_datetimelike = True\n data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]\n actual = np.zeros_like(data, dtype='int64')\n groupby.group_cumsum(actual, data.view('int64'), labels,\n is_datetimelike)\n expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(\n 2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),\n np.timedelta64(5, 'ns')])\n tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)\n\n\[email protected](\n \"op, args, targop\",\n [('cumprod', (), lambda x: x.cumprod()),\n ('cumsum', (), 
lambda x: x.cumsum()),\n ('shift', (-1, ), lambda x: x.shift(-1)),\n ('shift', (1, ), lambda x: x.shift())])\ndef test_cython_transform_series(op, args, targop):\n # GH 4095\n s = Series(np.random.randn(1000))\n s_missing = s.copy()\n s_missing.iloc[2:10] = np.nan\n labels = np.random.randint(0, 50, size=1000).astype(float)\n\n # series\n for data in [s, s_missing]:\n # print(data.head())\n expected = data.groupby(labels).transform(targop)\n\n tm.assert_series_equal(\n expected,\n data.groupby(labels).transform(op, *args))\n tm.assert_series_equal(expected, getattr(\n data.groupby(labels), op)(*args))\n\n\[email protected](\"op\", ['cumprod', 'cumsum'])\[email protected](\"skipna\", [False, True])\[email protected]('input, exp', [\n # When everything is NaN\n ({'key': ['b'] * 10, 'value': np.nan},\n pd.Series([np.nan] * 10, name='value')),\n # When there is a single NaN\n ({'key': ['b'] * 10 + ['a'] * 2,\n 'value': [3] * 3 + [np.nan] + [3] * 8},\n {('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],\n ('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,\n 2187., 6561., 19683., 3.0, 9.0],\n ('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],\n ('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,\n 21., 24., 27., 3.0, 6.0]})])\ndef test_groupby_cum_skipna(op, skipna, input, exp):\n df = pd.DataFrame(input)\n result = df.groupby('key')['value'].transform(op, skipna=skipna)\n if isinstance(exp, dict):\n expected = exp[(op, skipna)]\n else:\n expected = exp\n expected = pd.Series(expected, name='value')\n tm.assert_series_equal(expected, result)\n\n\[email protected](\n \"op, args, targop\",\n [('cumprod', (), lambda x: x.cumprod()),\n ('cumsum', (), lambda x: x.cumsum()),\n ('shift', (-1, ), lambda x: x.shift(-1)),\n ('shift', (1, ), lambda x: x.shift())])\ndef test_cython_transform_frame(op, args, targop):\n s = Series(np.random.randn(1000))\n s_missing = s.copy()\n s_missing.iloc[2:10] = np.nan\n labels = np.random.randint(0, 50, size=1000).astype(float)\n strings = list('qwertyuiopasdfghjklz')\n strings_missing = strings[:]\n strings_missing[5] = np.nan\n df = DataFrame({'float': s,\n 'float_missing': s_missing,\n 'int': [1, 1, 1, 1, 2] * 200,\n 'datetime': pd.date_range('1990-1-1', periods=1000),\n 'timedelta': pd.timedelta_range(1, freq='s',\n periods=1000),\n 'string': strings * 50,\n 'string_missing': strings_missing * 50},\n columns=['float', 'float_missing', 'int', 'datetime',\n 'timedelta', 'string', 'string_missing'])\n df['cat'] = df['string'].astype('category')\n\n df2 = df.copy()\n df2.index = pd.MultiIndex.from_product([range(100), range(10)])\n\n # DataFrame - Single and MultiIndex,\n # group by values, index level, columns\n for df in [df, df2]:\n for gb_target in [dict(by=labels), dict(level=0), dict(by='string')\n ]: # dict(by='string_missing')]:\n # dict(by=['int','string'])]:\n\n gb = df.groupby(**gb_target)\n # whitelisted methods set the selection before applying\n # bit a of hack to make sure the cythonized shift\n # is equivalent to pre 0.17.1 behavior\n if op == 'shift':\n gb._set_group_selection()\n\n if op != 'shift' and 'int' not in gb_target:\n # numeric apply fastpath promotes dtype so have\n # to apply separately and concat\n i = gb[['int']].apply(targop)\n f = gb[['float', 'float_missing']].apply(targop)\n expected = pd.concat([f, i], axis=1)\n else:\n expected = gb.apply(targop)\n\n expected = expected.sort_index(axis=1)\n tm.assert_frame_equal(expected,\n gb.transform(op, *args).sort_index(\n axis=1))\n 
tm.assert_frame_equal(\n expected,\n getattr(gb, op)(*args).sort_index(axis=1))\n # individual columns\n for c in df:\n if c not in ['float', 'int', 'float_missing'\n ] and op != 'shift':\n msg = \"No numeric types to aggregate\"\n with pytest.raises(DataError, match=msg):\n gb[c].transform(op)\n with pytest.raises(DataError, match=msg):\n getattr(gb[c], op)()\n else:\n expected = gb[c].apply(targop)\n expected.name = c\n tm.assert_series_equal(expected,\n gb[c].transform(op, *args))\n tm.assert_series_equal(expected,\n getattr(gb[c], op)(*args))\n\n\ndef test_transform_with_non_scalar_group():\n # GH 10165\n cols = pd.MultiIndex.from_tuples([\n ('syn', 'A'), ('mis', 'A'), ('non', 'A'),\n ('syn', 'C'), ('mis', 'C'), ('non', 'C'),\n ('syn', 'T'), ('mis', 'T'), ('non', 'T'),\n ('syn', 'G'), ('mis', 'G'), ('non', 'G')])\n df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),\n columns=cols,\n index=['A', 'C', 'G', 'T'])\n\n msg = 'transform must return a scalar value for each group.*'\n with pytest.raises(ValueError, match=msg):\n df.groupby(axis=1, level=1).transform(\n lambda z: z.div(z.sum(axis=1), axis=0))\n\n\[email protected]('cols,exp,comp_func', [\n ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),\n (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),\n tm.assert_frame_equal)\n])\[email protected]('agg_func', [\n 'count', 'rank', 'size'])\ndef test_transform_numeric_ret(cols, exp, comp_func, agg_func):\n if agg_func == 'size' and isinstance(cols, list):\n pytest.xfail(\"'size' transformation not supported with \"\n \"NDFrameGroupy\")\n\n # GH 19200\n df = pd.DataFrame(\n {'a': pd.date_range('2018-01-01', periods=3),\n 'b': range(3),\n 'c': range(7, 10)})\n\n result = df.groupby('b')[cols].transform(agg_func)\n\n if agg_func == 'rank':\n exp = exp.astype('float')\n\n comp_func(result, exp)\n\n\[email protected](\"mix_groupings\", [True, False])\[email protected](\"as_series\", [True, False])\[email protected](\"val1,val2\", [\n ('foo', 'bar'), (1, 2), (1., 2.)])\[email protected](\"fill_method,limit,exp_vals\", [\n (\"ffill\", None,\n [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),\n (\"ffill\", 1,\n [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),\n (\"bfill\", None,\n ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),\n (\"bfill\", 1,\n [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])\n])\ndef test_group_fill_methods(mix_groupings, as_series, val1, val2,\n fill_method, limit, exp_vals):\n vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]\n _exp_vals = list(exp_vals)\n # Overwrite placeholder values\n for index, exp_val in enumerate(_exp_vals):\n if exp_val == 'val1':\n _exp_vals[index] = val1\n elif exp_val == 'val2':\n _exp_vals[index] = val2\n\n # Need to modify values and expectations depending on the\n # Series / DataFrame that we ultimately want to generate\n if mix_groupings: # ['a', 'b', 'a, 'b', ...]\n keys = ['a', 'b'] * len(vals)\n\n def interweave(list_obj):\n temp = list()\n for x in list_obj:\n temp.extend([x, x])\n\n return temp\n\n _exp_vals = interweave(_exp_vals)\n vals = interweave(vals)\n else: # ['a', 'a', 'a', ... 
'b', 'b', 'b']\n keys = ['a'] * len(vals) + ['b'] * len(vals)\n _exp_vals = _exp_vals * 2\n vals = vals * 2\n\n df = DataFrame({'key': keys, 'val': vals})\n if as_series:\n result = getattr(\n df.groupby('key')['val'], fill_method)(limit=limit)\n exp = Series(_exp_vals, name='val')\n assert_series_equal(result, exp)\n else:\n result = getattr(df.groupby('key'), fill_method)(limit=limit)\n exp = DataFrame({'key': keys, 'val': _exp_vals})\n assert_frame_equal(result, exp)\n\n\[email protected](\"fill_method\", ['ffill', 'bfill'])\ndef test_pad_stable_sorting(fill_method):\n # GH 21207\n x = [0] * 20\n y = [np.nan] * 10 + [1] * 10\n\n if fill_method == 'bfill':\n y = y[::-1]\n\n df = pd.DataFrame({'x': x, 'y': y})\n expected = df.copy()\n\n result = getattr(df.groupby('x'), fill_method)()\n\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"test_series\", [True, False])\[email protected](\"freq\", [\n None,\n pytest.param('D', marks=pytest.mark.xfail(\n reason='GH#23918 before method uses freq in vectorized approach'))])\[email protected](\"periods,fill_method,limit\", [\n (1, 'ffill', None), (1, 'ffill', 1),\n (1, 'bfill', None), (1, 'bfill', 1),\n (-1, 'ffill', None), (-1, 'ffill', 1),\n (-1, 'bfill', None), (-1, 'bfill', 1),\n])\ndef test_pct_change(test_series, freq, periods, fill_method, limit):\n # GH 21200, 21621\n vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]\n keys = ['a', 'b']\n key_v = np.repeat(keys, len(vals))\n df = DataFrame({'key': key_v, 'vals': vals * 2})\n\n df_g = getattr(df.groupby('key'), fill_method)(limit=limit)\n grp = df_g.groupby('key')\n\n expected = grp['vals'].obj / grp['vals'].shift(periods) - 1\n\n if test_series:\n result = df.groupby('key')['vals'].pct_change(\n periods=periods, fill_method=fill_method, limit=limit, freq=freq)\n tm.assert_series_equal(result, expected)\n else:\n result = df.groupby('key').pct_change(\n periods=periods, fill_method=fill_method, limit=limit, freq=freq)\n tm.assert_frame_equal(result, expected.to_frame('vals'))\n\n\[email protected](\"func\", [np.any, np.all])\ndef test_any_all_np_func(func):\n # GH 20653\n df = pd.DataFrame([['foo', True],\n [np.nan, True],\n ['foo', True]], columns=['key', 'val'])\n\n exp = pd.Series([True, np.nan, True], name='val')\n\n res = df.groupby('key')['val'].transform(func)\n tm.assert_series_equal(res, exp)\n\n\ndef test_groupby_transform_rename():\n # https://github.com/pandas-dev/pandas/issues/23461\n def demean_rename(x):\n result = x - x.mean()\n\n if isinstance(x, pd.Series):\n return result\n\n result = result.rename(\n columns={c: '{}_demeaned'.format(c) for c in result.columns})\n\n return result\n\n df = pd.DataFrame({'group': list('ababa'),\n 'value': [1, 1, 1, 2, 2]})\n expected = pd.DataFrame({'value': [-1. / 3, -0.5, -1. / 3, 0.5, 2. / 3]})\n\n result = df.groupby('group').transform(demean_rename)\n tm.assert_frame_equal(result, expected)\n result_single = df.groupby('group').value.transform(demean_rename)\n tm.assert_series_equal(result_single, expected['value'])\n\n\[email protected]('func', [min, max, np.min, np.max, 'first', 'last'])\ndef test_groupby_transform_timezone_column(func):\n # GH 24198\n ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore')\n result = pd.DataFrame({'end_time': [ts], 'id': [1]})\n result['max_end_time'] = result.groupby('id').end_time.transform(func)\n expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id',\n 'max_end_time'])\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "pandas.compat.set_function_name", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Substitution", "pandas.util._decorators.Appender", "pandas.errors.AbstractMethodError", "pandas.core.algorithms._factorize_array", "numpy.asarray", "pandas.compat.numpy.function.validate_argsort_with_ascending", "pandas.util._validators.validate_fillna_kwargs", "pandas.api.types.is_array_like", "pandas.core.dtypes.missing.isna", "pandas.core.ops._get_op_name", "numpy.argsort", "numpy.array" ], [ "pandas.to_datetime", "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.zeros_like", "numpy.random.randn", "numpy.mean", "numpy.random.randint", "pandas.util.testing.makeTimeDataFrame", "pandas.util.testing.assert_numpy_array_equal", "numpy.arange", "pandas.compat.StringIO", "pandas.util.testing.assert_series_equal", "numpy.nansum", "pandas._libs.groupby.group_cumprod_float64", "pandas.concat", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.option_context", "numpy.timedelta64", "pandas.date_range", "numpy.errstate", "numpy.array", "pandas.timedelta_range", "numpy.abs", "pandas._libs.groupby.group_cumsum", "pandas.Grouper", "numpy.tile", "numpy.random.shuffle", "pandas.MultiIndex.from_arrays", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nliolios24/textrank
[ "845c0a46b9bd36c36773bf106c0e2fee3bdb61a5", "845c0a46b9bd36c36773bf106c0e2fee3bdb61a5" ]
[ "share/doc/networkx-1.9.1/examples/graph/unix_email.py", "share/doc/networkx-1.9.1/examples/drawing/degree_histogram.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nCreate a directed graph, allowing multiple edges and self loops, from\na unix mailbox. The nodes are email addresses with links\nthat point from the sender to the recievers. The edge data\nis a Python email.Message object which contains all of\nthe email message data. \n\nThis example shows the power of XDiGraph to hold edge data\nof arbitrary Python objects (in this case a list of email messages).\n\nBy default, load the sample unix email mailbox called \"unix_email.mbox\".\nYou can load your own mailbox by naming it on the command line, eg\n\npython unixemail.py /var/spool/mail/username\n\n\"\"\"\n__author__ = \"\"\"Aric Hagberg ([email protected])\"\"\"\n# Copyright (C) 2005 by \n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nimport email\nfrom email.utils import getaddresses,parseaddr\nimport mailbox\nimport sys\n\n# unix mailbox recipe\n# see http://www.python.org/doc/current/lib/module-mailbox.html\ndef msgfactory(fp):\n try:\n return email.message_from_file(fp)\n except email.Errors.MessageParseError:\n # Don't return None since that will stop the mailbox iterator\n return ''\n\n\n\nif __name__ == '__main__':\n\n import networkx as nx\n try: \n import matplotlib.pyplot as plt\n except:\n pass\n\n if len(sys.argv)==1:\n filePath = \"unix_email.mbox\"\n else:\n filePath = sys.argv[1]\n\n mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox\n\n G=nx.MultiDiGraph() # create empty graph\n\n # parse each messages and build graph \n for msg in mbox: # msg is python email.Message.Message object\n (source_name,source_addr) = parseaddr(msg['From']) # sender\n # get all recipients\n # see http://www.python.org/doc/current/lib/module-email.Utils.html\n tos = msg.get_all('to', [])\n ccs = msg.get_all('cc', [])\n resent_tos = msg.get_all('resent-to', [])\n resent_ccs = msg.get_all('resent-cc', [])\n all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)\n # now add the edges for this mail message\n for (target_name,target_addr) in all_recipients:\n G.add_edge(source_addr,target_addr,message=msg) \n\n # print edges with message subject\n for (u,v,d) in G.edges_iter(data=True):\n print(\"From: %s To: %s Subject: %s\"%(u,v,d['message'][\"Subject\"]))\n \n\n try: # draw\n pos=nx.spring_layout(G,iterations=10)\n nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)\n plt.savefig(\"unix_email.png\")\n plt.show()\n except: # matplotlib not available\n pass\n", "#!/usr/bin/env python\n\"\"\"\nRandom graph from given degree sequence.\nDraw degree rank plot and graph with matplotlib.\n\"\"\"\n__author__ = \"\"\"Aric Hagberg <[email protected]>\"\"\"\nimport networkx as nx\nimport matplotlib.pyplot as plt\nG = nx.gnp_random_graph(100,0.02)\n\ndegree_sequence=sorted(nx.degree(G).values(),reverse=True) # degree sequence\n#print \"Degree sequence\", degree_sequence\ndmax=max(degree_sequence)\n\nplt.loglog(degree_sequence,'b-',marker='o')\nplt.title(\"Degree rank plot\")\nplt.ylabel(\"degree\")\nplt.xlabel(\"rank\")\n\n# draw graph in inset \nplt.axes([0.45,0.45,0.45,0.45])\nGcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)[0]\npos=nx.spring_layout(Gcc)\nplt.axis('off')\nnx.draw_networkx_nodes(Gcc,pos,node_size=20)\nnx.draw_networkx_edges(Gcc,pos,alpha=0.4)\n\nplt.savefig(\"degree_histogram.png\")\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.savefig" ], [ "matplotlib.pyplot.title", "matplotlib.pyplot.loglog", "matplotlib.pyplot.savefig", "matplotlib.pyplot.axes", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dguari1/Auto-eFace
[ "5d30493deccb73bc6a355ce342ae0f6a4a0e177d" ]
[ "ImageViewerandProcess.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 16 10:53:19 2017\n\n@author: Diego L.Guarin -- diego_guarin at meei.harvard.edu\n\"\"\"\nimport cv2\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtCore\n\n\nfrom utilities import mark_picture #this function draws the landmarks and iris circles \nfrom process_eye import get_iris_manual #this function opens a new window to manually select the iris\n\n\n\"\"\"\nThis class is in charge of drawing the picture and the landmarks in the main \nwindow, it also takes care of lifting and re-location of landmarks. \n\"\"\"\n\nclass ImageViewer(QtWidgets.QGraphicsView): \n \n def __init__(self):\n #usual parameters to make sure the image can be zoom-in and out and is \n #possible to move around the zoomed-in view\n super(ImageViewer, self).__init__()\n self._zoom = 0\n self._scene = QtWidgets.QGraphicsScene(self)\n self._photo = QtWidgets.QGraphicsPixmapItem()\n self._scene.addItem(self._photo)\n self.setScene(self._scene)\n self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(100,100,100)))\n self.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.setDragMode(QtWidgets.QGraphicsView.RubberBandDrag)\n self.setMouseTracking(True)\n \n #this is used to show the dots and update the dots in image\n self._shape = None\n self._lefteye = None\n self._righteye = None\n self._opencvimage = None\n self._boundingbox = None\n self._PointToModify = None\n self._points = None\n \n \n #this variable is used to verify is a landmark will be relocated\n self._IsPointLifted = False\n \n #this variable is used to verify if the user wants to drag the eyes to a different location\n self._IsDragEyes = False\n #and what eye is the person trying to move\n self._IsDragLeft = False\n self._IsDragRight = False\n self._BothEyesTogether = False\n\n #QtWidgets.QGraphicsView.RubberBandDrag\n \n def setPhoto(self, pixmap = None):\n #this function puts an image in the scece (if pixmap is not None), it\n #sets the zoom to zero \n self._zoom = 0 \n if pixmap and not pixmap.isNull():\n self.setDragMode(QtWidgets.QGraphicsView.RubberBandDrag)\n self._photo.setPixmap(pixmap)\n self.fitInView()\n else:\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n self._photo.setPixmap(QtGui.QPixmap())\n\n def fitInView(self):\n #this function takes care of accomodating the view so that it can fit\n #in the scene, it resets the zoom to 0 (i think is a overkill, i took\n #it from somewhere else)\n rect = QtCore.QRectF(self._photo.pixmap().rect())\n #self.setSceneRect(rect)\n if not rect.isNull():\n unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))\n self.scale(1 / unity.width(), 1 / unity.height()) \n viewrect = self.viewport().rect()\n scenerect = self.transform().mapRect(rect)\n factor = min(viewrect.width() / scenerect.width(),\n viewrect.height() / scenerect.height()) \n self.scale(factor, factor)\n self.centerOn(rect.center())\n self._zoom = 0 \n \n def zoomFactor(self):\n return self._zoom\n \n def wheelEvent(self, event):\n #this take care of the zoom, it modifies the zoom factor if the mouse \n #wheel is moved forward or backward by 20%\n if not self._photo.pixmap().isNull():\n move=(event.angleDelta().y()/120)\n if move > 0:\n factor 
= 1.2\n self._zoom += 1\n else:\n factor = 0.8\n self._zoom -= 1\n \n if self._zoom > 0:\n self.scale(factor, factor)\n elif self._zoom <= 0:\n self._zoom = 0\n self.fitInView()\n \n \n def mousePressEvent(self, event):\n #this function takes care of lifting (if RightClick) and relocating (if\n #a point is lifted and LeftClick) landmarks. It also verifies if the \n #user wants to manually modify the position of the iris. In that case,\n #it opens up a new window showing only the eye (left or right) where \n #the user can select four points around the iris\n if not self._photo.pixmap().isNull():\n scenePos = self.mapToScene(event.pos())\n if event.button() == QtCore.Qt.RightButton:\n #if the user RightClick and no point is lifted then verify if \n #the position of the click is close to one of the landmarks\n if self._IsPointLifted == False:\n if self._shape is not None and not self._BothEyesTogether:\n \n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n mousePos=np.array([(x_mousePos, y_mousePos)]) \n distance=cdist(np.append(self._shape,\n [[self._righteye[0],self._righteye[1]],\n [self._lefteye[0],self._lefteye[1]]], axis=0)\n , mousePos)\n distance=distance[:,0]\n #check if a landmark (including the eyes) is no more than \n #3 pixels away from the click location. If there is then lift that\n #landmark from the face. If the image is taller than 1000 pixels \n #then the distance is 5 pixels\n if self._scene.height() < 1000: \n PointToModify = [i for i, j in enumerate(distance) if j <=3 ]\n else:\n PointToModify = [i for i, j in enumerate(distance) if j <=6 ]\n if PointToModify:\n self._PointToModify = PointToModify[0]\n if self._PointToModify >= 68:\n if self._PointToModify == 69:\n #if click is in left eye then open up the \n #eye window showing the left eye only \n position = 'left'\n temp = get_iris_manual(self._opencvimage, self._shape, position)\n if temp is not None:\n self._lefteye = temp\n \n elif self._PointToModify == 68:\n #if click is in right eye then open up the \n #eye window showing the right eye only \n position = 'right'\n temp = get_iris_manual(self._opencvimage, self._shape, position)\n if temp is not None:\n self._righteye = temp\n else: \n self._shape[self._PointToModify] = [-1,-1]\n self._IsPointLifted = True\n self.set_update_photo()\n \n elif self._BothEyesTogether: #the user was moving both eyes and now needs to finilize the moving action\n self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n #remove the option to drag the eyes (if click was released then draggin is over)\n self._IsDragEyes = False\n self._IsDragLeft = False\n self._IsDragRight = False\n self._BothEyesTogether = False\n self.set_update_photo() \n \n elif event.button() == QtCore.Qt.LeftButton:\n \n #if the user LeftClick and there is a landmark lifted, then \n #reposition the landmar in the position of the click \n if self._IsPointLifted:\n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n mousePos=np.array([(x_mousePos, y_mousePos)])\n self._shape[self._PointToModify] = [x_mousePos, y_mousePos]\n self._IsPointLifted = False\n self._PointToModify = None\n #self.set_update_photo()\n elif self._BothEyesTogether: #the user was moving both eyes and now needs to finilize the moving action\n self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n #remove the option to drag the eyes (if click was released then draggin is over)\n self._IsDragEyes = False\n self._IsDragLeft = False\n self._IsDragRight = False\n self._BothEyesTogether = False\n 
self.set_update_photo() \n else:\n #The user is probably goin to pan around, allow this \n self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)\n \n #Now:\n #verify if the user is actually trying to modify the eye position. This is done by clicking in the center of the eye\n if self._shape is not None:\n \n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n mousePos=np.array([(x_mousePos, y_mousePos)]) \n distance=cdist([[self._righteye[0],self._righteye[1]],\n [self._lefteye[0],self._lefteye[1]]]\n , mousePos)\n distance=distance[:,0]\n #check if a landmark (including the eyes) is no more than \n #3 pixels away from the click location. If there is then lift that\n #landmark from the face. If the image is taller than 1000 pixels \n #then the distance is 5 pixels\n if self._scene.height() < 1000:\n PointToModify = [i for i, j in enumerate(distance) if j <=3 ]\n else:\n PointToModify = [i for i, j in enumerate(distance) if j <=6 ]\n \n if PointToModify:\n self._PointToModify = PointToModify[0]\n if self._PointToModify == 0:\n #user wants to move the right eye. The eye will move alone\n self._IsDragEyes = True\n self._IsDragLeft = False \n self._IsDragRight = True \n self._BothEyesTogether = False\n elif self._PointToModify == 1:\n #user wants to move the left eye. The eye will move alone \n self._IsDragEyes = True\n self._IsDragRight = False \n self._IsDragLeft = True \n self._BothEyesTogether = False\n \n self.set_update_photo() \n #remove the Drag option\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag) \n #make the cursor a cross to facilitate localization of eye center \n self.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))\n self.draw_circle(self._righteye)\n self.draw_circle(self._lefteye) \n \n \n QtWidgets.QGraphicsView.mousePressEvent(self, event)\n \n def mouseReleaseEvent(self, event): \n #this function defines what happens when you release the mouse click \n \n if not self._BothEyesTogether: #the user moved a single eye. This will only happen if the click is not realease\n #remove the Drag option\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n #return the cursor to an arrow (in case that it was changes to a cross)\n self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n #remove the option to drag the eyes (if click was released then draggin is over)\n self._IsDragEyes = False\n self._IsDragLeft = False\n self._IsDragRight = False\n self._BothEyesTogether = False\n self.set_update_photo()\n #update the viewer to present the latest postion of landmarks and iris\n elif self._BothEyesTogether: #the user is moving both eyes together. The eyes will be moved until a new click is pressed\n #remove the Drag option\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n\n QtWidgets.QGraphicsView.mouseReleaseEvent(self, event)\n\n \n def mouseDoubleClickEvent(self, event):\n \n #if the user double click on one of the iris the both iris will be able to move together\n if event.button() == QtCore.Qt.LeftButton:\n event.accept()\n if self._shape is not None:\n scenePos = self.mapToScene(event.pos())\n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n mousePos=np.array([(x_mousePos, y_mousePos)]) \n distance=cdist([[self._righteye[0],self._righteye[1]],\n [self._lefteye[0],self._lefteye[1]]]\n , mousePos)\n distance=distance[:,0]\n #check if a landmark (including the eyes) is no more than \n #3 pixels away from the click location. If there is then lift that\n #landmark from the face. 
If the image is taller than 1000 pixels \n #then the distance is 5 pixels\n if self._scene.height() < 1000:\n PointToModify = [i for i, j in enumerate(distance) if j <=3 ]\n else:\n PointToModify = [i for i, j in enumerate(distance) if j <=6 ]\n \n if PointToModify:\n self._PointToModify = PointToModify[0]\n if self._PointToModify == 0:\n #user wants to move the right eye, Both eyes have to move together \n self._IsDragEyes = True\n self._IsDragRight = True \n self._IsDragLeft = False \n self._BothEyesTogether = True\n \n elif self._PointToModify == 1:\n #user wants to move the left eye, Both eyes have to move together \n self._IsDragEyes = True\n self._IsDragRight = False \n self._IsDragLeft = True \n self._BothEyesTogether = True\n \n\n #remove the iris from the image \n self.set_update_photo() \n #remove the Drag option\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag) \n #make the cursor a cross to facilitate localization of eye center \n self.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))\n\n \n else:\n event.ignore()\n \n self.draw_circle(self._righteye)\n self.draw_circle(self._lefteye) \n QtWidgets.QGraphicsView.mouseDoubleClickEvent(self, event)\n \n\n \n def mouseMoveEvent(self, event):\n #this function takes care of the pan (move around the photo) and draggin of the eyes\n \n #if _IsDragEyes == true then the user wants to change the position of the eyes\n if self._IsDragEyes is True and self._BothEyesTogether is False:\n event.accept()\n for item in self._scene.items():\n if isinstance(item, QtWidgets.QGraphicsEllipseItem):\n self._scene.removeItem(item)\n\n scenePos = self.mapToScene(event.pos())\n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n \n if self._IsDragLeft: #the user wants to move the left eye \n #update the position of the left eye with the current mouse position \n #rect = QtCore.QRectF(self._photo.pixmap().rect())\n #scenerect = self.transform().mapRect(rect)\n #print(rect.width(),rect.height())\n# if x_mousePos + self._lefteye[2] > rect.width():\n# print('out')\n# elif x_mousePos - self._lefteye[2] < 0:\n# print('out')\n# elif y_mousePos - self._lefteye[2] < 0:\n# print('out')\n# elif y_mousePos + self._lefteye[2] > rect.height():\n# print('out') \n self._lefteye = [x_mousePos, y_mousePos, self._lefteye[2]]\n #draw a circle \n self.draw_circle(self._lefteye)\n \n \n elif self._IsDragRight:\n #update the position of the right eye with the current mouse position \n self._righteye = [x_mousePos, y_mousePos, self._righteye[2]]\n #draw a circle \n self.draw_circle(self._righteye)\n \n elif self._IsDragEyes is True and self._BothEyesTogether is True:\n event.accept()\n for item in self._scene.items():\n if isinstance(item, QtWidgets.QGraphicsEllipseItem):\n self._scene.removeItem(item)\n\n scenePos = self.mapToScene(event.pos())\n x_mousePos = scenePos.toPoint().x()\n y_mousePos = scenePos.toPoint().y()\n \n if self._IsDragLeft: #the user wants to move the left eye \n #update the position of the left eye with the current mouse position \n delta_x = x_mousePos-self._lefteye[0]\n delta_y = y_mousePos-self._lefteye[1]\n self._lefteye = [x_mousePos, y_mousePos, self._lefteye[2]]\n self._righteye = [self._righteye[0]+delta_x, self._righteye[1]+delta_y, self._righteye[2]]\n\n #draw a circle \n self.draw_circle(self._lefteye)\n self.draw_circle(self._righteye)\n \n if self._IsDragRight: #the user wants to move the right eye \n #update the position of the right eye with the current mouse position \n delta_x = x_mousePos-self._righteye[0]\n 
delta_y = y_mousePos-self._righteye[1]\n self._righteye = [x_mousePos, y_mousePos, self._righteye[2]]\n self._lefteye = [self._lefteye[0]+delta_x, self._lefteye[1]+delta_y, self._lefteye[2]]\n\n #draw a circle \n self.draw_circle(self._righteye)\n self.draw_circle(self._lefteye)\n \n \n else:\n event.ignore()\n \n #self.update()\n \n\n QtWidgets.QGraphicsView.mouseMoveEvent(self, event)\n \n\n\n def draw_circle(self, CircleInformation ):\n #this function draws an circle with specific center and radius \n \n Ellipse = QtWidgets.QGraphicsEllipseItem(0,0,CircleInformation[2]*2,CircleInformation[2]*2)\n #ellipse will be green\n pen = QtGui.QPen(QtCore.Qt.green)\n #set the ellipse line width according to the image size\n if self._scene.height() < 1000:\n pen.setWidth(1)\n else:\n pen.setWidth(3)\n \n Ellipse.setPen(pen) \n #if I want to fill the ellipse i should do this:\n #brush = QtGui.QBrush(QtCore.Qt.green) \n #Ellipse.setPen(brush)\n \n #this is the position of the top-left corner of the ellipse.......\n Ellipse.setPos(CircleInformation[0]-CircleInformation[2],CircleInformation[1]-CircleInformation[2])\n Ellipse.setTransform(QtGui.QTransform()) \n self._scene.addItem(Ellipse)\n \n\n def set_update_photo(self, toggle=True):\n #this function takes care of updating the view without re-setting the \n #zoom. Is usefull for when you lift or relocate landmarks or when \n #drawing lines in the middle of the face\n if self._opencvimage is not None:\n self._scene.removeItem(self._photo)\n \n temp_image = self._opencvimage.copy() \n \n if toggle: #verify if the user wants to remove the landmarks..\n #if shape then draw 68 landmark points\n if self._shape is not None:\n #mark_picture takes care of drawing the landmarks and the circles\n #in the iris using opencv \n if self._IsDragEyes:\n if self._IsDragRight and not self._BothEyesTogether: #don't draw the right eye \n temp_image = mark_picture(temp_image, self._shape, self._lefteye, [0,0,-1], self._points)\n elif self._IsDragLeft and not self._BothEyesTogether: #don't draw the left eye \n temp_image = mark_picture(temp_image, self._shape, [0,0,-1], self._righteye, self._points) \n elif self._BothEyesTogether: #don't draw both eyes \n temp_image = mark_picture(temp_image, self._shape, [0,0,-1], [0,0,-1], self._points) \n else: #draw both eyes\n temp_image = mark_picture(temp_image, self._shape, self._lefteye, self._righteye, self._points)\n \n image = cv2.cvtColor(temp_image,cv2.COLOR_BGR2RGB)\n height, width, channel = image.shape\n bytesPerLine = 3 * width\n img_Qt = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)\n img_show = QtGui.QPixmap.fromImage(img_Qt)\n \n self._photo.setPixmap(img_show) \n self._scene.addItem(self._photo)\n self.setDragMode(QtWidgets.QGraphicsView.RubberBandDrag)\n\n \n def show_entire_image(self):\n #this is a little utility to reset the zoom with a single click\n self.fitInView()\n \n \n def resizeEvent(self, event):\n #this function assure that when the main window is resized the image \n #is also resized preserving the h/w ratio\n self.fitInView()\n \n \n \n def update_view(self):\n #this function takes care of updating the view by re-setting the zoom.\n #is usefull to place the image in the scene for the first time\n \n \n #if shape then add shape to image\n if self._opencvimage is not None:\n temp_image = self._opencvimage.copy()\n \n \n #draw 68 landmark points \n if self._shape is not None:\n temp_image = mark_picture(temp_image, self._shape, self._lefteye, self._righteye, 
self._points)\n \n image = cv2.cvtColor(temp_image,cv2.COLOR_BGR2RGB)\n height, width, channel = image.shape\n bytesPerLine = 3 * width\n img_Qt = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)\n img_show = QtGui.QPixmap.fromImage(img_Qt)\n \n #show the photo\n self.setPhoto(img_show)\n \n \n \n \n \n \n\n \n \n" ]
[ [ "numpy.append", "numpy.array", "scipy.spatial.distance.cdist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
tomkooij/covid19
[ "a7d8a5781ed84b4a59652fc4575c15679de7898a" ]
[ "nl_regions.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Holiday zones, municipality classes in the Netherlands; module for importing.\n\nFunction:\n\n- build_municipality_csv()\n- get_municipality_data()\n- select_cases_region()\n\nCreated on Sat Nov 7 16:08:51 2020 @hk_nien\n\nNote: changes as of 2021-01-07:\n\n 'Haaren', # disappears\n 'Boxtel', 'Vught', 'Tilburg': expanded\n 'Eemsdelta', merger of 'Appingedam', 'Delfzijl', 'Loppersum',\n 'Hengelo' renamed to 'Hengelo (O.)' (we undo this)\n\"\"\"\nfrom pathlib import Path\nimport re\nimport json\nimport pandas as pd\n\nDATA_PATH = Path(__file__).parent / 'data'\nDF_MUN = None\n\ndef build_municipality_csv(df_cases):\n \"\"\"Write data/municipalities.csv.\n\n The csv will have columns:\n\n - Municipality_name\n - Population\n - Province\n - HolRegion\n\n This function only needs to be called only rarely (output will be committed).\n\n Parameters:\n\n - df_cases: dataframe with columns 'Municipality_name' and 'Province'.\n \"\"\"\n\n df_mun = _load_municipality_data_cbs(df_cases)\n df_mun.rename(columns={'Inwoners': 'Population'}, inplace=True)\n\n\n ### Get provinces from cases dataframe.\n # dataframe: index=Municipality_name, column 'Province'\n mun_provs = df_cases.groupby('Municipality_name').first()[['Province']]\n df_mun['Province'] = mun_provs['Province']\n\n new_row = dict(\n Municipality='Eemsdelta',\n Population=df_mun.loc[['Appingedam', 'Delfzijl', 'Loppersum'], 'Population'].sum(),\n Province='Groningen',\n )\n\n df_mun = df_mun.append(\n pd.DataFrame.from_records([new_row]).set_index('Municipality')\n )\n _add_holiday_regions(df_mun)\n\n fpath = DATA_PATH / 'municipalities.csv'\n df_mun.to_csv(fpath, float_format='%.7g', header=True)\n print(f'Wrote {fpath}')\n\n\ndef get_municipality_data():\n \"\"\"Return dataframe with municipality data:\n\n Index: Municipality (name)\n Columns: Population, Province, HolRegion,\n\n This just loads the csv file created by build_municipality_csv(),\n or use a previously cached version.\n \"\"\"\n\n global DF_MUN\n if DF_MUN is None:\n df = pd.read_csv(DATA_PATH / 'municipalities.csv')\n df.set_index('Municipality', inplace=True)\n DF_MUN = df\n\n return DF_MUN.copy()\n\ndef _load_municipality_data_cbs(df_cases):\n \"\"\"Return municipality dataframe from cases dataframe.\n\n Cases dataframe must have 'Municipality_name' column.\n This takes data from the CBS table 'Regionale_kerncijfers*.csv'.\n\n Return dataframe with:\n\n - index: municipality\n - 'Inwoners' column\n - 'Province' column\n \"\"\"\n\n ## Load municipality populations\n path = DATA_PATH / 'Regionale_kerncijfers_Nederland_15082020_130832.csv'\n\n df_mun = pd.read_csv(path, sep=';')\n df_mun.rename(columns={\n #'Perioden',\n #\"Regio's\",\n 'Bevolking/Bevolkingssamenstelling op 1 januari/Totale bevolking (aantal)': 'total',\n 'Bevolking/Bevolkingssamenstelling op 1 januari/Burgerlijke staat/Bevolking 15 jaar of ouder/Inwoners 15 jaar of ouder (aantal)': 'n15plus',\n 'Bevolking/Bevolkingssamenstelling op 1 januari/Burgerlijke staat/Bevolking 15 jaar of ouder/Gehuwd (in % van inwoners 15 jaar of ouder)': 'n15gehuwd',\n 'Bevolking/Bevolkingssamenstelling op 1 januari/Bevolkingsdichtheid (aantal inwoners per km²)': 'dichtheid',\n 'Bouwen en wonen/Woningvoorraad/Voorraad op 1 januari (aantal)': 'woningen',\n 'Milieu en bodemgebruik/Bodemgebruik/Oppervlakte/Totale oppervlakte (km²)': 'opp'\n }, inplace=True)\n\n df_mun = pd.DataFrame({'Municipality': df_mun['Regio\\'s'], 'Inwoners': df_mun['total']})\n 
df_mun.set_index('Municipality', inplace=True)\n df_mun = df_mun.loc[~df_mun.Inwoners.isna()]\n import re\n df_mun.rename(index=lambda x: re.sub(r' \\(gemeente\\)$', '', x), inplace=True)\n\n rename_muns = {\n 'Beek (L.)': 'Beek',\n 'Hengelo (O.)': 'Hengelo',\n 'Laren (NH.)': 'Laren',\n 'Middelburg (Z.)': 'Middelburg',\n 'Rijswijk (ZH.)': 'Rijswijk',\n 'Stein (L.)': 'Stein',\n }\n\n df_mun.rename(index=rename_muns, inplace=True)\n # df_mun.drop(index=['Valkenburg (ZH.)'], inplace=True)\n\n return df_mun\n\ndef _add_holiday_regions(df_mun):\n \"\"\"Add a column 'HolRegion' with holiday region names (Noord, Midden, Zuid).\n\n Parameter:\n\n - df_mun: Dataframe with index 'Municipality_name' and at least column 'Province'.\n\n Update df_mun in-place with new 'HolRegion' column.\n \"\"\"\n\n # Definitions holiday regions\n # https://www.rijksoverheid.nl/onderwerpen/schoolvakanties/regios-schoolvakantie\n\n rules = [\n # Region name, (P:|M:) province/municipality name\n ['Noord',\n 'P:Drenthe', 'P:Flevoland', 'P:Friesland', 'P:Groningen',\n 'P:Overijssel', 'P:Noord-Holland'],\n ['Midden', 'M:Zeewolde', 'P:Utrecht', 'P:Zuid-Holland'],\n ['Zuid', 'P:Limburg', 'P:Noord-Brabant', 'P:Zeeland'],\n ['Noord', 'M:Hattem', 'M:Eemnes'],\n ['Zuid', 'P:Gelderland'],\n ['Midden', 'M:Aalten', 'M:Apeldoorn', 'M:Barneveld', 'M:Berkelland',\n 'M:Bronckhorst', 'M:Brummen', 'M:Buren', 'M:Culemborg', 'M:Doetinchem',\n 'M:Ede', 'M:Elburg', 'M:Epe', 'M:Ermelo', 'M:Harderwijk', 'M:Heerde',\n 'M:Lochem', 'M: Montferland', 'M:Neder-Betuwe', 'M:Nijkerk', 'M:Nunspeet',\n 'M:Oldebroek', 'M:Oost-Gelre', 'M:Oude IJsselstreek', 'M:Putten',\n 'M:Scherpenzeel', 'M:Tiel', 'M:Voorst', 'M:Wageningen', 'M:West Betuwe',\n 'M:Winterswijk en Zutphen', 'M:Werkendam', 'M:Woudrichem'],\n ]\n\n df_mun['HolRegion'] = None\n\n for rule in rules:\n hrname = rule[0]\n for pmname in rule[1:]:\n if pmname.startswith('P:'):\n df_mun.loc[df_mun['Province'] == pmname[2:], 'HolRegion'] = hrname\n elif pmname.startswith('M:'):\n df_mun.loc[df_mun.index == pmname[2:] , 'HolRegion'] = hrname\n else:\n raise ValueError(f'pmname {pmname!r}: bad pattern.')\n\n\n\ndef select_cases_region(dfc, region):\n \"\"\"Select daily cases by region.\n\n Parameters:\n\n - dfc: cases dataframe, with columns\n 'Date_of_report', 'Municipality', and various numerical columns.\n - region: one of:\n - the name of a municipality\n - 'Nederland': all\n - 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':\n holiday regions.\n - 'POP:xx-yy': municipalities with population xx <= pop/1000 < yy'\n - 'P:xx': province\n - 'JSON:{...}' json dict containing key 'muns' with a list\n of municipalities, to be aggregrated.\n\n Return:\n\n - Dataframe with Date_of_report as index and\n numerical columns summed as appropriate.\n - npop: population.\n\n Note: population is sampled at final date. 
This may result in funny\n results if the municipality selection changes due to municipality\n reorganization.\n \"\"\"\n\n df_mun = get_municipality_data()\n\n # First, mselect is Dataframe of selected municipalities.\n if region == 'Nederland':\n mselect = df_mun\n elif region == 'HR:Midden+Zuid':\n mselect = df_mun.loc[df_mun['HolRegion'].str.match('Midden|Zuid')]\n elif region == 'HR:Midden+Noord':\n mselect = df_mun.loc[df_mun['HolRegion'].str.match('Midden|Noord')]\n elif region.startswith('HR:'):\n mselect = df_mun.loc[df_mun['HolRegion'] == region[3:]]\n elif region.startswith('P:'):\n mselect = df_mun.loc[df_mun['Province'] == region[2:]]\n elif region.startswith('POP:'):\n ma = re.match(r'POP:(\\d+)-(\\d+)$', region)\n if not ma:\n raise ValueError(f'region={region!r} does not match \\'MS:NUM-NUM\\'.')\n pop_lo, pop_hi = float(ma.group(1)), float(ma.group(2))\n mask = (df_mun['Population'] >= pop_lo*1e3) & (df_mun['Population'] < pop_hi*1e3)\n mselect = df_mun.loc[mask]\n elif region.startswith('JSON:'):\n muns = json.loads(region[5:])['muns']\n mselect = df_mun.loc[muns]\n else:\n mselect = df_mun.loc[[region]]\n\n # Select the corresponding rows in dfc.\n dfc_sel = dfc.join(mselect[[]], on='Municipality_name', how='inner')\n\n if len(dfc_sel) == 0:\n raise ValueError(f'No data for region={region!r}.')\n\n # Population based on final date; avoid double-counting\n # due to municipality reorganization as of 2021-01-07.\n\n date_end = dfc_sel['Date_of_report'].max()\n muns_end = dfc_sel.loc[dfc['Date_of_report'] == date_end, 'Municipality_name']\n if date_end > pd.to_datetime('2021-01-07'):\n # Distribute 'Haren' over the new municipalities\n df_mun = df_mun.copy()\n for mun in ['Boxtel', 'Vught', 'Tilburg']:\n df_mun.loc[mun, 'Population'] += df_mun.loc['Haaren', 'Population'] // 3\n df_mun.drop(index='Haaren', inplace=True)\n\n npop = df_mun.loc[muns_end, 'Population'].sum()\n\n # combine\n dfc_sel = dfc_sel.groupby('Date_of_report').sum()\n\n return dfc_sel, npop\n\n\n\nif __name__ == '__main__':\n # recreate municipalities.csv\n df = pd.read_csv('data/COVID-19_aantallen_gemeente_cumulatief.csv', sep=';')\n build_municipality_csv(df)\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
harrysame/ReinforcementLearning
[ "6b2f85d2200c391b7fde2529ef26178c1be52959" ]
[ "codes/hw3/TD.py" ]
[ "import numpy as np\n\ndef TD0(get_episode,policy, initial_v, gamma, alpha,num_episodes = 1):\n# This function implements TD(0).\n# get_episode: function to generate an episode\n# policy: the policy to be evaluated \n# initial_v: initial estimate for value function\n# gamma: discount factor\n# alpha: learning rate\n# num_episodes: number of episodes (iterations)\n# The function returns the estimate of v\n\n # initialization \n num_states = policy.shape[0]\n v = np.copy(initial_v)\n N_s = np.zeros(num_states) # counter for states\n \n for ep in range(num_episodes):\n states,_,rewards = get_episode(policy) # generate an episode\n\n G_total = 0\n for j,state in enumerate(states):\n N_s[state] += 1.0\n\n first_occr = next(i for i,x in enumerate(states) if states[i] == state )\n # Calculate G_total\n G = sum([ x * (gamma ** i) for i,x in enumerate(rewards[first_occr:])])\n G_total += G\n\n if alpha ==0:\n v[state] +=( G_total - v[state])/N_s[state]\n\n else:\n v[state] += alpha *(G_total - v[state])\n return v\n\n\ndef TD_n(get_episode, policy, initial_v, n, gamma, alpha,num_episodes = 1):\n# This function implements n-step TD.\n# get_episode: function to generate an episode\n# policy: the policy to be evaluated \n# initial_v: initial estimate for value function v\n# n: number of steps to look ahead\n# gamma: discount factor\n# alpha: learning rate\n# num_episodes: number of episodes (iterations)\n# The function returns the estimate of v\n\n # initialization\n num_states = policy.shape[0]\n v = np.copy(initial_v)\n N_s = np.zeros(num_states) # counter for states\n \n for ep in range(num_episodes):\n states,_,rewards = get_episode(policy) # generate an episode\n G_total = 0\n for j,state in enumerate(states):\n N_s[state] += 1.0\n\n first_occr = next(i for i,x in enumerate(states) if states[i] == state )\n # Calculate G_total: Gt to Gt+n\n\n endTime = min(len(states), j+n)\n G = sum([ x * (gamma ** i) for i,x in enumerate(rewards[first_occr:endTime])])\n G_total += G\n # Calculate TD error \n if alpha ==0:\n v[state] +=( G_total - v[state])/N_s[state]\n\n else:\n v[state] += alpha *(G_total - v[state])\n\n return v\n\n\ndef TD_lambda(get_episode, policy, initial_v, lambda_, gamma, alpha,\n num_episodes=1):\n# This function implements n-step TD.\n# get_episode: function to generate an episode\n# policy: the policy to be evaluated \n# initial_v: initial estimate for value function v\n# lambda_: value of lambda in TD(lambda)\n# gamma: discount factor\n# alpha: learning rate\n# num_episodes: number of episodes (iterations)\n# The function returns the estimate of v\n \n # initialization \n v = np.copy(initial_v) \n num_states = policy[0]\n\n for ep in range(num_episodes):\n states,_,rewards = get_episode(policy)\n eTrace = np.copy(initial_v)\n for i in range(len(rewards)):\n currentState = states[i]\n eTrace *= gamma * lambda_ \n eTrace[currentState] += 1\n delta = rewards[i] + gamma * v[states[i+1]] - v[states[i]]\n v += alpha * delta * eTrace\n return v\n\n\n " ]
[ [ "numpy.copy", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fbravosanchez/NIPS4Bplus
[ "6166042f0ded907ba70e943496f3aad4a8b62c62" ]
[ "cut_nips4bplus_files.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nCreated by Francisco Bravo Sanchez July 2021\r\nThis scripts reads the NIPS4B wav files and splits them according to the\r\ncsv annotations from NIPS4Bplus (Morfi V, Bas Y, Pamula H, Glotin H,\r\nStowell D. 2019. NIPS4Bplus: a richly annotated birdsong audio dataset.\r\nPeerJ Comput. Sci. 5:e223 http://doi.org/10.7717/peerj-cs.223)\r\n\r\nNIPS4B wav files:\r\nhttp://sabiod.univ-tln.fr/nips4b/media/birds/NIPS4B_BIRD_CHALLENGE_TRAIN_TEST_WAV.tar.gz\r\n\r\nNIPS4Bplus annotations:\r\nhttps://doi.org/10.6084/m9.figshare.6798548\r\n\r\nInstructions\r\nhttps://github.com/fbravosanchez/NIPS4Bplus#readme\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport soundfile as sf\r\nimport numpy as np\r\n\r\n\r\n#Set directories\r\n#path to NIPS4B_BIRD wav files\r\nwav_path = sys.argv[1]\r\n#path to NIPS4Bplus csv annotation files\r\ncsv_path = sys.argv[2]\r\n#output path for generated cut files\r\noutput_path = sys.argv[3]\r\n\r\nif not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n\r\n\r\n#read csv label file list\r\nlbl_files = pd.DataFrame(glob.glob(os.path.join(csv_path , '')+ '*.csv'))\r\nlbl_files.columns = ['csv']\r\nlbl_files['wav'] = 'nips4b_birds_trainfile' + lbl_files['csv'].str[-7:-4]\r\n\r\n\r\n#process by csv file\r\nfor i, j in lbl_files.iterrows():\r\n\r\n #skip empty files\r\n try:\r\n k = pd.read_csv(j['csv'], header=None)\r\n tags = True\r\n except pd.errors.EmptyDataError:\r\n tags = False\r\n\r\n #for each valid csv file process wavefile\r\n if tags:\r\n [signal, fs] = sf.read(os.path.join(wav_path , '') + j['wav'] + '.wav')\r\n signal = signal.astype(np.float64)\r\n\r\n # Signal normalization\r\n signal = signal/np.abs(np.max(signal))\r\n\r\n #cut signal according to tag\r\n for l, m in k.iterrows():\r\n beg_sig = int(m[0]*fs)\r\n end_sig = int((m[0]+m[1])*fs)\r\n signal_cut = signal[beg_sig:end_sig]\r\n\r\n # Save cut signal as a new wavefile\r\n file_out = os.path.join(output_path, '') + str(j['wav']) +'_'+ str(l) + '.wav'\r\n sf.write(file_out, signal_cut, fs)\r\n" ]
[ [ "numpy.max", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
uhh-lt/dl-seminar
[ "b146db2f63462a7d795c43b484dc9e8ca38fb4d6" ]
[ "dump/04_dnn_tagger_02.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom nltk.corpus import brown\nimport sklearn\nimport tensorflow as tf\nimport numpy as np\nimport numpy.random as random\n\nimport itertools\n\nrandom.seed(42)\n\n## Install data by running the following code:\n#import nltk\n#nltk.download('brown')\n#nltk.download('universal_tagset')\n\ndef prepare_data(left_context_len, right_context_len, training_size):\n \n ## Take a subset\n brown_words = list(itertools.islice(brown.words(), training_size))\n brown_tags = [pair[1] for pair in brown.tagged_words(tagset='universal')]\n \n word_encoder = sklearn.preprocessing.LabelEncoder()\n pos_encoder = sklearn.preprocessing.LabelEncoder()\n x_data = word_encoder.fit_transform(brown_words)\n y_data = pos_encoder.fit_transform(brown_tags)\n \n input_dim = len(word_encoder.classes_)\n output_dim = len(pos_encoder.classes_)\n \n train_data = [(x_data[i-left_context_len:i+right_context_len+1], y_data[i]) for i in range(left_context_len, len(x_data)-right_context_len)]\n x_train = np.array([pair[0] for pair in train_data])\n y_train = np.array([pair[1] for pair in train_data])\n \n return input_dim, output_dim, x_train, y_train, pos_encoder\n\n# seq_len (int), input_dim (int), output_dim (int), embedding_dim (int), learning_rate (float)\ndef build_graph(seq_len, input_dim, output_dim, embedding_dim, learning_rate):\n ## input\n x = tf.placeholder(tf.int32, (None, seq_len))\n y = tf.placeholder(tf.int32, (None))\n \n embeddings = tf.Variable(\n tf.random_uniform([input_dim, embedding_dim], -1.0, 1.0))\n \n ## embedd input\n x_embedd = tf.reshape(tf.nn.embedding_lookup(embeddings, x), [-1, embedding_dim*seq_len])\n \n ## multilayer model\n hidden_1 = tf.Variable(tf.random_uniform([embedding_dim*seq_len, embedding_dim], -0.01, 0.01, dtype=tf.float32))\n b1 = tf.Variable(tf.random_uniform([embedding_dim], -0.01, 0.01, dtype=tf.float32))\n \n layer_1 = tf.add(tf.matmul(x_embedd, hidden_1), b1)\n layer_1 = tf.nn.relu(layer_1)\n \n hidden_2 = tf.Variable(tf.random_uniform([embedding_dim, embedding_dim], -0.01, 0.01, dtype=tf.float32))\n b2 = tf.Variable(tf.random_uniform([embedding_dim], -0.01, 0.01, dtype=tf.float32))\n \n layer_2 = tf.add(tf.matmul(layer_1, hidden_2), b2)\n layer_2 = tf.nn.relu(layer_2)\n \n out = tf.Variable(tf.random_uniform([embedding_dim, output_dim], -0.01, 0.01, dtype=tf.float32))\n b_out = tf.Variable(tf.random_uniform([output_dim], -0.01, 0.01, dtype=tf.float32))\n pred = tf.matmul(layer_2, out) + b_out\n \n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=pred))\n \n pred_argmax = tf.argmax(tf.nn.softmax(pred), axis=1)\n\n ## define the optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n \n # function must return x (placeholder), y (placeholder), optimizer (optimizer op),\n # loss (loss op), pred_argmax (argmax of logits as tensor)\n return x, y, optimizer, loss, pred_argmax\n\ndef main():\n\n # model size parameters\n left_context_len = 2\n right_context_len = 2\n \n # set this higher to get a better model\n training_size = 500\n embedding_dim = 100\n\n ## Hyperparemeters: experiment with these, too\n learning_rate = 0.01\n epochs = 10\n\n seq_len = left_context_len + 1 + right_context_len \n input_dim, output_dim, x_train, y_train, pos_encoder = prepare_data(left_context_len, right_context_len, training_size)\n x, y, optimizer, loss, pred_argmax = build_graph(seq_len, input_dim, output_dim, embedding_dim, learning_rate)\n\n ## start the session\n with tf.Session() as sess:\n \n ## 
initalize parameters\n sess.run(tf.global_variables_initializer())\n train_dict = {x: x_train, y: y_train}\n \n print(\"Initial training loss: \" + str(sess.run(loss, train_dict)))\n \n for i in range(epochs):\n ## run the optimizer\n epoch_data = list(zip(x_train, y_train))\n np.random.shuffle(epoch_data)\n for x_sample,y_sample in epoch_data:\n train_dict_local = {x: [x_sample], y: [y_sample]}\n sess.run(optimizer, train_dict_local) \n print(\"Training loss after epoch \" + str(i+1) + \":\" + str(sess.run(loss, train_dict)))\n \n print(pos_encoder.inverse_transform(sess.run(pred_argmax, train_dict)))\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.matmul", "tensorflow.nn.softmax", "numpy.random.seed", "tensorflow.placeholder", "numpy.random.shuffle", "sklearn.preprocessing.LabelEncoder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.Session", "numpy.array", "tensorflow.random_uniform", "tensorflow.nn.embedding_lookup" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
cyuuu4u/CNN-text-classification-tf-master
[ "b2cce63a71cff57319705ec96ac164b79a89cc21" ]
[ "data_helpers.py" ]
[ "import numpy as np\nimport re\nimport itertools\nfrom collections import Counter\nfrom collections import defaultdict\nimport os\nimport pandas as pd\nimport pickle\n\n\ndef clean_str(string): #清洗数据\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef load_data_and_labels(positive_data_file, negative_data_file):\n \"\"\"\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n # Load data from files\n positive_examples = list(open(positive_data_file, \"r\").readlines())\n positive_examples = [s.strip() for s in positive_examples] # 去除前后的空格,中间空格保留\n negative_examples = list(open(negative_data_file, \"r\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples # 两个list合并\n x_text = [clean_str(sent) for sent in x_text] #一句话一个sent,进行数据清洗\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]\n\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data) # 变成二维矩阵形式,每一对xy是一行\n data_size = len(data)\n num_batches_per_epoch = int((len(data)-1)/batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size)) # 打乱数据集\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index] # 产生一个batch要用的数据;然后全部epoch依次产生\n\n\n############## loading word embedding ##############\n\ndef load_embedding_vectors_word2vec(vocabulary, filename, binary):\n # load embedding_vectors from the word2vec\n encoding = 'utf-8'\n with open(filename, \"rb\") as f:\n header = f.readline()\n vocab_size, vector_size = map(int, header.split())\n # initial matrix with random uniform\n embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))\n if binary:\n binary_len = np.dtype('float32').itemsize * vector_size\n for line_no in range(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == b' ':\n break\n if ch == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n if ch != b'\\n':\n word.append(ch)\n word = str(b''.join(word), encoding=encoding, errors='strict')\n idx = vocabulary.get(word)\n if idx != 0:\n embedding_vectors[idx] = np.fromstring(f.read(binary_len), 
dtype='float32')\n else:\n f.seek(binary_len, 1)\n else:\n for line_no in range(vocab_size):\n line = f.readline()\n if line == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n parts = str(line.rstrip(), encoding=encoding, errors='strict').split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % (line_no))\n word, vector = parts[0], list(map('float32', parts[1:]))\n idx = vocabulary.get(word)\n if idx != 0:\n embedding_vectors[idx] = vector\n f.close()\n return embedding_vectors\n\n\ndef load_embedding_vectors_glove(vocabulary, filename, vector_size):\n # load embedding_vectors from the glove\n # initial matrix with random uniform\n embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))\n f = open(filename)\n for line in f:\n values = line.split()\n word = values[0]\n vector = np.asarray(values[1:], dtype=\"float32\")\n idx = vocabulary.get(word)\n if idx != 0:\n embedding_vectors[idx] = vector\n f.close()\n return embedding_vectors\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.dtype", "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JohannesGaessler/presentation_mc
[ "0d43f3b70d71f063f872a5fb8b09cde3b756ee36", "0d43f3b70d71f063f872a5fb8b09cde3b756ee36", "0d43f3b70d71f063f872a5fb8b09cde3b756ee36" ]
[ "02_pi_crude.py", "05_pi_vegas_ensemble.py", "03_pi_variance_reduction.py" ]
[ "import numpy as np\nfrom scipy.integrate import quad\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(32.0, 6.0))\n\n\ndef f(x):\n return 1 - np.sqrt(1 - x ** 2)\n\n\nSAMPLE_SIZE = 1000\nEf = quad(lambda x: f(x), 0, 1)[0]\nVarf = quad(lambda x: (f(x) - Ef) ** 2, 0, 1)[0]\n\nrand_x = np.random.rand(SAMPLE_SIZE)\nrand_y = f(rand_x)\nplot_x = np.linspace(start=0, stop=1.0, num=101, endpoint=True)\nfor i in range(5):\n plt.subplot(1, 5, i+1)\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n plt.xlabel(\"$x$\")\n plt.ylabel(\"$y$\")\n plt.plot(plot_x, f(plot_x))\n plt.bar(x=0, height=rand_y[i], width=1.0, align=\"edge\", color=(1.0, 0.0, 0.0, 0.5))\nplt.savefig(\"pi_crude.png\")\n\npi_empirical = 4 * (1.0 - np.sum(rand_y)/SAMPLE_SIZE)\nprint(f\"Estimate: {pi_empirical:.6f}\")\nprint(f\"Empirical uncertainty: {4 * np.sqrt(np.var(rand_y) / SAMPLE_SIZE) / pi_empirical * 100:.4f}%\")\nprint(f\"Expected uncertainty: {4 * np.sqrt(Varf / SAMPLE_SIZE) / np.pi * 100:.4f}%\")\n", "import numpy as np\nvegas_module = __import__(\"04_pi_vegas\")\nvegas = vegas_module.vegas\n\nresults = []\n\nfor _ in range(1000):\n results.append(vegas(iterations=3, samples_per_iteration=333, num_bins=20, K=1000, alpha=1.0))\nresults = np.array(results)\n\nmean_result = np.mean(results)\nprint(f\"Estimate: {mean_result}\")\nprint(f\"Empirical relative uncertainty: {100 * np.std(results)/mean_result}%\")\n\n", "import numpy as np\nfrom scipy.integrate import quad\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(6.0, 6.0))\n\n\ndef f(x):\n return 1 - np.sqrt(1 - x ** 2)\n\n\ndef g(x):\n return 3 * x ** 2\n\n\nSAMPLE_SIZE = 1000\nEfg = quad(lambda x: f(x)/g(x), 0, 1)[0]\nVarfg = quad(lambda x: (f(x)/g(x) - Efg) ** 2, 0, 1)[0]\n\nrand_x = np.random.rand(SAMPLE_SIZE) ** (1/3)\nrand_y = f(rand_x)\nrand_y_weighted = rand_y / g(rand_x)\nplot_x = np.linspace(start=0.001, stop=1.0, num=1000, endpoint=True)\n\nplt.xlim(0, 1)\nplt.ylim(0, 1)\nplt.vlines(x=rand_x[:100], ymin=0, ymax=rand_y_weighted[:100], color=\"black\", label=\"samples\")\nplt.plot(plot_x, f(plot_x)/g(plot_x), label=\"$f(x)/g(x)$\")\nplt.plot(plot_x, f(plot_x), label=\"$f(x)$\")\nplt.plot(plot_x, g(plot_x)*(1.0-np.pi/4.0), label=r\"$g(x) \\cdot (1 - \\pi/4)$\")\nplt.legend(loc=\"upper left\")\nplt.savefig(\"pi_variance_reduction.png\")\n\npi_empirical = 4 * (1.0 - np.sum(rand_y_weighted)/SAMPLE_SIZE)\nprint(f\"Estimate: {pi_empirical:.6f}\")\nprint(f\"Empirical uncertainty: {4 * np.sqrt(np.var(rand_y_weighted) / SAMPLE_SIZE) / pi_empirical * 100:.4f}%\")\nprint(f\"Expected uncertainty: {4 * np.sqrt(Varfg / SAMPLE_SIZE) / np.pi * 100:.4f}%\")\n\nplt.show()\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "numpy.random.rand", "matplotlib.pyplot.bar", "numpy.var", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.figure" ], [ "numpy.std", "numpy.array", "numpy.mean" ], [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "numpy.random.rand", "numpy.var", "matplotlib.pyplot.vlines", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pkicsiny/xline
[ "a0dcc6655082bc08e882509bc4e65204de836b8a", "a0dcc6655082bc08e882509bc4e65204de836b8a", "a0dcc6655082bc08e882509bc4e65204de836b8a" ]
[ "xline/be_beamfields/boost.py", "examples/lhc/benchmark.py", "examples/rf_multipole/rf_multipole.py" ]
[ "import numpy as np\n\n# I program as close as possible to C...\n\n\ndef boost(x, px, y, py, sigma, delta, parboost):\n\n sphi = parboost.sphi\n cphi = parboost.cphi\n tphi = parboost.tphi\n salpha = parboost.salpha\n calpha = parboost.calpha\n\n h = (\n delta\n + 1.0\n - np.sqrt((1.0 + delta) * (1.0 + delta) - px * px - py * py)\n )\n\n px_st = px / cphi - h * calpha * tphi / cphi\n py_st = py / cphi - h * salpha * tphi / cphi\n delta_st = (\n delta - px * calpha * tphi - py * salpha * tphi + h * tphi * tphi\n )\n\n pz_st = np.sqrt(\n (1.0 + delta_st) * (1.0 + delta_st) - px_st * px_st - py_st * py_st\n )\n hx_st = px_st / pz_st\n hy_st = py_st / pz_st\n hsigma_st = 1.0 - (delta_st + 1) / pz_st\n\n L11 = 1.0 + hx_st * calpha * sphi\n L12 = hx_st * salpha * sphi\n L13 = calpha * tphi\n\n L21 = hy_st * calpha * sphi\n L22 = 1.0 + hy_st * salpha * sphi\n L23 = salpha * tphi\n\n L31 = hsigma_st * calpha * sphi\n L32 = hsigma_st * salpha * sphi\n L33 = 1.0 / cphi\n\n x_st = L11 * x + L12 * y + L13 * sigma\n y_st = L21 * x + L22 * y + L23 * sigma\n sigma_st = L31 * x + L32 * y + L33 * sigma\n\n return x_st, px_st, y_st, py_st, sigma_st, delta_st\n\n\ndef inv_boost(x_st, px_st, y_st, py_st, sigma_st, delta_st, parboost):\n\n sphi = parboost.sphi\n cphi = parboost.cphi\n tphi = parboost.tphi\n salpha = parboost.salpha\n calpha = parboost.calpha\n\n pz_st = np.sqrt(\n (1.0 + delta_st) * (1.0 + delta_st) - px_st * px_st - py_st * py_st\n )\n hx_st = px_st / pz_st\n hy_st = py_st / pz_st\n hsigma_st = 1.0 - (delta_st + 1) / pz_st\n\n Det_L = (\n 1.0 / cphi\n + (hx_st * calpha + hy_st * salpha - hsigma_st * sphi) * tphi\n )\n\n Linv_11 = (\n 1.0 / cphi + salpha * tphi * (hy_st - hsigma_st * salpha * sphi)\n ) / Det_L\n Linv_12 = (salpha * tphi * (hsigma_st * calpha * sphi - hx_st)) / Det_L\n Linv_13 = (\n -tphi\n * (\n calpha\n - hx_st * salpha * salpha * sphi\n + hy_st * calpha * salpha * sphi\n )\n / Det_L\n )\n\n Linv_21 = (calpha * tphi * (-hy_st + hsigma_st * salpha * sphi)) / Det_L\n Linv_22 = (\n 1.0 / cphi + calpha * tphi * (hx_st - hsigma_st * calpha * sphi)\n ) / Det_L\n Linv_23 = (\n -tphi\n * (\n salpha\n - hy_st * calpha * calpha * sphi\n + hx_st * calpha * salpha * sphi\n )\n / Det_L\n )\n\n Linv_31 = -hsigma_st * calpha * sphi / Det_L\n Linv_32 = -hsigma_st * salpha * sphi / Det_L\n Linv_33 = (1.0 + hx_st * calpha * sphi + hy_st * salpha * sphi) / Det_L\n\n x_i = Linv_11 * x_st + Linv_12 * y_st + Linv_13 * sigma_st\n y_i = Linv_21 * x_st + Linv_22 * y_st + Linv_23 * sigma_st\n sigma_i = Linv_31 * x_st + Linv_32 * y_st + Linv_33 * sigma_st\n\n h = (delta_st + 1.0 - pz_st) * cphi * cphi\n\n px_i = px_st * cphi + h * calpha * tphi\n py_i = py_st * cphi + h * salpha * tphi\n\n delta_i = (\n delta_st\n + px_i * calpha * tphi\n + py_i * salpha * tphi\n - h * tphi * tphi\n )\n\n return x_i, px_i, y_i, py_i, sigma_i, delta_i\n", "import numpy as np\n\nimport sixtracktools\nimport xline\n\nsix = sixtracktools.SixInput(\".\")\nline = xline.Line.from_sixinput(six)\niconv = line.other_info[\"iconv\"]\n\nsixdump = sixtracktools.SixDump101(\"res/dump3.dat\")[1::2]\n\n\ndef compare(prun, pbench):\n out = []\n for att in \"x px y py zeta delta\".split():\n vrun = getattr(prun, att)\n vbench = getattr(pbench, att)\n diff = vrun - vbench\n out.append(abs(diff))\n print(f\"{att:<5} {vrun:22.13e} {vbench:22.13e} {diff:22.13g}\")\n print(f\"max {max(out):21.12e}\")\n return max(out), out\n\n\nprint(\"\")\ndiffs = []\ns_coord = []\nfor ii in range(1, len(iconv)):\n jja = iconv[ii - 1]\n jjb = iconv[ii]\n 
prun = xline.Particles(**sixdump[ii - 1].get_minimal_beam())\n print(f\"\\n-----sixtrack={ii} xline={jja} --------------\")\n # print(f\"pysixtr {jja}, x={prun.x}, px={prun.px}\")\n for jj in range(jja + 1, jjb + 1):\n label, elem = line.element_names[jj], line.elements[jj]\n elem.track(prun)\n print(f\"{jj} {label},{str(elem)[:50]}\")\n pbench = xline.Particles(**sixdump[ii].get_minimal_beam())\n s_coord.append(pbench.s)\n # print(f\"sixdump {ii}, x={pbench.x}, px={pbench.px}\")\n print(\"-----------------------\")\n out, out_all = compare(prun, pbench)\n print(\"-----------------------\\n\\n\")\n diffs.append(out_all)\n if out > 1e-13:\n print(\"Too large discrepancy\")\n break\n\ndiffs = np.array(diffs)\n\nimport matplotlib.pyplot as plt\nplt.close('all')\nfig = plt.figure(1, figsize=(6.4*1.5, 4.8*1.3))\nfor ii, (vv, uu) in enumerate(\n zip(['x', 'px', 'y', 'py', r'$\\zeta$', r'$\\delta$'],\n ['[m]', '[-]', '[m]', '[-]', '[m]', '[-]'])):\n ax = fig.add_subplot(3, 2, ii+1)\n ax.plot(s_coord, diffs[:, ii])\n ax.set_ylabel('Difference on '+ vv + ' ' + uu)\n ax.set_xlabel('s [m]')\nfig.subplots_adjust(hspace=.48)\n\n\nplt.show()\n", "import numpy as np\n\nfrom cpymad.madx import Madx\nimport xline\n\n# run MADX tests\nmad = Madx()\nmad.call(\"rf_multipole.madx\")\n\n# create xline rfmultipole\nmad_sequence = mad.sequence[\"sequ_rfmultipole\"]\nrf_multipole_mad = mad_sequence.elements[1]\nfreq = rf_multipole_mad.freq * 1e6 # MAD units are MHz\nknl = rf_multipole_mad.knl\npn = np.array(rf_multipole_mad.pnl) * 360 # MAD units are 2pi\nlag = rf_multipole_mad.lag * 360 # MAD units are 2pi\n\nrf_multipole = xline.elements.RFMultipole(\n voltage=0, frequency=freq, lag=lag, knl=knl, ksl=[0], pn=pn, ps=[0]\n)\n\n\n# track xline\nmad_part = xline.Particles.from_madx_track(mad)\np1 = mad_part.copy(0)\np2 = mad_part.copy(1)\np3 = p1.copy()\nrf_multipole.track(p3)\n\n# compare\np2.compare(p3)\n\n# test conversion\nline = xline.Line.from_madx_sequence(mad_sequence)\ntw = mad.twiss(betx=1, bety=1, x=0.1, t=0.5)\np_mad = xline.Particles.from_madx_twiss(tw)\np_six = mad_part.copy(0)\np_out = xline.Particles.from_list(\n line.track_elem_by_elem(p_six, start=False)\n)\n" ]
[ [ "numpy.sqrt" ], [ "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.close", "matplotlib.pyplot.figure" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZhiangChen/tornado_damage
[ "25af94dc170a118cc4e10b6b302a55283e48ce75" ]
[ "tiles.py" ]
[ "\"\"\"\ntiles.py\nZhiang Chen, Jan 7 2020\nTo process tornado damage tiles\n\"\"\"\n\nimport os\nimport numpy as np\nimport pickle\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\nimport matplotlib.pyplot as plt\n\nclass Tiles(object):\n def __init__(self, size=(1000, 1000)):\n self.size = size\n\n def generateTiles(self, path, cls, threshold=0.75):\n self.path = path # path.split('_')[0]\n pickle_files = [f for f in os.listdir(path) if f.endswith('pickle')]\n assert len(pickle_files)\n for pickle_file in pickle_files:\n image_file = os.path.join(path, pickle_file.split('.')[0] + \".png\")\n f = os.path.join(path, pickle_file)\n with open(f, 'rb') as filehandle:\n data = pickle.load(filehandle)\n tile = self.__createTile(data, cls, threshold)\n cv2.imwrite(image_file, tile)\n\n def __createTile(self, data, cls, threshold):\n boxes = data['boxes']\n labels = data['labels']\n scores = data['scores']\n masks = data['masks']\n image_name = data['image_name']\n image_path = os.path.join('../', image_name)\n assert os.path.isfile(image_path)\n img = cv2.imread(image_path)\n img = img > 10\n img = np.all(img, axis=2)\n h,w = img.shape\n if np.sum(img) < h*w/4.0:\n return np.zeros(self.size).astype(np.uint8)\n\n if len(boxes) == 0:\n return np.zeros(self.size).astype(np.uint8)\n else:\n idx = scores > threshold\n boxes = boxes[idx]\n if len(boxes) == 0:\n return np.zeros(self.size).astype(np.uint8)\n else:\n labels = labels[idx]\n scores = scores[idx]\n masks = masks[idx]\n\n idx = labels == cls\n boxes = boxes[idx]\n labels = labels[idx]\n scores = scores[idx]\n masks = masks[idx]\n if len(boxes) == 0:\n return np.zeros(self.size).astype(np.uint8)\n else:\n if len(boxes) == 0:\n return np.zeros(self.size).astype(np.uint8)\n print(image_path)\n tile = masks.squeeze(axis=1)\n tile = tile.max(axis=0)\n for box in boxes:\n y1, x1, y2, x2 = box\n pt1 = (y1, x1)\n pt2 = (y2, x2)\n tile = cv2.rectangle(tile, pt1, pt2, color=1, thickness=2)\n tile = (tile * 255).astype(np.uint8)\n return tile\n\n def readTiles(self, path, type=\"grayscale\"):\n tile_files = os.listdir(path)\n self.tiles = {}\n for tile_file in tile_files:\n x, y = tile_file.split('.')[0].split('_')\n # -----> x\n # |\n # |\n # y v\n file_path = os.path.join(path, tile_file)\n if type == \"grayscale\":\n tile = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)\n else:\n tile = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)\n self.tiles[(x, y)] = tile\n\n\n\n def concatenate(self, path, name, step, scale, type=\"grayscale\"):\n \"\"\"\n :param path:\n :param step: the actual size of tile\n :param scale: the actual tile = scale * tile\n :return:\n \"\"\"\n tile_files = [i for i in os.listdir(path) if i.endswith('.png')]\n X = []\n Y = []\n for tile_file in tile_files:\n #print(tile_file)\n x, y = tile_file.split('.')[0].split('_')[-2: ]\n X.append(int(x))\n Y.append(int(y))\n width = max(X)/scale + step/scale\n height = max(Y)/scale + step/scale\n\n if type == \"grayscale\":\n map = np.zeros((int(height), int(width)))\n else:\n map = np.zeros((int(height), int(width), 3))\n\n for tile_file in tile_files:\n x, y = tile_file.split('.')[0].split('_')[-2: ]\n x, y = int(int(x)/scale), int(int(y)/scale)\n file_path = os.path.join(path, tile_file)\n if type == \"grayscale\":\n tile = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)\n else:\n tile = cv2.imread(file_path, cv2.IMREAD_COLOR)\n if tile.shape[0] != step/scale:\n tile = cv2.resize(tile, (int(step/scale), int(step/scale) ))\n map[y:y+int(step/scale), 
x:x+int(step/scale)] = tile\n\n cv2.imwrite(name, map)\n\n\n def applyMask(self, map_path, mask_path, color=(1, 1, 0)):\n if not map_path.endswith(\".tif\"):\n map = cv2.imread(map_path, cv2.IMREAD_UNCHANGED)\n mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)\n x, y = map.shape[:2]\n x_, y_ = mask.shape[:2]\n if x < x_:\n mask = cv2.resize(mask, (y, x))\n else:\n map = cv2.resize(map, (y_, x_))\n\n alpha = 0.5\n mask = mask > 1\n for c in range(3):\n map[:, :, c] = np.where(mask == 1,\n map[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n map[:, :, c])\n\n return map\n\n\n\nif __name__ == '__main__':\n t = Tiles()\n t.generateTiles('../103_pred/', 1, threshold=0.8)\n #t.generateTiles('../104_pred/', 2, threshold=0.7)\n t.concatenate('../103_pred/non_damaged/', name=\"non_damaged_103.png\", step=2000, scale=2)\n #t.concatenate('../104_pred/damaged/', name=\"damaged_104.png\", step=2000, scale=2)\n #t.concatenate('../101_pred/damaged_60/', name=\"damaged_60_101.png\", step=2000, scale=2)\n #t.concatenate('../104/', name=\"104.png\", step=2000, scale=4, type=\"rgb\")\n #mask_map = t.applyMask('101.png', 'damaged_101.png', color=(1, 0, 1))\n #cv2.imwrite(\"masked_damaged_101.png\", mask_map)\n mask_map = t.applyMask('masked_damaged_103.png', 'non_damaged_103.png', color=(0, 1, 1))\n cv2.imwrite(\"masked_103.png\", mask_map)\n" ]
[ [ "numpy.all", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kmdalton/reciprocalspaceship
[ "cf936cca64c5c387ace505416a047318efa9375f" ]
[ "reciprocalspaceship/utils/math.py" ]
[ "import numpy as np\n\ndef angle_between(vec1, vec2, deg=True):\n \"\"\"\n This function computes the angle between vectors along the last dimension of the input arrays.\n This version is a numerically stable one based on arctan2 as described in this post:\n - https://scicomp.stackexchange.com/a/27769/39858\n\n Parameters\n ----------\n vec1 : array\n An arbitrarily batched arry of vectors\n vec2 : array\n An arbitrarily batched arry of vectors\n deg : bool (optional)\n Whether angles are returned in degrees or radians. The default is degrees (deg=True).\n\n Returns\n -------\n angles : array\n A vector of angles with the same leading dimensions of vec1 and vec2.\n \"\"\"\n v1 = vec1 / np.linalg.norm(vec1, axis=-1)[...,None]\n v2 = vec2 / np.linalg.norm(vec2, axis=-1)[...,None]\n x1 = np.linalg.norm(v1 - v2, axis=-1)\n x2 = np.linalg.norm(v1 + v2, axis=-1)\n alpha = 2.*np.arctan2(x1, x2)\n if deg:\n return np.rad2deg(alpha)\n return alpha\n\n\n" ]
[ [ "numpy.arctan2", "numpy.linalg.norm", "numpy.rad2deg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ginggs/statsmodels
[ "a74a179d2a3267ed992871f8d9ef6c6d86c9b934", "a74a179d2a3267ed992871f8d9ef6c6d86c9b934", "a74a179d2a3267ed992871f8d9ef6c6d86c9b934", "a74a179d2a3267ed992871f8d9ef6c6d86c9b934" ]
[ "statsmodels/tsa/statespace/exponential_smoothing.py", "statsmodels/tsa/base/tests/test_tsa_indexes.py", "statsmodels/tsa/ar_model.py", "statsmodels/sandbox/distributions/otherdist.py" ]
[ "\"\"\"\nLinear exponential smoothing models\n\nAuthor: Chad Fulton\nLicense: BSD-3\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.base.data import PandasData\n\nfrom statsmodels.genmod.generalized_linear_model import GLM\nfrom statsmodels.tools.validation import (array_like, bool_like, float_like,\n string_like, int_like)\n\nfrom statsmodels.tsa.exponential_smoothing import initialization as es_init\nfrom statsmodels.tsa.statespace import initialization as ss_init\nfrom statsmodels.tsa.statespace.kalman_filter import (\n MEMORY_CONSERVE, MEMORY_NO_FORECAST)\n\nfrom statsmodels.compat.pandas import Appender\nimport statsmodels.base.wrapper as wrap\n\nfrom statsmodels.iolib.summary import forg\nfrom statsmodels.iolib.table import SimpleTable\nfrom statsmodels.iolib.tableformatting import fmt_params\n\nfrom .mlemodel import MLEModel, MLEResults, MLEResultsWrapper\n\n\nclass ExponentialSmoothing(MLEModel):\n \"\"\"\n Linear exponential smoothing models\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n trend : bool, optional\n Whether or not to include a trend component. Default is False.\n damped_trend : bool, optional\n Whether or not an included trend component is damped. Default is False.\n seasonal : int, optional\n The number of periods in a complete seasonal cycle for seasonal\n (Holt-Winters) models. For example, 4 for quarterly data with an\n annual cycle or 7 for daily data with a weekly cycle. Default is\n no seasonal effects.\n initialization_method: {'estimated', 'concentrated',\n 'heuristic', 'known'}, optional\n Method for initialize the recursions. If 'known' initialization is\n used, then `initial_level` must be passed, as well as `initial_slope`\n and `initial_seasonal` if applicable. Default is 'estimated'.\n initial_level : float, optional\n The initial level component. Only used if initialization is 'known'.\n initial_trend : float, optional\n The initial trend component. Only used if initialization is 'known'.\n initial_seasonal : array_like, optional\n The initial seasonal component. An array of length `seasonal`\n or length `seasonal - 1` (in which case the last initial value\n is computed to make the average effect zero). Only used if\n initialization is 'known'.\n bounds : iterable of tuple, optional\n An iterable containing bounds for the parameters. Must contain four\n elements, where each element is a tuple of the form (lower, upper).\n Default is (0.0001, 0.9999) for the level, trend, and seasonal\n smoothing parameters and (0.8, 0.98) for the trend damping parameter.\n concentrate_scale : bool, optional\n Whether or not to concentrate the scale (variance of the error term)\n out of the likelihood.\n\n Notes\n -----\n The parameters and states of this model are estimated by setting up the\n exponential smoothing equations as a special case of a linear Gaussian\n state space model and applying the Kalman filter. As such, it has slightly\n worse performance than the dedicated exponential smoothing model,\n `sm.tsa.ExponentialSmoothing`, and it does not support multiplicative\n (nonlinear) exponential smoothing models.\n\n However, as a subclass of the state space models, this model class shares\n a consistent set of functionality with those models, which can make it\n easier to work with. In addition, it supports computing confidence\n intervals for forecasts and it supports concentrating the initial\n state out of the likelihood function.\n\n References\n ----------\n [1] Hyndman, Rob, Anne B. 
Koehler, J. Keith Ord, and Ralph D. Snyder.\n Forecasting with exponential smoothing: the state space approach.\n Springer Science & Business Media, 2008.\n \"\"\"\n def __init__(self, endog, trend=False, damped_trend=False, seasonal=None,\n initialization_method='estimated', initial_level=None,\n initial_trend=None, initial_seasonal=None, bounds=None,\n concentrate_scale=True, dates=None, freq=None,\n missing='none'):\n # Model definition\n self.trend = bool_like(trend, 'trend')\n self.damped_trend = bool_like(damped_trend, 'damped_trend')\n self.seasonal_periods = int_like(seasonal, 'seasonal', optional=True)\n self.seasonal = self.seasonal_periods is not None\n self.initialization_method = string_like(\n initialization_method, 'initialization_method').lower()\n self.concentrate_scale = bool_like(concentrate_scale,\n 'concentrate_scale')\n\n # TODO: add validation for bounds (e.g. have all bounds, upper > lower)\n # TODO: add `bounds_method` argument to choose between \"usual\" and\n # \"admissible\" as in Hyndman et al. (2008)\n self.bounds = bounds\n if self.bounds is None:\n self.bounds = [(1e-4, 1-1e-4)] * 3 + [(0.8, 0.98)]\n\n # Validation\n if self.seasonal_periods == 1:\n raise ValueError('Cannot have a seasonal period of 1.')\n\n if self.seasonal and self.seasonal_periods is None:\n raise NotImplementedError('Unable to detect season automatically;'\n ' please specify `seasonal_periods`.')\n\n if self.initialization_method not in ['concentrated', 'estimated',\n 'simple', 'heuristic', 'known']:\n raise ValueError('Invalid initialization method \"%s\".'\n % initialization_method)\n\n if self.initialization_method == 'known':\n if initial_level is None:\n raise ValueError('`initial_level` argument must be provided'\n ' when initialization method is set to'\n ' \"known\".')\n if initial_trend is None and self.trend:\n raise ValueError('`initial_trend` argument must be provided'\n ' for models with a trend component when'\n ' initialization method is set to \"known\".')\n if initial_seasonal is None and self.seasonal:\n raise ValueError('`initial_seasonal` argument must be provided'\n ' for models with a seasonal component when'\n ' initialization method is set to \"known\".')\n\n # Initialize the state space model\n if not self.seasonal or self.seasonal_periods is None:\n self._seasonal_periods = 0\n else:\n self._seasonal_periods = self.seasonal_periods\n\n k_states = 2 + int(self.trend) + self._seasonal_periods\n k_posdef = 1\n\n init = ss_init.Initialization(k_states, 'known',\n constant=[0] * k_states)\n super(ExponentialSmoothing, self).__init__(\n endog, k_states=k_states, k_posdef=k_posdef,\n initialization=init, dates=dates, freq=freq, missing=missing)\n\n # Concentrate the scale out of the likelihood function\n if self.concentrate_scale:\n self.ssm.filter_concentrated = True\n\n # Setup fixed elements of the system matrices\n # Observation error\n self.ssm['design', 0, 0] = 1.\n self.ssm['selection', 0, 0] = 1.\n self.ssm['state_cov', 0, 0] = 1.\n\n # Level\n self.ssm['design', 0, 1] = 1.\n self.ssm['transition', 1, 1] = 1.\n\n # Trend\n if self.trend:\n self.ssm['transition', 1:3, 2] = 1.\n\n # Seasonal\n if self.seasonal:\n k = 2 + int(self.trend)\n self.ssm['design', 0, k] = 1.\n self.ssm['transition', k, -1] = 1.\n self.ssm['transition', k + 1:k_states, k:k_states - 1] = (\n np.eye(self.seasonal_periods - 1))\n\n # Initialization of the states\n if self.initialization_method != 'known':\n msg = ('Cannot give `%%s` argument when initialization is \"%s\"'\n % 
initialization_method)\n if initial_level is not None:\n raise ValueError(msg % 'initial_level')\n if initial_trend is not None:\n raise ValueError(msg % 'initial_trend')\n if initial_seasonal is not None:\n raise ValueError(msg % 'initial_seasonal')\n\n if self.initialization_method == 'simple':\n initial_level, initial_trend, initial_seasonal = (\n es_init._initialization_simple(\n self.endog[:, 0], trend='add' if self.trend else None,\n seasonal='add' if self.seasonal else None,\n seasonal_periods=self.seasonal_periods))\n elif self.initialization_method == 'heuristic':\n initial_level, initial_trend, initial_seasonal = (\n es_init._initialization_heuristic(\n self.endog[:, 0], trend='add' if self.trend else None,\n seasonal='add' if self.seasonal else None,\n seasonal_periods=self.seasonal_periods))\n elif self.initialization_method == 'known':\n initial_level = float_like(initial_level, 'initial_level')\n if self.trend:\n initial_trend = float_like(initial_trend, 'initial_trend')\n if self.seasonal:\n initial_seasonal = array_like(initial_seasonal,\n 'initial_seasonal')\n\n if len(initial_seasonal) == self.seasonal_periods - 1:\n initial_seasonal = np.r_[initial_seasonal,\n 0 - np.sum(initial_seasonal)]\n\n if len(initial_seasonal) != self.seasonal_periods:\n raise ValueError(\n 'Invalid length of initial seasonal values. Must be'\n ' one of s or s-1, where s is the number of seasonal'\n ' periods.')\n\n self._initial_level = initial_level\n self._initial_trend = initial_trend\n self._initial_seasonal = initial_seasonal\n self._initial_state = None\n\n # Initialize now if possible (if we have a damped trend, then\n # initialization will depend on the phi parameter, and so has to be\n # done at each `update`)\n methods = ['simple', 'heuristic', 'known']\n if not self.damped_trend and self.initialization_method in methods:\n self._initialize_constant_statespace(initial_level, initial_trend,\n initial_seasonal)\n\n # Save keys for kwarg initialization\n self._init_keys += ['trend', 'damped_trend', 'seasonal',\n 'initialization_method', 'initial_level',\n 'initial_trend', 'initial_seasonal', 'bounds',\n 'concentrate_scale', 'dates', 'freq', 'missing']\n\n def _get_init_kwds(self):\n kwds = super()._get_init_kwds()\n kwds['seasonal'] = self.seasonal_periods\n return kwds\n\n @property\n def _res_classes(self):\n return {'fit': (ExponentialSmoothingResults,\n ExponentialSmoothingResultsWrapper)}\n\n def clone(self, endog, exog=None, **kwargs):\n if exog is not None:\n raise NotImplementedError(\n 'ExponentialSmoothing does not support `exog`.')\n return self._clone_from_init_kwds(endog, **kwargs)\n\n @property\n def state_names(self):\n state_names = ['error', 'level']\n if self.trend:\n state_names += ['trend']\n if self.seasonal:\n state_names += ['seasonal.%d' % i\n for i in range(self.seasonal_periods)]\n\n return state_names\n\n @property\n def param_names(self):\n param_names = ['smoothing_level']\n if self.trend:\n param_names += ['smoothing_trend']\n if self.seasonal:\n param_names += ['smoothing_seasonal']\n if self.damped_trend:\n param_names += ['damping_trend']\n if not self.concentrate_scale:\n param_names += ['sigma2']\n\n # Initialization\n if self.initialization_method == 'estimated':\n param_names += ['initial_level']\n if self.trend:\n param_names += ['initial_trend']\n if self.seasonal:\n param_names += ['initial_seasonal.%d' % i\n for i in range(self.seasonal_periods - 1)]\n\n return param_names\n\n @property\n def start_params(self):\n # Make sure starting parameters 
aren't beyond or right on the bounds\n bounds = [(x[0] + 1e-3, x[1] - 1e-3) for x in self.bounds]\n\n # See Hyndman p.24\n start_params = [np.clip(0.1, *bounds[0])]\n if self.trend:\n start_params += [np.clip(0.01, *bounds[1])]\n if self.seasonal:\n start_params += [np.clip(0.01, *bounds[2])]\n if self.damped_trend:\n start_params += [np.clip(0.98, *bounds[3])]\n if not self.concentrate_scale:\n start_params += [np.var(self.endog)]\n\n # Initialization\n if self.initialization_method == 'estimated':\n initial_level, initial_trend, initial_seasonal = (\n es_init._initialization_simple(\n self.endog[:, 0],\n trend='add' if self.trend else None,\n seasonal='add' if self.seasonal else None,\n seasonal_periods=self.seasonal_periods))\n start_params += [initial_level]\n if self.trend:\n start_params += [initial_trend]\n if self.seasonal:\n start_params += initial_seasonal.tolist()[:-1]\n\n return np.array(start_params)\n\n @property\n def k_params(self):\n k_params = (\n 1 + int(self.trend) + int(self.seasonal) +\n int(not self.concentrate_scale) + int(self.damped_trend))\n if self.initialization_method == 'estimated':\n k_params += (\n 1 + int(self.trend) +\n int(self.seasonal) * (self._seasonal_periods - 1))\n return k_params\n\n def transform_params(self, unconstrained):\n unconstrained = np.array(unconstrained, ndmin=1)\n constrained = np.zeros_like(unconstrained)\n\n # Alpha in (0, 1)\n low, high = self.bounds[0]\n constrained[0] = (\n 1 / (1 + np.exp(-unconstrained[0])) * (high - low) + low)\n i = 1\n\n # Beta in (0, alpha)\n if self.trend:\n low, high = self.bounds[1]\n high = min(high, constrained[0])\n constrained[i] = (\n 1 / (1 + np.exp(-unconstrained[i])) * (high - low) + low)\n i += 1\n\n # Gamma in (0, 1 - alpha)\n if self.seasonal:\n low, high = self.bounds[2]\n high = min(high, 1 - constrained[0])\n constrained[i] = (\n 1 / (1 + np.exp(-unconstrained[i])) * (high - low) + low)\n i += 1\n\n # Phi in bounds (e.g. default is [0.8, 0.98])\n if self.damped_trend:\n low, high = self.bounds[3]\n constrained[i] = (\n 1 / (1 + np.exp(-unconstrained[i])) * (high - low) + low)\n i += 1\n\n # sigma^2 positive\n if not self.concentrate_scale:\n constrained[i] = unconstrained[i]**2\n i += 1\n\n # Initial parameters are as-is\n if self.initialization_method == 'estimated':\n constrained[i:] = unconstrained[i:]\n\n return constrained\n\n def untransform_params(self, constrained):\n constrained = np.array(constrained, ndmin=1)\n unconstrained = np.zeros_like(constrained)\n\n # Alpha in (0, 1)\n low, high = self.bounds[0]\n tmp = (constrained[0] - low) / (high - low)\n unconstrained[0] = np.log(tmp / (1 - tmp))\n i = 1\n\n # Beta in (0, alpha)\n if self.trend:\n low, high = self.bounds[1]\n high = min(high, constrained[0])\n tmp = (constrained[i] - low) / (high - low)\n unconstrained[i] = np.log(tmp / (1 - tmp))\n i += 1\n\n # Gamma in (0, 1 - alpha)\n if self.seasonal:\n low, high = self.bounds[2]\n high = min(high, 1 - constrained[0])\n tmp = (constrained[i] - low) / (high - low)\n unconstrained[i] = np.log(tmp / (1 - tmp))\n i += 1\n\n # Phi in bounds (e.g. 
default is [0.8, 0.98])\n if self.damped_trend:\n low, high = self.bounds[3]\n tmp = (constrained[i] - low) / (high - low)\n unconstrained[i] = np.log(tmp / (1 - tmp))\n i += 1\n\n # sigma^2 positive\n if not self.concentrate_scale:\n unconstrained[i] = constrained[i]**0.5\n i += 1\n\n # Initial parameters are as-is\n if self.initialization_method == 'estimated':\n unconstrained[i:] = constrained[i:]\n\n return unconstrained\n\n def _initialize_constant_statespace(self, initial_level,\n initial_trend=None,\n initial_seasonal=None):\n # Note: this should be run after `update` has already put any new\n # parameters into the transition matrix, since it uses the transition\n # matrix explicitly.\n\n # Due to timing differences, the state space representation integrates\n # the trend into the level in the \"predicted_state\" (only the\n # \"filtered_state\" corresponds to the timing of the exponential\n # smoothing models)\n\n # Initial values are interpreted as \"filtered\" values\n constant = np.array([0., initial_level])\n if self.trend and initial_trend is not None:\n constant = np.r_[constant, initial_trend]\n if self.seasonal and initial_seasonal is not None:\n constant = np.r_[constant, initial_seasonal]\n self._initial_state = constant[1:]\n\n # Apply the prediction step to get to what we need for our Kalman\n # filter implementation\n constant = np.dot(self.ssm['transition'], constant)\n\n self.initialization.constant = constant\n\n def _initialize_stationary_cov_statespace(self):\n R = self.ssm['selection']\n Q = self.ssm['state_cov']\n self.initialization.stationary_cov = R.dot(Q).dot(R.T)\n\n def update(self, params, transformed=True, includes_fixed=False,\n complex_step=False):\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # State space system matrices\n self.ssm['selection', 0, 0] = 1 - params[0]\n self.ssm['selection', 1, 0] = params[0]\n i = 1\n if self.trend:\n self.ssm['selection', 2, 0] = params[i]\n i += 1\n if self.seasonal:\n self.ssm['selection', 0, 0] -= params[i]\n self.ssm['selection', i + 1, 0] = params[i]\n i += 1\n if self.damped_trend:\n self.ssm['transition', 1:3, 2] = params[i]\n i += 1\n if not self.concentrate_scale:\n self.ssm['state_cov', 0, 0] = params[i]\n i += 1\n\n # State initialization\n if self.initialization_method == 'estimated':\n initial_level = params[i]\n i += 1\n initial_trend = None\n initial_seasonal = None\n\n if self.trend:\n initial_trend = params[i]\n i += 1\n if self.seasonal:\n initial_seasonal = params[i: i + self.seasonal_periods - 1]\n initial_seasonal = np.r_[initial_seasonal,\n 0 - np.sum(initial_seasonal)]\n self._initialize_constant_statespace(initial_level, initial_trend,\n initial_seasonal)\n\n methods = ['simple', 'heuristic', 'known']\n if self.damped_trend and self.initialization_method in methods:\n self._initialize_constant_statespace(\n self._initial_level, self._initial_trend,\n self._initial_seasonal)\n\n self._initialize_stationary_cov_statespace()\n\n def _compute_concentrated_states(self, params, *args, **kwargs):\n # Apply the usual filter, but keep forecasts\n kwargs['conserve_memory'] = MEMORY_CONSERVE & ~MEMORY_NO_FORECAST\n super().loglike(params, *args, **kwargs)\n\n # Compute the initial state vector\n y_tilde = np.array(self.ssm._kalman_filter.forecast_error[0],\n copy=True)\n\n # Need to modify our state space system matrices slightly to get them\n # back into the form of the innovations framework of\n # De Livera et al. 
(2011)\n T = self['transition', 1:, 1:]\n R = self['selection', 1:]\n Z = self['design', :, 1:].copy()\n i = 1\n if self.trend:\n Z[0, i] = 1.\n i += 1\n if self.seasonal:\n Z[0, i] = 0.\n Z[0, -1] = 1.\n\n # Now compute the regression components as described in\n # De Livera et al. (2011), equation (10).\n D = T - R.dot(Z)\n w = np.zeros((self.nobs, self.k_states - 1), dtype=D.dtype)\n w[0] = Z\n for i in range(self.nobs - 1):\n w[i + 1] = w[i].dot(D)\n mod_ols = GLM(y_tilde, w)\n\n # If we have seasonal parameters, constrain them to sum to zero\n # (otherwise the initial level gets confounded with the sum of the\n # seasonals).\n if self.seasonal:\n R = np.zeros_like(Z)\n R[0, -self.seasonal_periods:] = 1.\n q = np.zeros((1, 1))\n res_ols = mod_ols.fit_constrained((R, q))\n else:\n res_ols = mod_ols.fit()\n\n # Separate into individual components\n initial_level = res_ols.params[0]\n initial_trend = res_ols.params[1] if self.trend else None\n initial_seasonal = (\n res_ols.params[-self.seasonal_periods:] if self.seasonal else None)\n\n return initial_level, initial_trend, initial_seasonal\n\n @Appender(MLEModel.loglike.__doc__)\n def loglike(self, params, *args, **kwargs):\n if self.initialization_method == 'concentrated':\n self._initialize_constant_statespace(\n *self._compute_concentrated_states(params, *args, **kwargs))\n llf = self.ssm.loglike()\n self.ssm.initialization.constant = np.zeros(self.k_states)\n else:\n llf = super().loglike(params, *args, **kwargs)\n return llf\n\n @Appender(MLEModel.filter.__doc__)\n def filter(self, params, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, *args, **kwargs):\n if self.initialization_method == 'concentrated':\n self._initialize_constant_statespace(\n *self._compute_concentrated_states(params, *args, **kwargs))\n\n results = super().filter(\n params, cov_type=cov_type, cov_kwds=cov_kwds,\n return_ssm=return_ssm, results_class=results_class,\n results_wrapper_class=results_wrapper_class, *args, **kwargs)\n\n if self.initialization_method == 'concentrated':\n self.ssm.initialization.constant = np.zeros(self.k_states)\n return results\n\n @Appender(MLEModel.smooth.__doc__)\n def smooth(self, params, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, *args, **kwargs):\n if self.initialization_method == 'concentrated':\n self._initialize_constant_statespace(\n *self._compute_concentrated_states(params, *args, **kwargs))\n\n results = super().smooth(\n params, cov_type=cov_type, cov_kwds=cov_kwds,\n return_ssm=return_ssm, results_class=results_class,\n results_wrapper_class=results_wrapper_class, *args, **kwargs)\n\n if self.initialization_method == 'concentrated':\n self.ssm.initialization.constant = np.zeros(self.k_states)\n return results\n\n\nclass ExponentialSmoothingResults(MLEResults):\n def __init__(self, model, params, filter_results, cov_type=None,\n **kwargs):\n super().__init__(model, params, filter_results, cov_type, **kwargs)\n\n # Save the states\n self.initial_state = model._initial_state\n if isinstance(self.data, PandasData):\n index = self.data.row_labels\n self.initial_state = pd.DataFrame(\n [model._initial_state], columns=model.state_names[1:])\n if model._index_dates and model._index_freq is not None:\n self.initial_state.index = index.shift(-1)[:1]\n\n @Appender(MLEResults.summary.__doc__)\n def summary(self, alpha=.05, start=None):\n specification = ['A']\n if self.model.trend and self.model.damped_trend:\n 
specification.append('Ad')\n elif self.model.trend:\n specification.append('A')\n else:\n specification.append('N')\n if self.model.seasonal:\n specification.append('A')\n else:\n specification.append('N')\n\n model_name = 'ETS(' + ', '.join(specification) + ')'\n\n summary = super(ExponentialSmoothingResults, self).summary(\n alpha=alpha, start=start, title='Exponential Smoothing Results',\n model_name=model_name)\n\n if self.model.initialization_method != 'estimated':\n params = np.array(self.initial_state)\n if params.ndim > 1:\n params = params[0]\n names = self.model.state_names\n param_header = ['initialization method: %s'\n % self.model.initialization_method]\n params_stubs = names\n params_data = [[forg(params[i], prec=4)]\n for i in range(len(params))]\n\n initial_state_table = SimpleTable(params_data,\n param_header,\n params_stubs,\n txt_fmt=fmt_params)\n summary.tables.insert(-1, initial_state_table)\n\n return summary\n\n\nclass ExponentialSmoothingResultsWrapper(MLEResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(ExponentialSmoothingResultsWrapper, # noqa:E305\n ExponentialSmoothingResults)\n", "\"\"\"\nTest index support in time series models\n\n1. Test support for passing / constructing the underlying index in __init__\n2. Test wrapping of output using the underlying index\n3. Test wrapping of prediction / forecasting using the underlying index or\n extensions of it.\n\nAuthor: Chad Fulton\nLicense: BSD-3\n\"\"\"\n\nimport pytest\nimport warnings\nimport numpy as np\nimport pandas as pd\n\nfrom numpy.testing import assert_equal, assert_raises\n\nfrom statsmodels.tsa.base import tsa_model\n\nnobs = 5\nbase_dta = np.arange(nobs)\ndta = [\n base_dta.tolist(),\n base_dta,\n pd.Series(base_dta),\n pd.DataFrame(base_dta)\n]\n\nbase_date_indexes = [\n # (usual candidates)\n pd.date_range(start='1950-01-01', periods=nobs, freq='D'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='W'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='M'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='Q'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='A'),\n # (some more complicated frequencies)\n pd.date_range(start='1950-01-01', periods=nobs, freq='2Q'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='2QS'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='5s'),\n pd.date_range(start='1950-01-01', periods=nobs, freq='1D10min')]\n\n# Note: we separate datetime indexes and period indexes because the\n# date coercion does not handle string versions of PeriodIndex objects\n# most of the time.\nbase_period_indexes = [\n pd.period_range(start='1950-01-01', periods=nobs, freq='D'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='W'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='M'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='Q'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='A')]\ntry:\n # Only later versions of pandas support these\n base_period_indexes += [\n pd.period_range(start='1950-01-01', periods=nobs, freq='2Q'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='5s'),\n pd.period_range(start='1950-01-01', periods=nobs, freq='1D10min')]\nexcept AttributeError:\n pass\n\ndate_indexes = [\n (x, None) for x in base_date_indexes]\nperiod_indexes = [\n (x, None) for x in base_period_indexes]\n\nnumpy_datestr_indexes = [\n 
(x.map(str), x.freq) for x in base_date_indexes]\nlist_datestr_indexes = [\n (x.tolist(), y) for x, y in numpy_datestr_indexes]\nseries_datestr_indexes = [\n (pd.Series(x), y) for x, y in list_datestr_indexes]\n\nnumpy_datetime_indexes = [\n (pd.to_datetime(x).to_pydatetime(), x.freq)\n for x in base_date_indexes]\nlist_datetime_indexes = [\n (x.tolist(), y) for x, y in numpy_datetime_indexes]\nseries_datetime_indexes = [\n (pd.Series(x, dtype=object), y) for x, y in list_datetime_indexes]\n\nseries_timestamp_indexes = [\n (pd.Series(x), x.freq) for x in base_date_indexes]\n\n# Supported increment indexes\nsupported_increment_indexes = [\n (pd.Int64Index(np.arange(nobs)), None),\n (pd.RangeIndex(start=0, stop=nobs, step=1), None),\n (pd.RangeIndex(start=-5, stop=nobs - 5, step=1), None),\n (pd.RangeIndex(start=0, stop=nobs * 6, step=6), None)]\n\n# Supported date indexes\n# Only the Int64Index and the `date_indexes` are valid without\n# frequency information\nsupported_date_indexes = (\n numpy_datestr_indexes +\n list_datestr_indexes + series_datestr_indexes +\n numpy_datetime_indexes + list_datetime_indexes +\n series_datetime_indexes + series_timestamp_indexes)\n\n# Unsupported (but still valid) indexes\nunsupported_indexes = [\n # Non-incrementing-from-zero indexes\n (np.arange(1, nobs+1), None),\n (np.arange(nobs)[::-1], None),\n # Float indexes, even if they increment from zero\n (np.arange(nobs) * 1.0, None),\n # Non-date-string indexes\n ([x for x in 'abcde'], None),\n # Non-date-object indexes\n ([str, 1, 'a', -30.1, {}], None),\n]\n\n# Unsupported date indexes (i.e. those without inferrable frequency)\nunsupported_date_indexes = [\n (['1950', '1952', '1941', '1954', '1991'], None),\n (['1950-01-01', '1950-01-02', '1950-01-03',\n '1950-01-04', '1950-01-06'], None)\n]\n\n\ndef test_instantiation_valid():\n tsa_model.__warningregistry__ = {}\n\n # The primary goal of this test function is to make sure the\n # combinations that are supposed to be valid are actually valid, and\n # that valid but unsupported options give the appropriate warning\n # Secondarily, it also has some tests that invalid combinations raise\n # exceptions, although it's not intended to be comprehensive.\n #\n # Each of `endog`, `exog` can be in the following categories:\n # 0. None (only for exog)\n # 1. list\n # 2. numpy array\n # 3. pandas series\n # 4. pandas dataframe\n #\n # Each pandas index (of `endog`, `exog`, or passed to `dates`) can be:\n # 0. None\n # 1. RangeIndex (if applicable; i.e. if Pandas >= 0.18)\n # 2. Int64Index with values exactly equal to 0, 1, ..., nobs-1\n # 3. DatetimeIndex with frequency\n # 4. PeriodIndex with frequency\n # 5. Anything that does not fall into the above categories also should\n # only raise an exception if it was passed to dates, and may trigger\n # a warning otherwise.\n #\n # `date` can be one of the following:\n # 0. None\n # 2. Pandas index #2\n # 3. Pandas index #3\n # 4. List of date strings (requires freq)\n # 5. List of datetime objects (requires freq)\n # 6. Array of date strings (requires freq)\n # 7. Array of datetime objects (requires freq)\n # 8. Series of date strings (requires freq)\n # 9. Series of datetime objects (requires freq)\n # 10. Series of pandas timestamps (requires freq)\n # 11. Anything that does not fall into the above categories should raise\n # an exception.\n #\n # `freq` can be:\n # 0. None\n # 1. Something that can be passed to `pd.to_offset`\n # 2. 
Anything that cannot should raise an Exception\n #\n # Each test will be denoted by:\n # endog.index:exog.index/date/freq where the corresponding\n # location is the integer from above; e.g. 1.0:0.0/9/1 corresponds to\n # - List endog (with no index)\n # - No exog\n # - Series of datetime objects\n # - Something valid for `pd.to_offset` (e.g. 'D', if that works with\n # dates)\n #\n # Notice that the endog.index:exog.index really collapses to a single\n # element, which is the evaluated `row_label`. This is first the exog\n # index, if exists, then the endog index, if it exists, or None\n # otherwise. **Thus, we will not test `exog` here.**\n #\n # Example valid combinations of row_label/date/freq include:\n # - */0/0 (i.e. anything is valid if date and freq are not passed)\n # - */%/% where %/% denotes a valid date/freq combination (i.e. any\n # row_label is valid if a valid date/freq combination is given)\n #\n # Example invalid combinations include:\n # - [1-2],[3-4].4/0/[1-2] (i.e. if have freq, then must have, or\n # coerce, a date index)\n # - */[4-10]/0 (i.e. for some types of dates, freq must be passed)\n\n # Baseline: list, numpy endog with no dates, no freq\n for endog in dta[:2]:\n # No indexes, should not raise warnings\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(isinstance(mod._index,\n (pd.Int64Index, pd.RangeIndex)), True)\n assert_equal(mod._index_none, True)\n assert_equal(mod._index_dates, False)\n assert_equal(mod._index_generated, True)\n assert_equal(mod.data.dates, None)\n assert_equal(mod.data.freq, None)\n\n # Test list, numpy endog, pandas w/o index; with dates / freq argument\n for endog in dta:\n # Supported date indexes, should not raise warnings, do not need freq\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in date_indexes + period_indexes:\n mod = tsa_model.TimeSeriesModel(endog, dates=ix)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Supported date indexes, should not raise warnings, can use valid freq\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in date_indexes + period_indexes:\n mod = tsa_model.TimeSeriesModel(endog, dates=ix, freq=freq)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Other supported indexes, with valid freq, should not raise warnings\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in supported_date_indexes:\n mod = tsa_model.TimeSeriesModel(endog, dates=ix, freq=freq)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n 
assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Since only supported indexes are valid `dates` arguments, everything\n # else is invalid here\n for ix, freq in supported_increment_indexes + unsupported_indexes:\n assert_raises(ValueError, tsa_model.TimeSeriesModel, endog,\n dates=ix)\n\n # Test pandas (Series, DataFrame); with index (no dates/freq argument)\n for base_endog in dta[2:4]:\n # DatetimeIndex and PeriodIndex, should not raise warnings\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in date_indexes + period_indexes:\n endog = base_endog.copy()\n endog.index = ix\n\n mod = tsa_model.TimeSeriesModel(endog)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Increment index (this is a \"supported\" index in the sense that it\n # does not raise a warning, but obviously not a date index)\n endog = base_endog.copy()\n endog.index = supported_increment_indexes[0][0]\n\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(type(mod._index) == pd.Int64Index, True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, False)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index_freq, None)\n assert_equal(mod.data.dates, None)\n assert_equal(mod.data.freq, None)\n\n # RangeIndex (start=0, end=nobs, so equivalent to increment index)\n endog = base_endog.copy()\n endog.index = supported_increment_indexes[1][0]\n\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(type(mod._index) == pd.RangeIndex, True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, False)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index_freq, None)\n assert_equal(mod.data.dates, None)\n assert_equal(mod.data.freq, None)\n\n # Supported indexes *when a freq is given*, should not raise a warning\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in supported_date_indexes:\n endog = base_endog.copy()\n endog.index = ix\n\n mod = tsa_model.TimeSeriesModel(endog, freq=freq)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Unsupported (or any) indexes to the given series, *when a supported\n # date and freq is given*, should not raise a warning\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n\n for ix, freq in supported_date_indexes:\n endog = base_endog.copy()\n endog.index = unsupported_indexes[0][0]\n\n mod = tsa_model.TimeSeriesModel(endog, dates=ix, freq=freq)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(\n 
isinstance(mod._index, (pd.DatetimeIndex, pd.PeriodIndex)),\n True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n assert_equal(mod.data.freq, freq)\n\n # Date indexes with inferrable freq, but no given freq, should all give\n # warnings\n message = ('No frequency information was provided,'\n ' so inferred frequency %s will be used.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n for ix, freq in supported_date_indexes:\n endog = base_endog.copy()\n endog.index = ix\n mod = tsa_model.TimeSeriesModel(endog)\n if freq is None:\n freq = ix.freq\n if not isinstance(freq, str):\n freq = freq.freqstr\n assert_equal(type(mod._index) == pd.DatetimeIndex, True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, True)\n assert_equal(mod._index_generated, False)\n assert_equal(mod._index.freq, mod._index_freq)\n assert_equal(mod.data.dates.equals(mod._index), True)\n\n # Note: here, we need to hedge the test a little bit because\n # inferred frequencies are not always the same as the original\n # frequency. From the examples above, when the actual freq is\n # 2QS-OCT, the inferred freq is 2QS-JAN. This is an issue with\n # inferred frequencies, but since we are warning the user, it's\n # not a failure of the code. Thus we only test the \"major\" part\n # of the freq, and just test that the right message is given\n # (even though it will not have the actual freq of the data in\n # it).\n assert_equal(mod.data.freq.split('-')[0], freq.split('-')[0])\n assert_equal(str(w[-1].message), message % mod.data.freq)\n\n # Unsupported (but valid) indexes, should all give warnings\n message = ('An unsupported index was provided and will be'\n ' ignored when e.g. forecasting.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n for ix, freq in unsupported_indexes:\n endog = base_endog.copy()\n endog.index = ix\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(isinstance(mod._index,\n (pd.Int64Index, pd.RangeIndex)), True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, False)\n assert_equal(mod._index_generated, True)\n assert_equal(mod._index_freq, None)\n assert_equal(mod.data.dates, None)\n assert_equal(mod.data.freq, None)\n\n assert_equal(str(w[0].message), message)\n\n # Date indexes without inferrable freq, and with no given freq, should\n # all give warnings\n message = ('A date index has been provided, but it has no'\n ' associated frequency information and so will be'\n ' ignored when e.g. 
forecasting.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n for ix, freq in unsupported_date_indexes:\n endog = base_endog.copy()\n endog.index = ix\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(isinstance(mod._index,\n (pd.Int64Index, pd.RangeIndex)), True)\n assert_equal(mod._index_none, False)\n assert_equal(mod._index_dates, False)\n assert_equal(mod._index_generated, True)\n assert_equal(mod._index_freq, None)\n assert_equal(mod.data.dates, None)\n assert_equal(mod.data.freq, None)\n\n assert_equal(str(w[0].message), message)\n\n # Test (invalid) freq with no index\n endog = dta[0]\n assert_raises(ValueError, tsa_model.TimeSeriesModel, endog,\n freq=date_indexes[1][0].freq)\n\n # Test conflicting index, freq specifications\n endog = dta[2].copy()\n endog.index = date_indexes[0][0]\n assert_raises(ValueError, tsa_model.TimeSeriesModel, endog,\n freq=date_indexes[1][0].freq)\n\n # Test unsupported index, but a freq specification\n endog = dta[2].copy()\n endog.index = unsupported_indexes[0][0]\n assert_raises(ValueError, tsa_model.TimeSeriesModel, endog,\n freq=date_indexes[1][0].freq)\n\n # Test index that can coerce to date time but incorrect freq\n endog = dta[2].copy()\n endog.index = numpy_datestr_indexes[0][0]\n assert_raises(ValueError, tsa_model.TimeSeriesModel, endog,\n freq=date_indexes[1][0].freq)\n\n\ndef test_prediction_increment_unsupported():\n # a. Generated from unsupported index\n endog = dta[2].copy()\n endog.index = unsupported_indexes[-2][0]\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('ignore')\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; notice that since this is an in-sample\n # prediction, the index returned is the (unsupported) original index\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index.equals(mod.data.row_labels), True)\n\n # Negative index: [-2, end]; notice that since this is an in-sample\n # prediction, the index returned is a piece of the (unsupported)\n # original index\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index.equals(mod.data.row_labels[3:]), True)\n\n # Forecasting: [1, 5], notice that since an unsupported index was given,\n # a warning will be issued\n start_key = 1\n end_key = nobs\n message = ('No supported index is available.'\n ' Prediction results will be given with'\n ' an integer index beginning at `start`.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(str(w[0].message), message)\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n assert_equal(prediction_index.equals(pd.Index(np.arange(1, 6))), True)\n\n # Test getting a location that exists in the (internal) index\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.RangeIndex(start=0, stop=3, step=1)\n assert_equal(index.equals(desired_index), 
True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the (internal) index\n # when using the function that alternatively falls back to the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.RangeIndex(start=0, stop=3, step=1)\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the given (unsupported) index\n # Note that the returned index is now like the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc('c')\n assert_equal(loc, 2)\n desired_index = mod.data.row_labels[:3]\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n\ndef test_prediction_increment_nonpandas():\n endog = dta[0]\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; since there was no index at all and the data\n # is not Pandas, the returned prediction_index is None\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index is None, True)\n\n # Negative index: [-2, end]; since there was no index at all and the data\n # is not Pandas, the returned prediction_index is None\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index is None, True)\n\n # Forecasting: [1, 5]; since there was no index at all and the data\n # is not Pandas, the returned prediction_index is None\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n assert_equal(prediction_index is None, True)\n\n\n # Test getting a location that exists in the (internal) index\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.RangeIndex(start=0, stop=3, step=1)\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the (internal) index\n # when using the function that alternatively falls back to the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.RangeIndex(start=0, stop=3, step=1)\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n\ndef test_prediction_increment_pandas_noindex():\n endog = dta[2].copy()\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; since there was no index and the data is\n # Pandas, the index is the generated incrementing index, and no warning is\n # issued\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index.equals(mod._index), True)\n\n # Negative index: [-2, end]; 
since there was no index and the data is\n # Pandas, the index is the generated incrementing index, and no warning is\n # issued\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert_equal(prediction_index.equals(mod._index[3:]), True)\n\n # Forecasting: [1, 5]; since there was no index and the data is\n # Pandas, the index is the generated incrementing index, and no warning is\n # issued\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n assert_equal(prediction_index.equals(pd.Index(np.arange(1, 6))), True)\n\n\ndef test_prediction_increment_pandas_dates_daily():\n # Date-based index\n endog = dta[2].copy()\n endog.index = date_indexes[0][0] # Daily, 1950-01-01, 1950-01-02, ...\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; the index is the date index\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index), True)\n\n # In-sample prediction: [0, 3]; the index is a subset of the date index\n start_key = 0\n end_key = 3\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, 3)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[:4]), True)\n\n # Negative index: [-2, end]\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[3:]), True)\n\n # Forecasting: [1, 5]; the index is an extended version of the date index\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n desired_index = pd.date_range(start='1950-01-02', periods=5, freq='D')\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Date-based keys\n\n # In-sample prediction (equivalent to [1, 3])\n start_key = '1950-01-02'\n end_key = '1950-01-04'\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 3)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[1:4]), True)\n\n # Out-of-sample forecasting (equivalent to [0, 5])\n start_key = '1950-01-01'\n end_key = '1950-01-08'\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 3)\n desired_index = 
pd.date_range(start='1950-01-01', periods=8, freq='D')\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Test getting a location that exists in the (internal) index\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.date_range(start='1950-01-01', periods=3, freq='D')\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the (internal) index\n # when using the function that alternatively falls back to the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.date_range(start='1950-01-01', periods=3, freq='D')\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the given (unsupported) index\n # Note that the returned index is now like the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc('1950-01-03')\n assert_equal(loc, 2)\n desired_index = mod.data.row_labels[:3]\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n\ndef test_prediction_increment_pandas_dates_monthly():\n # Date-based index\n endog = dta[2].copy()\n endog.index = date_indexes[2][0] # Monthly, 1950-01, 1950-02, ...\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; the index is the date index\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index), True)\n\n # In-sample prediction: [0, 3]; the index is a subset of the date index\n start_key = 0\n end_key = 3\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, 3)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[:4]), True)\n\n # Negative index: [-2, end]\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[3:]), True)\n\n # Forecasting: [1, 5]; the index is an extended version of the date index\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n desired_index = pd.date_range(start='1950-02', periods=5, freq='M')\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Date-based keys\n\n # In-sample prediction (equivalent to [1, 3])\n start_key = '1950-02'\n end_key = '1950-04'\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 3)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[1:4]), True)\n\n 
# Out-of-sample forecasting (equivalent to [0, 5])\n start_key = '1950-01'\n end_key = '1950-08'\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 3)\n desired_index = pd.date_range(start='1950-01', periods=8, freq='M')\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Test getting a location that exists in the (internal) index\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.date_range(start='1950-01', periods=3, freq='M')\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the (internal) index\n # when using the function that alternatively falls back to the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.date_range(start='1950-01', periods=3, freq='M')\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting a location that exists in the given (unsupported) index\n # Note that the returned index is now like the row labels\n loc, index, index_was_expanded = mod._get_index_label_loc('1950-03')\n assert_equal(loc, slice(2, 3, None))\n desired_index = mod.data.row_labels[:3]\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n\ndef test_prediction_increment_pandas_dates_nanosecond():\n # Date-based index\n endog = dta[2].copy()\n endog.index = pd.date_range(start='1970-01-01', periods=len(endog),\n freq='N')\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]; the index is the date index\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs-1)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index), True)\n\n # Negative index: [-2, end]\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n assert type(prediction_index) is type(endog.index) # noqa: E721\n assert_equal(prediction_index.equals(mod._index[3:]), True)\n\n # Forecasting: [1, 5]; the index is an extended version of the date index\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n desired_index = pd.date_range(start='1970-01-01', periods=6, freq='N')[1:]\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Date-based keys\n start_key = pd.Timestamp('1970-01-01')\n end_key = pd.Timestamp(start_key.value + 7)\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 3)\n desired_index = pd.date_range(start='1970-01-01', periods=8, freq='N')\n assert_equal(prediction_index.equals(desired_index), True)\n\n\ndef test_range_index():\n tsa_model.__warningregistry__ = {}\n\n endog = 
pd.Series(np.random.normal(size=5))\n assert_equal(isinstance(endog.index, pd.RangeIndex), True)\n # Warning should not be given\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(len(w), 0)\n\n\ndef test_prediction_rangeindex():\n index = supported_increment_indexes[2][0]\n endog = pd.Series(dta[0], index=index)\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs - 1)\n assert_equal(out_of_sample, 0)\n desired_index = pd.RangeIndex(start=-5, stop=0, step=1)\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Negative index: [-2, end]\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n desired_index = pd.RangeIndex(start=-2, stop=0, step=1)\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Forecasting: [1, 5]\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n desired_index = pd.RangeIndex(start=-4, stop=1, step=1)\n assert_equal(prediction_index.equals(desired_index), True)\n\n\ndef test_prediction_rangeindex_withstep():\n index = supported_increment_indexes[3][0]\n endog = pd.Series(dta[0], index=index)\n mod = tsa_model.TimeSeriesModel(endog)\n\n # Tests three common use cases: basic prediction, negative indexes, and\n # out-of-sample indexes.\n\n # Basic prediction: [0, end]\n start_key = 0\n end_key = None\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 0)\n assert_equal(end, nobs - 1)\n assert_equal(out_of_sample, 0)\n desired_index = pd.RangeIndex(start=0, stop=nobs * 6, step=6)\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Negative index: [-2, end]\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 3)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 0)\n desired_index = pd.RangeIndex(start=3 * 6, stop=nobs * 6, step=6)\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Forecasting: [1, 5]\n start_key = 1\n end_key = nobs\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n assert_equal(start, 1)\n assert_equal(end, 4)\n assert_equal(out_of_sample, 1)\n desired_index = pd.RangeIndex(start=1 * 6, stop=(nobs + 1) * 6, step=6)\n assert_equal(prediction_index.equals(desired_index), True)\n\n # Test getting a location that exists in the index\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n desired_index = pd.RangeIndex(start=0, stop=3 * 6, step=6)\n assert_equal(index.equals(desired_index), True)\n assert_equal(index_was_expanded, False)\n\n\ndef test_custom_index():\n tsa_model.__warningregistry__ = {}\n\n endog = pd.Series(np.random.normal(size=5),\n index=['a', 'b', 'c', 'd', 'e'])\n message = ('An unsupported index was provided and will be 
ignored when'\n ' e.g. forecasting.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n mod = tsa_model.TimeSeriesModel(endog)\n assert_equal(str(w[0].message), message)\n start_key = -2\n end_key = -1\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n\n # Test the default output index\n assert_equal(prediction_index.equals(pd.Index(['d', 'e'])), True)\n\n # Test custom output index\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key, index=['f', 'g']))\n assert_equal(prediction_index.equals(pd.Index(['f', 'g'])), True)\n\n # Test getting a location in the index w/o fallback to row labels\n loc, index, index_was_expanded = mod._get_index_loc(2)\n assert_equal(loc, 2)\n assert_equal(index.equals(pd.RangeIndex(0, 3)), True)\n assert_equal(index_was_expanded, False)\n assert_equal(index_was_expanded, False)\n\n # Test getting an invalid location in the index w/ fallback to row labels\n with pytest.raises(KeyError):\n mod._get_index_loc('c')\n\n # Test getting a location in the index w/ fallback to row labels\n loc, index, index_was_expanded = mod._get_index_label_loc('c')\n assert_equal(loc, 2)\n assert_equal(index.equals(pd.Index(['a', 'b', 'c'])), True)\n assert_equal(index_was_expanded, False)\n\n # Test getting an invalid location in the index w/ fallback to row labels\n with pytest.raises(KeyError):\n mod._get_index_label_loc('aa')\n\n # Test out-of-sample\n start_key = 4\n end_key = 5\n message = ('No supported index is available.'\n ' Prediction results will be given with'\n ' an integer index beginning at `start`.')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key))\n assert_equal(prediction_index.equals(pd.Index([4, 5])), True)\n assert_equal(str(w[0].message), message)\n\n # Test out-of-sample custom index\n start, end, out_of_sample, prediction_index = (\n mod._get_prediction_index(start_key, end_key, index=['f', 'g']))\n assert_equal(prediction_index.equals(pd.Index(['f', 'g'])), True)\n\n # Test invalid custom index\n assert_raises(ValueError, mod._get_prediction_index, start_key, end_key,\n index=['f', 'g', 'h'])\n", "# -*- coding: utf-8 -*-\nimport copy\n\nimport numpy as np\nfrom numpy.linalg import inv, slogdet\nfrom scipy.stats import norm\n\nfrom statsmodels.compat.pandas import Appender\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.iolib.summary import Summary\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tools.decorators import cache_readonly, cache_writable\nfrom statsmodels.tools.numdiff import approx_fprime, approx_hess\nfrom statsmodels.tools.validation import array_like\nfrom statsmodels.tsa.base import tsa_model\nfrom statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter\nfrom statsmodels.tsa.tsatools import (lagmat, add_trend, _ar_transparams,\n _ar_invtransparams)\nfrom statsmodels.tsa.vector_ar import util\n\n__all__ = ['AR']\n\nREPEATED_FIT_ERROR = \"\"\"\nModel has been fit using maxlag={0}, method={1}, ic={2}, trend={3}. These\ncannot be changed in subsequent calls to `fit`. 
Instead, use a new instance of\nAR.\n\"\"\"\n\n\ndef sumofsq(x, axis=0):\n \"\"\"Helper function to calculate sum of squares along first axis\"\"\"\n return np.sum(x ** 2, axis=axis)\n\n\ndef _ar_predict_out_of_sample(y, params, k_ar, k_trend, steps, start=0):\n mu = params[:k_trend] if k_trend else 0 # only need to worry about the constant\n arparams = params[k_trend:][::-1] # reverse for dot\n\n # dynamic endogenous variable\n endog = np.zeros(k_ar + steps) # this is one too big but does not matter\n if start:\n endog[:k_ar] = y[start - k_ar:start]\n else:\n endog[:k_ar] = y[-k_ar:]\n\n forecast = np.zeros(steps)\n for i in range(steps):\n fcast = mu + np.dot(arparams, endog[i:i + k_ar])\n forecast[i] = fcast\n endog[i + k_ar] = fcast\n\n return forecast\n\n\nclass AR(tsa_model.TimeSeriesModel):\n __doc__ = tsa_model._tsa_doc % {\"model\": \"Autoregressive AR(p) model.\",\n \"params\": \"\"\"endog : array_like\n A 1-d endogenous response variable. The dependent variable.\"\"\",\n \"extra_params\": base._missing_param_doc,\n \"extra_sections\": \"\"}\n\n def __init__(self, endog, dates=None, freq=None, missing='none'):\n super(AR, self).__init__(endog, None, dates, freq, missing=missing)\n endog = self.endog # original might not have been an ndarray\n if endog.ndim == 1:\n endog = endog[:, None]\n self.endog = endog # to get shapes right\n elif endog.ndim > 1 and endog.shape[1] != 1:\n raise ValueError(\"Only the univariate case is implemented\")\n self._fit_params = None\n\n def initialize(self):\n \"\"\"Initialization of the model (no-op).\"\"\"\n pass\n\n def _transparams(self, params):\n \"\"\"\n Transforms params to induce stationarity/invertibility.\n\n References\n ----------\n Jones (1980)\n \"\"\"\n p = self.k_ar\n k = self.k_trend\n newparams = params.copy()\n newparams[k:k + p] = _ar_transparams(params[k:k + p].copy())\n return newparams\n\n def _invtransparams(self, start_params):\n \"\"\"\n Inverse of the Jones reparameterization\n \"\"\"\n p = self.k_ar\n k = self.k_trend\n newparams = start_params.copy()\n newparams[k:k + p] = _ar_invtransparams(start_params[k:k + p].copy())\n return newparams\n\n def _presample_fit(self, params, start, p, end, y, predictedvalues):\n \"\"\"\n Return the pre-sample predicted values using the Kalman Filter\n\n Notes\n -----\n See predict method for how to use start and p.\n \"\"\"\n k = self.k_trend\n\n # build system matrices\n T_mat = KalmanFilter.T(params, p, k, p)\n R_mat = KalmanFilter.R(params, p, k, 0, p)\n\n # Initial State mean and variance\n alpha = np.zeros((p, 1))\n Q_0 = np.dot(inv(np.identity(p ** 2) - np.kron(T_mat, T_mat)),\n np.dot(R_mat, R_mat.T).ravel('F'))\n\n Q_0 = Q_0.reshape(p, p, order='F') # TODO: order might need to be p+k\n P = Q_0\n Z_mat = KalmanFilter.Z(p)\n for i in range(end): # iterate p-1 times to fit presample\n v_mat = y[i] - np.dot(Z_mat, alpha)\n F_mat = np.dot(np.dot(Z_mat, P), Z_mat.T)\n Finv = 1. / F_mat # inv.
always scalar\n K = np.dot(np.dot(np.dot(T_mat, P), Z_mat.T), Finv)\n # update state\n alpha = np.dot(T_mat, alpha) + np.dot(K, v_mat)\n L = T_mat - np.dot(K, Z_mat)\n P = np.dot(np.dot(T_mat, P), L.T) + np.dot(R_mat, R_mat.T)\n if i >= start - 1: # only record if we ask for it\n predictedvalues[i + 1 - start] = np.dot(Z_mat, alpha)\n\n def _get_prediction_index(self, start, end, dynamic, index=None):\n method = getattr(self, 'method', 'mle')\n k_ar = getattr(self, 'k_ar', 0)\n if start is None:\n if method == 'mle' and not dynamic:\n start = 0\n else: # cannot do presample fit for cmle or dynamic\n start = k_ar\n start = self._index[start]\n if end is None:\n end = self._index[-1]\n\n start, end, out_of_sample, prediction_index = (\n super(AR, self)._get_prediction_index(start, end, index))\n\n # Other validation\n if (method == 'cmle' or dynamic) and start < k_ar:\n raise ValueError(\"Start must be >= k_ar for conditional MLE \"\n \"or dynamic forecast. Got %d\" % start)\n\n return start, end, out_of_sample, prediction_index\n\n def predict(self, params, start=None, end=None, dynamic=False):\n \"\"\"\n Construct in-sample and out-of-sample prediction.\n\n Parameters\n ----------\n params : array\n The fitted model parameters.\n start : int, str, or datetime\n Zero-indexed observation number at which to start forecasting, i.e.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n end : int, str, or datetime\n Zero-indexed observation number at which to end forecasting, i.e.,\n the last forecast is end. Can also be a date string to\n parse or a datetime type.\n dynamic : bool\n The `dynamic` keyword affects in-sample prediction. If dynamic\n is False, then the in-sample lagged values are used for\n prediction. If `dynamic` is True, then in-sample forecasts are\n used in place of lagged dependent variables. The first forecasted\n value is `start`.\n\n Returns\n -------\n array_like\n An array containing the predicted values.\n\n Notes\n -----\n The linear Gaussian Kalman filter is used to return pre-sample fitted\n values. The exact initial Kalman Filter is used.
See Durbin and Koopman\n in the references for more information.\n \"\"\"\n if not (hasattr(self, 'k_ar') and hasattr(self, 'k_trend')):\n raise RuntimeError('Model must be fit before calling predict')\n # will return an index of a date\n start, end, out_of_sample, _ = (\n self._get_prediction_index(start, end, dynamic))\n\n k_ar = self.k_ar\n k_trend = self.k_trend\n method = self.method\n endog = self.endog.squeeze()\n\n if dynamic:\n out_of_sample += end - start + 1\n return _ar_predict_out_of_sample(endog, params, k_ar,\n k_trend, out_of_sample, start)\n\n predictedvalues = np.zeros(end + 1 - start)\n\n # fit pre-sample\n if method == 'mle': # use Kalman Filter to get initial values\n if k_trend:\n mu = params[0] / (1 - np.sum(params[k_trend:]))\n else:\n mu = 0\n\n # modifies predictedvalues in place\n if start < k_ar:\n self._presample_fit(params, start, k_ar, min(k_ar - 1, end),\n endog[:k_ar] - mu, predictedvalues)\n predictedvalues[:k_ar - start] += mu\n\n if end < k_ar:\n return predictedvalues\n\n # just do the whole thing and truncate\n fittedvalues = np.dot(self.X, params)\n\n pv_start = max(k_ar - start, 0)\n fv_start = max(start - k_ar, 0)\n fv_end = min(len(fittedvalues), end - k_ar + 1)\n predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]\n\n if out_of_sample:\n forecastvalues = _ar_predict_out_of_sample(endog, params,\n k_ar, k_trend,\n out_of_sample)\n predictedvalues = np.r_[predictedvalues, forecastvalues]\n\n return predictedvalues\n\n def _presample_varcov(self, params):\n \"\"\"\n Returns the inverse of the presample variance-covariance.\n\n Notes\n -----\n See Hamilton p. 125\n \"\"\"\n k = self.k_trend\n p = self.k_ar\n\n # get inv(Vp) Hamilton 5.3.7\n params0 = np.r_[-1, params[k:]]\n\n Vpinv = np.zeros((p, p), dtype=params.dtype)\n for i in range(1, p + 1):\n Vpinv[i - 1, i - 1:] = np.correlate(params0, params0[:i])[:-1]\n Vpinv[i - 1, i - 1:] -= np.correlate(params0[-i:], params0)[:-1]\n\n Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())\n return Vpinv\n\n def _loglike_css(self, params):\n \"\"\"\n Loglikelihood of AR(p) process using conditional sum of squares\n \"\"\"\n nobs = self.nobs\n Y = self.Y\n X = self.X\n ssr = sumofsq(Y.squeeze() - np.dot(X, params))\n sigma2 = ssr / nobs\n return -nobs / 2 * (np.log(2 * np.pi) + np.log(sigma2) + 1)\n\n def _loglike_mle(self, params):\n \"\"\"\n Loglikelihood of AR(p) process using exact maximum likelihood\n \"\"\"\n nobs = self.nobs\n X = self.X\n endog = self.endog\n k_ar = self.k_ar\n k_trend = self.k_trend\n\n # reparameterize according to Jones (1980) like in ARMA/Kalman Filter\n if self.transparams:\n params = self._transparams(params)\n\n # get mean and variance for pre-sample lags\n yp = endog[:k_ar].copy()\n if k_trend:\n c = [params[0]] * k_ar\n else:\n c = [0]\n mup = np.asarray(c / (1 - np.sum(params[k_trend:])))\n diffp = yp - mup[:, None]\n\n # get inv(Vp) Hamilton 5.3.7\n Vpinv = self._presample_varcov(params)\n\n diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()\n ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))\n\n # concentrating the likelihood means that sigma2 is given by\n sigma2 = 1. / nobs * (diffpVpinv + ssr)\n self.sigma2 = sigma2\n logdet = slogdet(Vpinv)[1] # TODO: add check for singularity\n loglike = -1 / 2. 
* (nobs * (np.log(2 * np.pi) + np.log(sigma2))\n - logdet + diffpVpinv / sigma2 + ssr / sigma2)\n return loglike\n\n def loglike(self, params):\n r\"\"\"\n The loglikelihood of an AR(p) process.\n\n Parameters\n ----------\n params : array\n The fitted parameters of the AR model.\n\n Returns\n -------\n float\n The loglikelihood evaluated at `params`.\n\n Notes\n -----\n Contains constant term. If the model is fit by OLS then this returns\n the conditional maximum likelihood.\n\n .. math::\n\n -\\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)\n +\\log\\left(\\sigma^{2}\\right)\\right)\n -\\frac{1}{2\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}\n\n If it is fit by MLE then the (exact) unconditional maximum likelihood\n is returned.\n\n .. math::\n\n -\\frac{n}{2}\\log\\left(2\\pi\\right)\n -\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)\n +\\frac{1}{2}\\log\\left|V_{p}^{-1}\\right|\n -\\frac{1}{2\\sigma^{2}}\\left(y_{p}\n -\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)\n -\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{t}^{2}\n\n where\n\n :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the\n mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)\n variance-covariance matrix of the first `p` observations.\n \"\"\"\n # Math is on Hamilton ~pp 124-5\n if self.method == \"cmle\":\n return self._loglike_css(params)\n\n else:\n return self._loglike_mle(params)\n\n def score(self, params):\n \"\"\"\n Compute the gradient of the log-likelihood at params.\n\n Parameters\n ----------\n params : array_like\n The parameter values at which to evaluate the score function.\n\n Returns\n -------\n ndarray\n The gradient computed using numerical methods.\n \"\"\"\n loglike = self.loglike\n return approx_fprime(params, loglike, epsilon=1e-8)\n\n def information(self, params):\n \"\"\"\n Not implemented.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n \"\"\"\n return\n\n def hessian(self, params):\n \"\"\"\n Compute the hessian using a numerical approximation.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n\n Returns\n -------\n ndarray\n The hessian evaluated at params.\n \"\"\"\n loglike = self.loglike\n return approx_hess(params, loglike)\n\n def _stackX(self, k_ar, trend):\n \"\"\"\n Private method to build the RHS matrix for estimation.\n\n Columns are trend terms then lags.\n \"\"\"\n endog = self.endog\n X = lagmat(endog, maxlag=k_ar, trim='both')\n k_trend = util.get_trendorder(trend)\n if k_trend:\n X = add_trend(X, prepend=True, trend=trend, has_constant=\"raise\")\n self.k_trend = k_trend\n return X\n\n def select_order(self, maxlag, ic, trend='c', method='mle'):\n \"\"\"\n Select the lag order according to the information criterion.\n\n Parameters\n ----------\n maxlag : int\n The highest lag length tried. See `AR.fit`.\n ic : {'aic','bic','hqic','t-stat'}\n Criterion used for selecting the optimal lag length.\n See `AR.fit`.\n trend : {'c','nc'}\n Whether to include a constant or not. 'c' - include constant.\n 'nc' - no constant.\n method : {'cmle', 'mle'}, optional\n The method to use in estimation.\n\n * 'cmle' - Conditional maximum likelihood using OLS\n * 'mle' - Unconditional (exact) maximum likelihood.
See `solver`\n and the Notes.\n\n Returns\n -------\n int\n Best lag according to the information criteria.\n \"\"\"\n endog = self.endog\n\n # make Y and X with same nobs to compare ICs\n Y = endog[maxlag:]\n self.Y = Y # attach to get correct fit stats\n X = self._stackX(maxlag, trend) # sets k_trend\n self.X = X\n k = self.k_trend # k_trend set in _stackX\n k = max(1, k) # handle if startlag is 0\n results = {}\n\n if ic != 't-stat':\n for lag in range(k, maxlag + 1):\n # have to reinstantiate the model to keep comparable models\n endog_tmp = endog[maxlag - lag:]\n fit = AR(endog_tmp).fit(maxlag=lag, method=method,\n full_output=0, trend=trend,\n maxiter=100, disp=0)\n results[lag] = getattr(fit, ic)\n bestic, bestlag = min((res, k) for k, res in results.items())\n\n else: # choose by last t-stat.\n stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)\n for lag in range(maxlag, k - 1, -1):\n # have to reinstantiate the model to keep comparable models\n endog_tmp = endog[maxlag - lag:]\n fit = AR(endog_tmp).fit(maxlag=lag, method=method,\n full_output=0, trend=trend,\n maxiter=35, disp=-1)\n\n bestlag = 0\n if np.abs(fit.tvalues[-1]) >= stop:\n bestlag = lag\n break\n return bestlag\n\n def fit(self, maxlag=None, method='cmle', ic=None, trend='c',\n transparams=True, start_params=None, solver='lbfgs', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n \"\"\"\n Fit the unconditional maximum likelihood of an AR(p) process.\n\n Parameters\n ----------\n maxlag : int\n If `ic` is None, then maxlag is the lag length used in fit. If\n `ic` is specified then maxlag is the highest lag order used to\n select the correct lag order. If maxlag is None, the default is\n round(12*(nobs/100.)**(1/4.)).\n method : {'cmle', 'mle'}, optional\n The method to use in estimation.\n\n * 'cmle' - Conditional maximum likelihood using OLS\n * 'mle' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n ic : {'aic','bic','hqic','t-stat'}\n Criterion used for selecting the optimal lag length.\n\n * 'aic' - Akaike Information Criterion\n * 'bic' - Bayes Information Criterion\n * 't-stat' - Based on last lag\n * 'hqic' - Hannan-Quinn Information Criterion\n\n If any of the information criteria are selected, the lag length\n which results in the lowest value is selected. If t-stat, the\n model starts with maxlag and drops a lag until the highest lag\n has a t-stat that is significant at the 95 % level.\n trend : {'c','nc'}\n Whether to include a constant or not.\n\n * 'c' - include constant.\n * 'nc' - no constant.\n transparams : bool, optional\n Whether or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980).\n start_params : array_like, optional\n A first guess on the parameters. Default is cmle estimates.\n solver : str or None, optional\n Solver to be used if method is 'mle'. The default is 'lbfgs'\n (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices\n are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),\n 'cg' - (conjugate gradient), 'ncg' (Newton conjugate gradient),\n and 'powell'.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 35.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object's mle_retvals attribute. Output is dependent\n on the solver.
See Notes for more information.\n disp : bool, optional\n If True, convergence information is output.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n **kwargs\n See LikelihoodModel.fit for keyword arguments that can be passed\n to fit.\n\n Returns\n -------\n ARResults\n Results instance.\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n Base fit class with further details about options.\n\n Notes\n -----\n The parameters after `trend` are only used when method is 'mle'.\n\n References\n ----------\n .. [*] Jones, R.H. 1980 \"Maximum likelihood fitting of ARMA models to\n time series with missing observations.\" `Technometrics`. 22.3.\n 389-95.\n \"\"\"\n start_params = array_like(start_params, 'start_params', ndim=1,\n optional=True)\n method = method.lower()\n if method not in ['cmle', 'mle']:\n raise ValueError(\"Method %s not recognized\" % method)\n self.method = method\n self.trend = trend\n self.transparams = transparams\n nobs = len(self.endog) # overwritten if method is 'cmle'\n endog = self.endog\n # The parameters are no longer allowed to change in an instance\n fit_params = (maxlag, method, ic, trend)\n if self._fit_params is not None and self._fit_params != fit_params:\n raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))\n if maxlag is None:\n maxlag = int(round(12 * (nobs / 100.) ** (1 / 4.)))\n k_ar = maxlag # stays this if ic is None\n\n # select lag length\n if ic is not None:\n ic = ic.lower()\n if ic not in ['aic', 'bic', 'hqic', 't-stat']:\n raise ValueError(\"ic option %s not understood\" % ic)\n k_ar = self.select_order(k_ar, ic, trend, method)\n\n self.k_ar = k_ar # change to what was chosen by ic\n\n # redo estimation for best lag\n # make LHS\n Y = endog[k_ar:, :]\n # make lagged RHS\n X = self._stackX(k_ar, trend) # sets self.k_trend\n k_trend = self.k_trend\n self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)\n self.Y = Y\n self.X = X\n\n if method == \"cmle\": # do OLS\n arfit = OLS(Y, X).fit()\n params = arfit.params\n self.nobs = nobs - k_ar\n self.sigma2 = arfit.ssr / arfit.nobs # needed for predict fcasterr\n\n else: # method == \"mle\"\n solver = solver.lower()\n self.nobs = nobs\n if start_params is None:\n start_params = OLS(Y, X).fit().params\n else:\n if len(start_params) != k_trend + k_ar:\n raise ValueError(\"Length of start params is %d. 
There\"\n \" are %d parameters.\" %\n (len(start_params), k_trend + k_ar))\n start_params = self._invtransparams(start_params)\n if solver == 'lbfgs':\n kwargs.setdefault('pgtol', 1e-8)\n kwargs.setdefault('factr', 1e2)\n kwargs.setdefault('m', 12)\n kwargs.setdefault('approx_grad', True)\n mlefit = super(AR, self).fit(start_params=start_params,\n method=solver, maxiter=maxiter,\n full_output=full_output, disp=disp,\n callback=callback, **kwargs)\n\n params = mlefit.params\n if self.transparams:\n params = self._transparams(params)\n self.transparams = False # turn off now for other results\n\n pinv_exog = np.linalg.pinv(X)\n normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)\n arfit = ARResults(copy.copy(self), params, normalized_cov_params)\n if method == 'mle' and full_output:\n arfit.mle_retvals = mlefit.mle_retvals\n arfit.mle_settings = mlefit.mle_settings\n # Set fit params since completed the fit\n if self._fit_params is None:\n self._fit_params = fit_params\n return ARResultsWrapper(arfit)\n\n\n_preddoc = (AR.predict.__doc__ or \"\").split('\\n')\n\n\nclass ARResults(tsa_model.TimeSeriesModelResults):\n \"\"\"\n Class to hold results from fitting an AR model.\n\n Parameters\n ----------\n model : AR Model instance\n Reference to the model that is fit.\n params : array\n The fitted parameters from the AR Model.\n normalized_cov_params : array\n The array inv(dot(x.T,x)) where x contains the regressors in the\n model.\n scale : float, optional\n An estimate of the scale of the model.\n\n Attributes\n ----------\n k_ar : float\n Lag length. Sometimes used as `p` in the docs.\n k_trend : float\n The number of trend terms included. 'nc'=0, 'c'=1.\n llf : float\n The loglikelihood of the model evaluated at `params`. See `AR.loglike`\n model : AR model instance\n A reference to the fitted AR model.\n nobs : float\n The number of available observations `nobs` - `k_ar`\n n_totobs : float\n The number of total observations in `endog`. Sometimes `n` in the docs.\n params : array\n The fitted parameters of the model.\n scale : float\n Same as sigma2\n sigma2 : float\n The variance of the innovations (residuals).\n trendorder : int\n The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,\n 'ct' = 1, etc.\n \"\"\"\n\n _cache = {} # for scale setter\n\n def __init__(self, model, params, normalized_cov_params=None, scale=1.):\n super(ARResults, self).__init__(model, params, normalized_cov_params,\n scale)\n self._cache = {}\n self.nobs = model.nobs\n n_totobs = len(model.endog)\n self.n_totobs = n_totobs\n self.X = model.X # copy?\n self.Y = model.Y\n k_ar = model.k_ar\n self.k_ar = k_ar\n k_trend = model.k_trend\n self.k_trend = k_trend\n trendorder = None\n if k_trend > 0:\n trendorder = k_trend - 1\n self.trendorder = trendorder\n # TODO: cmle vs mle?\n self.df_model = k_ar + k_trend\n self.df_resid = self.model.df_resid = n_totobs - self.df_model\n\n @cache_writable()\n def sigma2(self):\n model = self.model\n if model.method == \"cmle\": # do DOF correction\n return 1. / self.nobs * sumofsq(self.resid)\n else:\n return self.model.sigma2\n\n @cache_writable() # for compatibility with RegressionResults\n def scale(self):\n return self.sigma2\n\n @cache_readonly\n def bse(self): # allow user to specify?\n \"\"\"\n The standard errors of the estimated parameters.\n\n If `method` is 'cmle', then the standard errors that are returned are\n the OLS standard errors of the coefficients.
If the `method` is 'mle'\n then they are computed using the numerical Hessian.\n \"\"\"\n if self.model.method == \"cmle\": # uses different scale/sigma def.\n resid = self.resid\n ssr = np.dot(resid, resid)\n ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)\n return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))\n else:\n hess = approx_hess(self.params, self.model.loglike)\n return np.sqrt(np.diag(-np.linalg.inv(hess)))\n\n @cache_readonly\n def pvalues(self):\n \"\"\"The p values associated with the standard errors.\"\"\"\n return norm.sf(np.abs(self.tvalues)) * 2\n\n @cache_readonly\n def aic(self):\n \"\"\"\n Akaike Information Criterion using Lütkepohl's definition.\n\n :math:`\\\\log(\\\\sigma) + 2*(1 + k_ar + k_trend)/nobs`\n \"\"\"\n # TODO: this is based on loglike with dropped constant terms ?\n # Lutkepohl\n # return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar\n # Include constant as estimated free parameter and double the loss\n return np.log(self.sigma2) + 2 * (1 + self.df_model) / self.nobs\n # Stata definition\n # nobs = self.nobs\n # return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs\n\n @cache_readonly\n def hqic(self):\n \"\"\"Hannan-Quinn Information Criterion.\"\"\"\n nobs = self.nobs\n # Lutkepohl\n # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar\n # R uses all estimated parameters rather than just lags\n return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))\n / nobs * (1 + self.df_model))\n # Stata\n # nobs = self.nobs\n # return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \\\n # (self.k_ar + self.k_trend)\n\n @cache_readonly\n def fpe(self):\n \"\"\"\n Final prediction error using Lütkepohl's definition.\n\n ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma2\n \"\"\"\n nobs = self.nobs\n df_model = self.df_model\n # Lutkepohl\n return ((nobs + df_model) / (nobs - df_model)) * self.sigma2\n\n @cache_readonly\n def bic(self):\n \"\"\"\n Bayes Information Criterion\n\n :math:`\\\\log(\\\\sigma) + (1 + k_ar + k_trend)*\\\\log(nobs)/nobs`\n \"\"\"\n nobs = self.nobs\n # Lutkepohl\n # return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar\n # Include constant as est.
free parameter\n return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs) / nobs\n # Stata\n # return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \\\n # self.k_trend)\n\n @cache_readonly\n def resid(self):\n \"\"\"\n The residuals of the model.\n\n If the model is fit by 'mle' then the pre-sample residuals are\n calculated using fittedvalues from the Kalman Filter.\n \"\"\"\n # NOTE: uses fittedvalues because it calculates presample values for mle\n model = self.model\n endog = model.endog.squeeze()\n if model.method == \"cmle\": # eliminate pre-sample\n return endog[self.k_ar:] - self.fittedvalues\n else:\n return model.endog.squeeze() - self.fittedvalues\n\n @cache_readonly\n def roots(self):\n \"\"\"\n The roots of the AR process.\n\n The roots are the solution to\n (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.\n Stability requires that the roots in modulus lie outside the unit\n circle.\n \"\"\"\n k = self.k_trend\n return np.roots(np.r_[1, -self.params[k:]]) ** -1\n\n @cache_readonly\n def arfreq(self):\n r\"\"\"\n Returns the frequency of the AR roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n \"\"\"\n z = self.roots\n return np.arctan2(z.imag, z.real) / (2 * np.pi)\n\n @cache_readonly\n def fittedvalues(self):\n \"\"\"\n The in-sample predicted values of the fitted AR model.\n\n The `k_ar` initial values are computed via the Kalman Filter if the\n model is fit by `mle`.\n \"\"\"\n return self.model.predict(self.params)\n\n # Same docstring as AR.predict, but with \"params\" parameter removed\n # TODO: Should have an official docstring cleaner to remove parameters\n @Appender('\\n'.join(_preddoc[:5] + _preddoc[7:]))\n def predict(self, start=None, end=None, dynamic=False):\n params = self.params\n predictedvalues = self.model.predict(params, start, end, dynamic)\n return predictedvalues\n # TODO: consider returning forecast errors and confidence intervals?\n\n def summary(self, alpha=.05):\n \"\"\"Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals.\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n \"\"\"\n model = self.model\n title = model.__class__.__name__ + ' Model Results'\n method = model.method\n # get sample\n start = 0 if 'mle' in method else self.k_ar\n if self.data.dates is not None:\n dates = self.data.dates\n sample = [dates[start].strftime('%m-%d-%Y')]\n sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]\n else:\n sample = str(start) + ' - ' + str(len(self.data.orig_endog))\n\n k_ar = self.k_ar\n order = '({0})'.format(k_ar)\n dep_name = str(self.model.endog_names)\n top_left = [('Dep. Variable:', dep_name),\n ('Model:', [model.__class__.__name__ + order]),\n ('Method:', [method]),\n ('Date:', None),\n ('Time:', None),\n ('Sample:', [sample[0]]),\n ('', [sample[1]])\n ]\n\n top_right = [\n ('No. Observations:', [str(len(self.model.endog))]),\n ('Log Likelihood', [\"%#5.3f\" % self.llf]),\n ('S.D.
of innovations', [\"%#5.3f\" % self.sigma2 ** .5]),\n ('AIC', [\"%#5.3f\" % self.aic]),\n ('BIC', [\"%#5.3f\" % self.bic]),\n ('HQIC', [\"%#5.3f\" % self.hqic])]\n\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n smry.add_table_params(self, alpha=alpha, use_t=False)\n\n # Make the roots table\n from statsmodels.iolib.table import SimpleTable\n\n if k_ar:\n arstubs = [\"AR.%d\" % i for i in range(1, k_ar + 1)]\n stubs = arstubs\n roots = self.roots\n freq = self.arfreq\n else: # AR(0) model\n stubs = []\n if len(stubs): # not AR(0)\n modulus = np.abs(roots)\n data = np.column_stack((roots.real, roots.imag, modulus, freq))\n roots_table = SimpleTable([('%17.4f' % row[0],\n '%+17.4fj' % row[1],\n '%17.4f' % row[2],\n '%17.4f' % row[3]) for row in data],\n headers=[' Real',\n ' Imaginary',\n ' Modulus',\n ' Frequency'],\n title=\"Roots\",\n stubs=stubs)\n\n smry.tables.append(roots_table)\n return smry\n\n\nclass ARResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(\n tsa_model.TimeSeriesResultsWrapper._wrap_attrs, _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(\n tsa_model.TimeSeriesResultsWrapper._wrap_methods, _methods)\n\n\nwrap.populate_wrapper(ARResultsWrapper, ARResults)\n", "'''Parametric Mixture Distributions\n\nCreated on Sat Jun 04 2011\n\nAuthor: Josef Perktold\n\n\nNotes:\n\nCompound Poisson has mass point at zero\nhttps://en.wikipedia.org/wiki/Compound_Poisson_distribution\nand would need special treatment\n\nneed a distribution that has discrete mass points and continuous range, e.g.\ncompound Poisson, Tweedie (for some parameter range),\npdf of Tobit model (?) - truncation with clipping\n\nQuestion: Metaclasses and class factories for generating new distributions from\nexisting distributions by transformation, mixing, compounding\n\n'''\n\n\nimport numpy as np\nfrom scipy import stats\n\nclass ParametricMixtureD(object):\n '''mixtures with a discrete distribution\n\n The mixing distribution is a discrete distribution like scipy.stats.poisson.\n All distributions in the mixture are of the same type, are parameterized\n by the outcome of the mixing distribution, and have to be continuous\n distributions (or have a pdf method).\n As an example, a mixture of normally distributed random variables with\n Poisson as the mixing distribution.\n\n\n assumes vectorized shape, loc and scale as in scipy.stats.distributions\n\n assume mixing_dist is frozen\n\n initialization looks fragile for all possible cases of lower and upper\n bounds of the distributions.\n\n '''\n def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,\n cutoff=1e-3):\n '''create a mixture distribution\n\n Parameters\n ----------\n mixing_dist : discrete frozen distribution\n mixing distribution\n base_dist : continuous distribution\n parameterized distributions in the mixture\n bd_args_func : callable\n function that builds the tuple of args for the base_dist.\n The function obtains as argument the values in the support of\n the mixing distribution and should return an empty tuple or\n a tuple of arrays.\n bd_kwds_func : callable\n function that builds the dictionary of kwds for the base_dist.\n The function obtains as argument the values in the support of\n the mixing distribution and should return an empty dictionary or\n a dictionary with arrays as values.\n cutoff : float\n If the mixing distribution has infinite support, then the\n distribution is truncated with approximately (subject to integer\n conversion) the
cutoff probability in the missing tail. Random\n draws that are outside the truncated range are clipped, that is\n assigned to the highest or lowest value in the truncated support.\n\n '''\n self.mixing_dist = mixing_dist\n self.base_dist = base_dist\n #self.bd_args = bd_args\n if not np.isneginf(mixing_dist.dist.a):\n lower = mixing_dist.dist.a\n else:\n lower = mixing_dist.ppf(1e-4)\n if not np.isposinf(mixing_dist.dist.b):\n upper = mixing_dist.dist.b\n else:\n upper = mixing_dist.isf(1e-4)\n self.ma = lower\n self.mb = upper\n mixing_support = np.arange(lower, upper+1)\n self.mixing_probs = mixing_dist.pmf(mixing_support)\n\n self.bd_args = bd_args_func(mixing_support)\n self.bd_kwds = bd_kwds_func(mixing_support)\n\n def rvs(self, size=1):\n mrvs = self.mixing_dist.rvs(size)\n #TODO: check strange cases ? this assumes continuous integers\n mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)\n\n bd_args = tuple(md[mrvs_idx] for md in self.bd_args)\n bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)\n kwds = {'size':size}\n kwds.update(bd_kwds)\n rvs = self.base_dist.rvs(*bd_args, **kwds) # use the indexed args, not self.bd_args\n return rvs, mrvs_idx\n\n\n\n\n\n def pdf(self, x):\n x = np.asarray(x)\n if np.size(x) > 1:\n x = x[...,None] #[None, ...]\n bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)\n prob = (bd_probs * self.mixing_probs).sum(-1)\n return prob, bd_probs\n\n def cdf(self, x):\n x = np.asarray(x)\n if np.size(x) > 1:\n x = x[...,None] #[None, ...]\n bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)\n prob = (bd_probs * self.mixing_probs).sum(-1)\n return prob, bd_probs\n\n\n#try:\n\nclass ClippedContinuous(object):\n '''clipped continuous distribution with a mass point at clip_lower\n\n\n Notes\n -----\n first version, to try out possible designs\n insufficient checks for valid arguments and not clear\n whether it works for distributions that have compact support\n\n clip_lower is fixed and independent of the distribution parameters.\n The clip_lower point in the pdf has to be interpreted as a mass point,\n i.e.
different treatment in integration and expect function, which means\n none of the generic methods for this can be used.\n\n maybe this will be better designed as a mixture between a degenerate or\n discrete and a continuous distribution\n\n Warning: uses equality to check for clip_lower values in function\n arguments, since these are floating points, the comparison might fail\n if clip_lower values are not exactly equal.\n We could add a check whether the values are in a small neighborhood, but\n it would be expensive (need to search and check all values).\n\n '''\n\n def __init__(self, base_dist, clip_lower):\n self.base_dist = base_dist\n self.clip_lower = clip_lower\n\n def _get_clip_lower(self, kwds):\n '''helper method to get clip_lower from kwds or attribute\n\n '''\n if 'clip_lower' not in kwds:\n clip_lower = self.clip_lower\n else:\n clip_lower = kwds.pop('clip_lower')\n return clip_lower, kwds\n\n def rvs(self, *args, **kwds):\n clip_lower, kwds = self._get_clip_lower(kwds)\n rvs_ = self.base_dist.rvs(*args, **kwds)\n #same as numpy.clip ?\n rvs_[rvs_ < clip_lower] = clip_lower\n return rvs_\n\n\n\n def pdf(self, x, *args, **kwds):\n x = np.atleast_1d(x)\n if 'clip_lower' not in kwds:\n clip_lower = self.clip_lower\n else:\n #allow clip_lower to be a possible parameter\n clip_lower = kwds.pop('clip_lower')\n pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))\n clip_mask = (x == self.clip_lower)\n if np.any(clip_mask):\n clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)\n pdf_raw[clip_mask] = clip_prob\n\n #the following will be handled by sub-classing rv_continuous\n pdf_raw[x < clip_lower] = 0\n\n return pdf_raw\n\n def cdf(self, x, *args, **kwds):\n if 'clip_lower' not in kwds:\n clip_lower = self.clip_lower\n else:\n #allow clip_lower to be a possible parameter\n clip_lower = kwds.pop('clip_lower')\n cdf_raw = self.base_dist.cdf(x, *args, **kwds)\n\n #not needed if equality test is used\n## clip_mask = (x == self.clip_lower)\n## if np.any(clip_mask):\n## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)\n## pdf_raw[clip_mask] = clip_prob\n\n #the following will be handled by sub-classing rv_continuous\n #if self.a is defined\n cdf_raw[x < clip_lower] = 0\n\n return cdf_raw\n\n def sf(self, x, *args, **kwds):\n if 'clip_lower' not in kwds:\n clip_lower = self.clip_lower\n else:\n #allow clip_lower to be a possible parameter\n clip_lower = kwds.pop('clip_lower')\n\n sf_raw = self.base_dist.sf(x, *args, **kwds)\n sf_raw[x <= clip_lower] = 1\n\n return sf_raw\n\n\n def ppf(self, x, *args, **kwds):\n raise NotImplementedError\n\n def plot(self, x, *args, **kwds):\n\n clip_lower, kwds = self._get_clip_lower(kwds)\n mass = self.pdf(clip_lower, *args, **kwds)\n xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))\n import matplotlib.pyplot as plt\n #x = np.linspace(-4, 4, 21)\n #plt.figure()\n plt.xlim(clip_lower-0.1, x.max())\n #remove duplicate calculation\n xpdf = self.pdf(x, *args, **kwds)\n plt.ylim(0, max(mass, xpdf.max())*1.1)\n plt.plot(xr, self.pdf(xr, *args, **kwds))\n #plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))\n plt.stem([clip_lower], [mass],\n linefmt='b-', markerfmt='bo', basefmt='r-')\n return\n\n\n\n\nif __name__ == '__main__':\n\n doplots = 1\n\n #*********** Poisson-Normal Mixture\n mdist = stats.poisson(2.)\n bdist = stats.norm\n bd_args_fn = lambda x: ()\n #bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}\n bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}\n\n\n pd = 
ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)\n print(pd.pdf(1))\n p, bp = pd.pdf(np.linspace(0,20,21))\n pc, bpc = pd.cdf(np.linspace(0,20,21))\n print(pd.rvs())\n rvs, m = pd.rvs(size=1000)\n\n\n if doplots:\n import matplotlib.pyplot as plt\n plt.hist(rvs, bins = 100)\n plt.title('poisson mixture of normal distributions')\n\n #********** clipped normal distribution (Tobit)\n\n bdist = stats.norm\n clip_lower_ = 0. #-0.5\n cnorm = ClippedContinuous(bdist, clip_lower_)\n x = np.linspace(1e-8, 4, 11)\n print(cnorm.pdf(x))\n print(cnorm.cdf(x))\n\n if doplots:\n #plt.figure()\n #cnorm.plot(x)\n plt.figure()\n cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))\n plt.title('clipped normal distribution')\n\n fig = plt.figure()\n for i, loc in enumerate([0., 0.5, 1.,2.]):\n fig.add_subplot(2,2,i+1)\n cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))\n plt.title('clipped normal, loc = %3.2f' % loc)\n\n\n loc = 1.5\n rvs = cnorm.rvs(loc=loc, size=2000)\n plt.figure()\n plt.hist(rvs, bins=50)\n plt.title('clipped normal rvs, loc = %3.2f' % loc)\n\n\n #plt.show()\n" ]
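The ParametricMixtureD code above evaluates the mixture pdf as a pmf-weighted sum of component pdfs over the truncated mixing support. A minimal standalone sketch of the same computation using scipy directly; the Poisson mean, the normal component scale, and the 1e-4 tail cutoff below are illustrative assumptions, not values fixed by the code above.

import numpy as np
from scipy import stats

# Truncate the infinite Poisson support at the upper 1e-4 tail,
# mirroring how ParametricMixtureD.__init__ bounds the mixing support
mixing = stats.poisson(2.0)
support = np.arange(0, mixing.isf(1e-4) + 1)
weights = mixing.pmf(support)  # mixing probabilities over the kept support

def mixture_pdf(x, locs=support, scale=0.1):
    # Broadcast the evaluation points against the component locations,
    # then contract with the mixing probabilities over the last axis
    x = np.asarray(x, dtype=float)[..., None]
    comp = stats.norm.pdf(x, loc=locs, scale=scale)
    return (comp * weights).sum(-1)

print(mixture_pdf(np.linspace(0.0, 20.0, 21)))

This mirrors ParametricMixtureD.pdf, which stacks the component densities along a trailing axis and sums them against self.mixing_probs.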
[ [ "numpy.dot", "numpy.log", "numpy.clip", "numpy.eye", "pandas.DataFrame", "numpy.exp", "numpy.zeros_like", "numpy.var", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.testing.assert_equal", "pandas.to_datetime", "pandas.Series", "pandas.period_range", "pandas.RangeIndex", "numpy.arange", "pandas.Index", "pandas.DataFrame", "numpy.random.normal", "numpy.testing.assert_raises", "pandas.date_range", "pandas.Timestamp" ], [ "numpy.dot", "numpy.log", "numpy.abs", "numpy.linalg.inv", "numpy.linalg.slogdet", "numpy.kron", "numpy.roots", "numpy.linalg.pinv", "numpy.arctan2", "numpy.identity", "numpy.column_stack", "numpy.correlate", "numpy.zeros", "numpy.sum" ], [ "matplotlib.pyplot.stem", "numpy.ones_like", "numpy.linspace", "matplotlib.pyplot.title", "numpy.asarray", "numpy.arange", "numpy.sqrt", "numpy.isposinf", "numpy.clip", "numpy.atleast_1d", "numpy.concatenate", "scipy.stats.poisson", "numpy.isneginf", "numpy.size", "numpy.any", "matplotlib.pyplot.hist", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mizolotu/izi
[ "d2d00813919259aad3dcdbc54039c30cbb16b125", "d2d00813919259aad3dcdbc54039c30cbb16b125", "d2d00813919259aad3dcdbc54039c30cbb16b125" ]
[ "plot_roc_old.py", "reinforcement_learning/common/buffers.py", "reinforcement_learning/a2c/a2c.py" ]
[ "import os\nimport os.path as osp\nimport plotly.io as pio\nimport plotly.graph_objs as go\nimport numpy as np\nimport argparse as arp\n\nfrom common.plot import generate_line_scatter\nfrom config import *\n\nif __name__ == '__main__':\n\n parser = arp.ArgumentParser(description='Plot ROC')\n parser.add_argument('-m', '--models', help='Models used for detection', nargs='+', default=['ae', 'som', 'mlp'])\n parser.add_argument('-l', '--labels', help='Labels used for model training', nargs='+', default=['2,3,4', '1,3,4', '1,2,4', '1,2,3'])\n parser.add_argument('-a', '--attacks', help='Attacks labels', nargs='+', default=['1', '2', '3', '4'])\n args = parser.parse_args()\n\n colors = ['royalblue', 'firebrick', 'seagreen']\n dashes = [None, 'dash', 'dot', 'dashdot']\n model_attacks = {\n '1': 'DDoS',\n '2': 'Web',\n '3': 'Infiltration',\n '4': 'Botnet',\n '1,2,3,4': 'Baseline'\n }\n\n for label in os.listdir(ids_results_dir):\n\n if label in args.attacks:\n\n # prepare data\n\n data = []\n names = []\n models_path = osp.join(ids_results_dir, label)\n models = sorted(os.listdir(models_path))\n model_names = []\n cs = []\n cs_sorted = []\n ds = []\n ds_sorted = []\n attack_labels_str = []\n ls = []\n ws = []\n models_ = []\n\n for m in models:\n\n spl = m.split('_')\n train_labels = spl[-2]\n\n if train_labels in args.labels:\n\n m_type = spl[0]\n if m_type in args.models:\n\n m_type = m_type.upper()\n\n models_.append(m)\n if train_labels in model_attacks.keys():\n a_type = model_attacks[train_labels]\n else:\n a_type = None\n ma_type = f'{m_type}_{a_type}'\n if ma_type not in ls:\n ls.append(ma_type)\n a_idx = ls.index(ma_type)\n cs.append(colors[a_idx])\n\n w_size = spl[-1]\n if w_size not in ws:\n ws.append(w_size)\n w_idx = ws.index(w_size)\n ds.append(dashes[w_idx])\n\n if a_type is not None:\n model_name = f'{a_type} {m_type}, {w_size} sec.'\n else:\n model_name = f'{m_type}, {w_size} sec.'\n model_names.append(model_name)\n if train_labels not in attack_labels_str:\n attack_labels_str.append(train_labels)\n\n m_idx = sorted(range(len(model_names)), key=lambda k: model_names[k])\n for idx in m_idx:\n model_path = osp.join(models_path, models_[idx])\n if osp.isdir(model_path) and roc_fname in os.listdir(model_path):\n roc_path = osp.join(model_path, roc_fname)\n roc = np.genfromtxt(roc_path, dtype=float, delimiter=' ')\n x = roc[:, 0]\n y = roc[:, 1]\n data.append([x, y])\n names.append(model_names[idx])\n cs_sorted.append(cs[idx])\n ds_sorted.append(ds[idx])\n\n # generate layout and traces\n\n traces, layout = generate_line_scatter(names, data, cs_sorted, ds_sorted, xlabel='FPR', ylabel='TPR', xrange=[0, 0.01], yrange=[0, 0.8])\n\n # save results\n\n ftypes = ['png', 'pdf']\n fig_fname = '{0}/{1}_{2}'.format(roc_dir, label, '_'.join(attack_labels_str))\n fig = go.Figure(data=traces, layout=layout)\n for ftype in ftypes:\n pio.write_image(fig, '{0}.{1}'.format(fig_fname, ftype))", "import random\nfrom typing import Optional, List, Union\n\nimport numpy as np\n\nfrom common.segment_tree import SumSegmentTree, MinSegmentTree\nfrom common.vec_env import VecNormalize\n\n\nclass ReplayBuffer(object):\n def __init__(self, size: int):\n \"\"\"\n Implements a ring buffer (FIFO).\n\n :param size: (int) Max number of transitions to store in the buffer. 
When the buffer overflows the old\n memories are dropped.\n \"\"\"\n self._storage = []\n self._maxsize = size\n self._next_idx = 0\n\n def __len__(self) -> int:\n return len(self._storage)\n\n @property\n def storage(self):\n \"\"\"[(Union[np.ndarray, int], Union[np.ndarray, int], float, Union[np.ndarray, int], bool)]: content of the replay buffer\"\"\"\n return self._storage\n\n @property\n def buffer_size(self) -> int:\n \"\"\"float: Max capacity of the buffer\"\"\"\n return self._maxsize\n\n def can_sample(self, n_samples: int) -> bool:\n \"\"\"\n Check if n_samples samples can be sampled\n from the buffer.\n\n :param n_samples: (int)\n :return: (bool)\n \"\"\"\n return len(self) >= n_samples\n\n def is_full(self) -> int:\n \"\"\"\n Check whether the replay buffer is full or not.\n\n :return: (bool)\n \"\"\"\n return len(self) == self.buffer_size\n\n def add(self, obs_t, action, reward, obs_tp1, done):\n \"\"\"\n add a new transition to the buffer\n\n :param obs_t: (Union[np.ndarray, int]) the last observation\n :param action: (Union[np.ndarray, int]) the action\n :param reward: (float) the reward of the transition\n :param obs_tp1: (Union[np.ndarray, int]) the current observation\n :param done: (bool) is the episode done\n \"\"\"\n data = (obs_t, action, reward, obs_tp1, done)\n\n if self._next_idx >= len(self._storage):\n self._storage.append(data)\n else:\n self._storage[self._next_idx] = data\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n def extend(self, obs_t, action, reward, obs_tp1, done):\n \"\"\"\n add a new batch of transitions to the buffer\n\n :param obs_t: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the last batch of observations\n :param action: (Union[Tuple[Union[np.ndarray, int]]], np.ndarray]) the batch of actions\n :param reward: (Union[Tuple[float], np.ndarray]) the batch of the rewards of the transition\n :param obs_tp1: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the current batch of observations\n :param done: (Union[Tuple[bool], np.ndarray]) terminal status of the batch\n\n Note: uses the same names as .add to keep compatibility with named argument passing\n but expects iterables and arrays with more than 1 dimensions\n \"\"\"\n for data in zip(obs_t, action, reward, obs_tp1, done):\n if self._next_idx >= len(self._storage):\n self._storage.append(data)\n else:\n self._storage[self._next_idx] = data\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n @staticmethod\n def _normalize_obs(obs: np.ndarray,\n env: Optional[VecNormalize] = None) -> np.ndarray:\n \"\"\"\n Helper for normalizing the observation.\n \"\"\"\n if env is not None:\n return env.normalize_obs(obs)\n return obs\n\n @staticmethod\n def _normalize_reward(reward: np.ndarray,\n env: Optional[VecNormalize] = None) -> np.ndarray:\n \"\"\"\n Helper for normalizing the reward.\n \"\"\"\n if env is not None:\n return env.normalize_reward(reward)\n return reward\n\n def _encode_sample(self, idxes: Union[List[int], np.ndarray], env: Optional[VecNormalize] = None):\n obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []\n for i in idxes:\n data = self._storage[i]\n obs_t, action, reward, obs_tp1, done = data\n obses_t.append(np.array(obs_t, copy=False))\n actions.append(np.array(action, copy=False))\n rewards.append(reward)\n obses_tp1.append(np.array(obs_tp1, copy=False))\n dones.append(done)\n return (self._normalize_obs(np.array(obses_t), env),\n np.array(actions),\n self._normalize_reward(np.array(rewards), env),\n self._normalize_obs(np.array(obses_tp1), 
env),\n np.array(dones))\n\n def sample(self, batch_size: int, env: Optional[VecNormalize] = None, **_kwargs):\n \"\"\"\n Sample a batch of experiences.\n\n :param batch_size: (int) How many transitions to sample.\n :param env: (Optional[VecNormalize]) associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n - obs_batch: (np.ndarray) batch of observations\n - act_batch: (numpy float) batch of actions executed given obs_batch\n - rew_batch: (numpy float) rewards received as results of executing act_batch\n - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch\n - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode\n and 0 otherwise.\n \"\"\"\n idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]\n return self._encode_sample(idxes, env=env)\n\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n def __init__(self, size, alpha):\n \"\"\"\n Create Prioritized Replay buffer.\n\n See Also ReplayBuffer.__init__\n\n :param size: (int) Max number of transitions to store in the buffer. When the buffer overflows the old memories\n are dropped.\n :param alpha: (float) how much prioritization is used (0 - no prioritization, 1 - full prioritization)\n \"\"\"\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def add(self, obs_t, action, reward, obs_tp1, done):\n \"\"\"\n add a new transition to the buffer\n\n :param obs_t: (Any) the last observation\n :param action: ([float]) the action\n :param reward: (float) the reward of the transition\n :param obs_tp1: (Any) the current observation\n :param done: (bool) is the episode done\n \"\"\"\n idx = self._next_idx\n super().add(obs_t, action, reward, obs_tp1, done)\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] = self._max_priority ** self._alpha\n\n def extend(self, obs_t, action, reward, obs_tp1, done):\n \"\"\"\n add a new batch of transitions to the buffer\n\n :param obs_t: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the last batch of observations\n :param action: (Union[Tuple[Union[np.ndarray, int]]], np.ndarray]) the batch of actions\n :param reward: (Union[Tuple[float], np.ndarray]) the batch of the rewards of the transition\n :param obs_tp1: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the current batch of observations\n :param done: (Union[Tuple[bool], np.ndarray]) terminal status of the batch\n\n Note: uses the same names as .add to keep compatibility with named argument passing\n but expects iterables and arrays with more than 1 dimensions\n \"\"\"\n idx = self._next_idx\n super().extend(obs_t, action, reward, obs_tp1, done)\n while idx != self._next_idx:\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] = self._max_priority ** self._alpha\n idx = (idx + 1) % self._maxsize\n\n def _sample_proportional(self, batch_size):\n mass = []\n total = self._it_sum.sum(0, len(self._storage) - 1)\n # TODO(szymon): should we ensure no repeats?\n mass = np.random.random(size=batch_size) * total\n idx = self._it_sum.find_prefixsum_idx(mass)\n return idx\n\n def sample(self, batch_size: int, beta: float = 0, env: Optional[VecNormalize] = None):\n \"\"\"\n Sample a batch of experiences.\n\n compared to ReplayBuffer.sample\n it also 
returns importance weights and idxes\n of sampled experiences.\n\n :param batch_size: (int) How many transitions to sample.\n :param beta: (float) To what degree to use importance weights (0 - no corrections, 1 - full correction)\n :param env: (Optional[VecNormalize]) associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n - obs_batch: (np.ndarray) batch of observations\n - act_batch: (numpy float) batch of actions executed given obs_batch\n - rew_batch: (numpy float) rewards received as results of executing act_batch\n - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch\n - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode\n and 0 otherwise.\n - weights: (numpy float) Array of shape (batch_size,) and dtype np.float32 denoting importance weight of\n each sampled transition\n - idxes: (numpy int) Array of shape (batch_size,) and dtype np.int32 idexes in buffer of sampled experiences\n \"\"\"\n assert beta > 0\n\n idxes = self._sample_proportional(batch_size)\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage)) ** (-beta)\n p_sample = self._it_sum[idxes] / self._it_sum.sum()\n weights = (p_sample * len(self._storage)) ** (-beta) / max_weight\n encoded_sample = self._encode_sample(idxes, env=env)\n return tuple(list(encoded_sample) + [weights, idxes])\n\n def update_priorities(self, idxes, priorities):\n \"\"\"\n Update priorities of sampled transitions.\n\n sets priority of transition at index idxes[i] in buffer\n to priorities[i].\n\n :param idxes: ([int]) List of idxes of sampled transitions\n :param priorities: ([float]) List of updated priorities corresponding to transitions at the sampled idxes\n denoted by variable `idxes`.\n \"\"\"\n assert len(idxes) == len(priorities)\n assert np.min(priorities) > 0\n assert np.min(idxes) >= 0\n assert np.max(idxes) < len(self.storage)\n self._it_sum[idxes] = priorities ** self._alpha\n self._it_min[idxes] = priorities ** self._alpha\n\n self._max_priority = max(self._max_priority, np.max(priorities))\n", "import time\n\nfrom reinforcement_learning import gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom reinforcement_learning import logger\nfrom reinforcement_learning.common import explained_variance, tf_util, ActorCriticRLModel, SetVerbosity, TensorboardWriter\nfrom reinforcement_learning.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy\nfrom reinforcement_learning.common.runners import AbstractEnvRunner\nfrom reinforcement_learning.common.schedules import Scheduler\nfrom reinforcement_learning.common.tf_util import mse, total_episode_reward_logger\nfrom reinforcement_learning.common.math_util import safe_mean\n\n\ndef discount_with_dones(rewards, dones, gamma):\n \"\"\"\n Apply the discount value to the reward, where the environment is not done\n\n :param rewards: ([float]) The rewards\n :param dones: ([bool]) Whether an environment is done or not\n :param gamma: (float) The discount value\n :return: ([float]) The discounted rewards\n \"\"\"\n discounted = []\n ret = 0 # Return: discounted reward\n for reward, done in zip(rewards[::-1], dones[::-1]):\n ret = reward + gamma * ret * (1. 
- done) # fixed off by one bug\n discounted.append(ret)\n return discounted[::-1]\n\n\nclass A2C(ActorCriticRLModel):\n \"\"\"\n The A2C (Advantage Actor Critic) model class, https://arxiv.org/abs/1602.01783\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) Discount factor\n :param n_steps: (int) The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param vf_coef: (float) Value function coefficient for the loss calculation\n :param ent_coef: (float) Entropy coefficient for the loss calculation\n :param max_grad_norm: (float) The maximum value for the gradient clipping\n :param learning_rate: (float) The learning rate\n :param alpha: (float) RMSProp decay parameter (default: 0.99)\n :param momentum: (float) RMSProp momentum parameter (default: 0.0)\n :param epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)\n (default: 1e-5)\n :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',\n 'double_linear_con', 'middle_drop' or 'double_middle_drop')\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n (used only for loading)\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. 
Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n\n def __init__(self, policy, env, gamma=0.99, n_steps=2048, vf_coef=0.5, ent_coef=0.001, max_grad_norm=0.5,\n learning_rate=1e-3, alpha=0.99, momentum=0.0, epsilon=1e-5, lr_schedule='constant',\n verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,\n full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None):\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.vf_coef = vf_coef\n self.ent_coef = ent_coef\n self.max_grad_norm = max_grad_norm\n self.alpha = alpha\n self.momentum = momentum\n self.epsilon = epsilon\n self.lr_schedule = lr_schedule\n self.learning_rate = learning_rate\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n\n self.learning_rate_ph = None\n self.n_batch = None\n self.actions_ph = None\n self.advs_ph = None\n self.rewards_ph = None\n self.pg_loss = None\n self.vf_loss = None\n self.entropy = None\n self.apply_backprop = None\n self.train_model = None\n self.step_model = None\n self.proba_step = None\n self.value = None\n self.initial_state = None\n self.learning_rate_schedule = None\n self.summary = None\n\n super(A2C, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n # if we are loading, it is possible the environment is not known, however the obs and action space are known\n if _init_setup_model:\n self.setup_model()\n\n def _make_runner(self) -> AbstractEnvRunner:\n return A2CRunner(self.env, self, n_steps=self.n_steps, gamma=self.gamma)\n\n def _get_pretrain_placeholders(self):\n policy = self.train_model\n if isinstance(self.action_space, gym.spaces.Discrete):\n return policy.obs_ph, self.actions_ph, policy.policy\n return policy.obs_ph, self.actions_ph, policy.deterministic_action\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the A2C model must be an \" \\\n \"instance of common.policies.ActorCriticPolicy.\"\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.set_random_seed(self.seed)\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n\n self.n_batch = self.n_envs * self.n_steps\n\n n_batch_step = None\n n_batch_train = None\n if issubclass(self.policy, RecurrentActorCriticPolicy):\n n_batch_step = self.n_envs\n n_batch_train = self.n_envs * self.n_steps\n\n step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n n_batch_step, reuse=False, **self.policy_kwargs)\n\n with tf.compat.v1.variable_scope(\"train_model\", reuse=True,\n custom_getter=tf_util.outer_scope_getter(\"train_model\")):\n train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,\n self.n_steps, n_batch_train, reuse=True, **self.policy_kwargs)\n\n with tf.compat.v1.variable_scope(\"loss\", reuse=False):\n self.actions_ph = train_model.pdtype.sample_placeholder([None], name=\"action_ph\")\n self.advs_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"advs_ph\")\n self.rewards_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"rewards_ph\")\n self.learning_rate_ph = tf.compat.v1.placeholder(tf.float32, [], 
name=\"learning_rate_ph\")\n\n neglogpac = train_model.proba_distribution.neglogp(self.actions_ph)\n self.entropy = tf.reduce_mean(input_tensor=train_model.proba_distribution.entropy())\n self.pg_loss = tf.reduce_mean(input_tensor=self.advs_ph * neglogpac)\n self.vf_loss = mse(tf.squeeze(train_model.value_flat), self.rewards_ph)\n # https://arxiv.org/pdf/1708.04782.pdf#page=9, https://arxiv.org/pdf/1602.01783.pdf#page=4\n # and https://github.com/dennybritz/reinforcement-learning/issues/34\n # suggest to add an entropy component in order to improve exploration.\n loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef\n\n tf.compat.v1.summary.scalar('entropy_loss', self.entropy)\n tf.compat.v1.summary.scalar('policy_gradient_loss', self.pg_loss)\n tf.compat.v1.summary.scalar('value_function_loss', self.vf_loss)\n tf.compat.v1.summary.scalar('loss', loss)\n\n self.params = tf_util.get_trainable_vars(\"model\")\n grads = tf.gradients(ys=loss, xs=self.params)\n if self.max_grad_norm is not None:\n grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)\n grads = list(zip(grads, self.params))\n\n with tf.compat.v1.variable_scope(\"input_info\", reuse=False):\n tf.compat.v1.summary.scalar('discounted_rewards', tf.reduce_mean(input_tensor=self.rewards_ph))\n tf.compat.v1.summary.scalar('learning_rate', tf.reduce_mean(input_tensor=self.learning_rate_ph))\n tf.compat.v1.summary.scalar('advantage', tf.reduce_mean(input_tensor=self.advs_ph))\n if self.full_tensorboard_log:\n tf.compat.v1.summary.histogram('discounted_rewards', self.rewards_ph)\n tf.compat.v1.summary.histogram('learning_rate', self.learning_rate_ph)\n tf.compat.v1.summary.histogram('advantage', self.advs_ph)\n if tf_util.is_image(self.observation_space):\n tf.compat.v1.summary.image('observation', train_model.obs_ph)\n else:\n tf.compat.v1.summary.histogram('observation', train_model.obs_ph)\n\n trainer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.alpha,\n epsilon=self.epsilon, momentum=self.momentum)\n self.apply_backprop = trainer.apply_gradients(grads)\n\n self.train_model = train_model\n self.step_model = step_model\n self.step = step_model.step\n self.proba_step = step_model.proba_step\n self.value = step_model.value\n self.initial_state = step_model.initial_state\n tf.compat.v1.global_variables_initializer().run(session=self.sess)\n\n self.summary = tf.compat.v1.summary.merge_all()\n\n def _train_step(self, obs, states, rewards, masks, actions, values, update, writer=None):\n \"\"\"\n applies a training step to the model\n\n :param obs: ([float]) The input observations\n :param states: ([float]) The states (used for recurrent policies)\n :param rewards: ([float]) The rewards from the environment\n :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)\n :param actions: ([float]) The actions taken\n :param values: ([float]) The logits values\n :param update: (int) the current step iteration\n :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n :return: (float, float, float) policy loss, value loss, policy entropy\n \"\"\"\n advs = rewards - values\n cur_lr = None\n for _ in range(len(obs)):\n cur_lr = self.learning_rate_schedule.value()\n assert cur_lr is not None, \"Error: the observation input array cannon be empty\"\n\n td_map = {self.train_model.obs_ph: obs, self.actions_ph: actions, self.advs_ph: advs,\n self.rewards_ph: rewards, self.learning_rate_ph: cur_lr}\n if states is not None:\n 
td_map[self.train_model.states_ph] = states\n td_map[self.train_model.dones_ph] = masks\n\n if writer is not None:\n # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)\n if self.full_tensorboard_log and (1 + update) % 10 == 0:\n run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)\n run_metadata = tf.compat.v1.RunMetadata()\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop],\n td_map, options=run_options, run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % (update * self.n_batch))\n else:\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n writer.add_summary(summary, update * self.n_batch)\n\n else:\n policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n\n return policy_loss, value_loss, policy_entropy\n\n def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name=\"A2C\", reset_num_timesteps=True):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n callback = self._init_callback(callback)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) as writer:\n self._setup_learn()\n self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps, schedule=self.lr_schedule)\n\n t_start = time.time()\n callback.on_training_start(locals(), globals())\n\n for update in range(1, total_timesteps // self.n_batch + 1):\n\n callback.on_rollout_start()\n # true_reward is the reward without discount\n rollout = self.runner.run(callback)\n # unpack\n obs, states, rewards, masks, actions, values, ep_infos, true_reward = rollout\n callback.update_locals(locals())\n callback.on_rollout_end()\n\n # Early stopping due to the callback\n if not self.runner.continue_training:\n break\n\n self.ep_info_buf.extend(ep_infos)\n _, value_loss, policy_entropy = self._train_step(obs, states, rewards, masks, actions, values,\n self.num_timesteps // self.n_batch, writer)\n n_seconds = time.time() - t_start\n fps = int((update * self.n_batch) / n_seconds)\n\n if writer is not None:\n total_episode_reward_logger(self.episode_reward,\n true_reward.reshape((self.n_envs, self.n_steps)),\n masks.reshape((self.n_envs, self.n_steps)),\n writer, self.num_timesteps)\n\n if self.verbose >= 1 and (update % log_interval == 0 or update == 1):\n explained_var = explained_variance(values, rewards)\n logger.record_tabular(\"nupdates\", update)\n logger.record_tabular(\"total_timesteps\", self.num_timesteps)\n logger.record_tabular(\"fps\", fps)\n logger.record_tabular(\"policy_entropy\", float(policy_entropy))\n logger.record_tabular(\"value_loss\", float(value_loss))\n logger.record_tabular(\"explained_variance\", float(explained_var))\n if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:\n logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))\n #logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))\n logger.dump_tabular()\n\n callback.on_training_end()\n return self\n\n def save(self, save_path, cloudpickle=False):\n data = {\n \"gamma\": self.gamma,\n \"n_steps\": self.n_steps,\n \"vf_coef\": self.vf_coef,\n \"ent_coef\": self.ent_coef,\n 
\"max_grad_norm\": self.max_grad_norm,\n \"learning_rate\": self.learning_rate,\n \"alpha\": self.alpha,\n \"epsilon\": self.epsilon,\n \"lr_schedule\": self.lr_schedule,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n \"n_cpu_tf_sess\": self.n_cpu_tf_sess,\n \"seed\": self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)\n\n\nclass A2CRunner(AbstractEnvRunner):\n def __init__(self, env, model, n_steps=5, gamma=0.99):\n \"\"\"\n A runner to learn the policy of an environment for an a2c model\n\n :param env: (Gym environment) The environment to learn from\n :param model: (Model) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n \"\"\"\n super(A2CRunner, self).__init__(env=env, model=model, n_steps=n_steps)\n self.gamma = gamma\n\n def _run(self):\n \"\"\"\n Run a learning step of the model\n\n :return: ([float], [float], [float], [bool], [float], [float])\n observations, states, rewards, masks, actions, values\n \"\"\"\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []\n mb_states = self.states\n ep_infos = []\n for _ in range(self.n_steps):\n actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_dones.append(self.dones)\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n obs, rewards, dones, infos = self.env.step(clipped_actions)\n\n self.model.num_timesteps += self.n_envs\n\n if self.callback is not None:\n # Abort training early\n self.callback.update_locals(locals())\n if self.callback.on_step() is False:\n self.continue_training = False\n # Return dummy values\n return [None] * 8\n\n for info, reward in zip(infos, rewards):\n maybe_ep_info = {'r': reward} # info.get('episode')\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n\n self.states = states\n self.dones = dones\n self.obs = obs\n mb_rewards.append(rewards)\n mb_dones.append(self.dones)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)\n mb_actions = np.asarray(mb_actions, dtype=self.env.action_space.dtype).swapaxes(0, 1)\n mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(0, 1)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)\n mb_masks = mb_dones[:, :-1]\n mb_dones = mb_dones[:, 1:]\n true_rewards = np.copy(mb_rewards)\n last_values = self.model.value(self.obs, self.states, self.dones).tolist()\n # discount/bootstrap off value fn\n for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):\n rewards = rewards.tolist()\n dones = dones.tolist()\n if dones[-1] == 0:\n rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]\n else:\n rewards = discount_with_dones(rewards, dones, self.gamma)\n mb_rewards[n] = rewards\n\n # convert from [n_env, n_steps, ...] 
to [n_steps * n_env, ...]\n mb_rewards = mb_rewards.reshape(-1, *mb_rewards.shape[2:])\n mb_actions = mb_actions.reshape(-1, *mb_actions.shape[2:])\n mb_values = mb_values.reshape(-1, *mb_values.shape[2:])\n mb_masks = mb_masks.reshape(-1, *mb_masks.shape[2:])\n true_rewards = true_rewards.reshape(-1, *true_rewards.shape[2:])\n return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, ep_infos, true_rewards\n" ]
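discount_with_dones in a2c.py above is the piece that is easiest to sanity-check in isolation; a worked example (a standalone copy of the function, with made-up rewards and gamma):

def discount_with_dones(rewards, dones, gamma):
    # Backward pass: the running return resets wherever done == 1.
    discounted, ret = [], 0.0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        ret = reward + gamma * ret * (1.0 - done)
        discounted.append(ret)
    return discounted[::-1]

# The episode ends after step 2, so the third reward does not leak
# backwards into the first two returns:
print(discount_with_dones([1.0, 1.0, 1.0], [0, 1, 0], 0.9))
# -> [1.9, 1.0, 1.0]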
[ [ "numpy.genfromtxt" ], [ "numpy.max", "numpy.array", "numpy.random.random", "numpy.min" ], [ "tensorflow.Graph", "tensorflow.compat.v1.summary.merge_all", "tensorflow.compat.v1.train.RMSPropOptimizer", "numpy.clip", "numpy.asarray", "tensorflow.reduce_mean", "tensorflow.compat.v1.summary.image", "tensorflow.gradients", "tensorflow.squeeze", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.RunOptions", "numpy.copy", "tensorflow.compat.v1.placeholder", "tensorflow.clip_by_global_norm", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.summary.histogram", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.RunMetadata" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nightfuryyy/mmpose
[ "93c3a742c540dfb4ca515ad545cef705a07d90b4", "93c3a742c540dfb4ca515ad545cef705a07d90b4", "93c3a742c540dfb4ca515ad545cef705a07d90b4", "93c3a742c540dfb4ca515ad545cef705a07d90b4", "910d9e31dd9d46e3329be1b7567e6309d70ab64c", "93c3a742c540dfb4ca515ad545cef705a07d90b4" ]
[ "mmpose/models/backbones/vgg.py", "mmpose/datasets/datasets/mesh/mesh_base_dataset.py", "tests/test_post_processing.py", "tests/test_backbones/test_resnet.py", "mmpose/apis/inference.py", "tests/test_backbones/test_mobilenet_v3.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, constant_init, kaiming_init, normal_init\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom .base_backbone import BaseBackbone\n\n\ndef make_vgg_layer(in_channels,\n out_channels,\n num_blocks,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n dilation=1,\n with_norm=False,\n ceil_mode=False):\n layers = []\n for _ in range(num_blocks):\n layer = ConvModule(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n dilation=dilation,\n padding=dilation,\n bias=True,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n layers.append(layer)\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))\n\n return layers\n\n\[email protected]_module()\nclass VGG(BaseBackbone):\n \"\"\"VGG backbone.\n\n Args:\n depth (int): Depth of vgg, from {11, 13, 16, 19}.\n with_norm (bool): Use BatchNorm or not.\n num_classes (int): number of classes for classification.\n num_stages (int): VGG stages, normally 5.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages. If only one\n stage is specified, a single tensor (feature map) is returned,\n otherwise multiple stages are specified, a tuple of tensors will\n be returned. When it is None, the default behavior depends on\n whether num_classes is specified. If num_classes <= 0, the default\n value is (4, ), outputting the last feature map before classifier.\n If num_classes > 0, the default value is (5, ), outputting the\n classification score. Default: None.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False.\n with_last_pool (bool): Whether to keep the last pooling before\n classifier. Default: True.\n \"\"\"\n\n # Parameters to build layers. Each element specifies the number of conv in\n # each stage. For example, VGG11 contains 11 layers with learnable\n # parameters. 
11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3,\n # where 3 indicates the last three fully-connected layers.\n arch_settings = {\n 11: (1, 1, 2, 2, 2),\n 13: (2, 2, 2, 2, 2),\n 16: (2, 2, 3, 3, 3),\n 19: (2, 2, 4, 4, 4)\n }\n\n def __init__(self,\n depth,\n num_classes=-1,\n num_stages=5,\n dilations=(1, 1, 1, 1, 1),\n out_indices=None,\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n ceil_mode=False,\n with_last_pool=True):\n super().__init__()\n if depth not in self.arch_settings:\n raise KeyError(f'invalid depth {depth} for vgg')\n assert num_stages >= 1 and num_stages <= 5\n stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n assert len(dilations) == num_stages\n\n self.num_classes = num_classes\n self.frozen_stages = frozen_stages\n self.norm_eval = norm_eval\n with_norm = norm_cfg is not None\n\n if out_indices is None:\n out_indices = (5, ) if num_classes > 0 else (4, )\n assert max(out_indices) <= num_stages\n self.out_indices = out_indices\n\n self.in_channels = 3\n start_idx = 0\n vgg_layers = []\n self.range_sub_modules = []\n for i, num_blocks in enumerate(self.stage_blocks):\n num_modules = num_blocks + 1\n end_idx = start_idx + num_modules\n dilation = dilations[i]\n out_channels = 64 * 2**i if i < 4 else 512\n vgg_layer = make_vgg_layer(\n self.in_channels,\n out_channels,\n num_blocks,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n dilation=dilation,\n with_norm=with_norm,\n ceil_mode=ceil_mode)\n vgg_layers.extend(vgg_layer)\n self.in_channels = out_channels\n self.range_sub_modules.append([start_idx, end_idx])\n start_idx = end_idx\n if not with_last_pool:\n vgg_layers.pop(-1)\n self.range_sub_modules[-1][1] -= 1\n self.module_name = 'features'\n self.add_module(self.module_name, nn.Sequential(*vgg_layers))\n\n if self.num_classes > 0:\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n\n def init_weights(self, pretrained=None):\n super().init_weights(pretrained)\n if pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, _BatchNorm):\n constant_init(m, 1)\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n\n def forward(self, x):\n outs = []\n vgg_layers = getattr(self, self.module_name)\n for i in range(len(self.stage_blocks)):\n for j in range(*self.range_sub_modules[i]):\n vgg_layer = vgg_layers[j]\n x = vgg_layer(x)\n if i in self.out_indices:\n outs.append(x)\n if self.num_classes > 0:\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n outs.append(x)\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def _freeze_stages(self):\n vgg_layers = getattr(self, self.module_name)\n for i in range(self.frozen_stages):\n for j in range(*self.range_sub_modules[i]):\n m = vgg_layers[j]\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport copy as cp\nimport os\nfrom abc import ABCMeta\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmpose.datasets.pipelines import Compose\n\n\nclass MeshBaseDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Base dataset for 3D human mesh estimation task. In 3D humamesh\n estimation task, all datasets share this BaseDataset for training and have\n their own evaluate function.\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n This dataset can only be used for training.\n For evaluation, subclass should write an extra evaluate function.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n self.image_info = {}\n self.ann_info = {}\n\n self.ann_file = ann_file\n self.img_prefix = img_prefix\n self.pipeline = pipeline\n self.test_mode = test_mode\n\n self.ann_info['image_size'] = np.array(data_cfg['image_size'])\n self.ann_info['iuv_size'] = np.array(data_cfg['iuv_size'])\n self.ann_info['num_joints'] = data_cfg['num_joints']\n self.ann_info['flip_pairs'] = None\n self.db = []\n self.pipeline = Compose(self.pipeline)\n\n # flip_pairs\n # For all mesh dataset, we use 24 joints as CMR and SPIN.\n self.ann_info['flip_pairs'] = [[0, 5], [1, 4], [2, 3], [6, 11],\n [7, 10], [8, 9], [20, 21], [22, 23]]\n self.ann_info['use_different_joint_weights'] = False\n assert self.ann_info['num_joints'] == 24\n self.ann_info['joint_weights'] = np.ones([24, 1], dtype=np.float32)\n\n self.ann_info['uv_type'] = data_cfg['uv_type']\n self.ann_info['use_IUV'] = data_cfg['use_IUV']\n uv_type = self.ann_info['uv_type']\n self.iuv_prefix = os.path.join(self.img_prefix, f'{uv_type}_IUV_gt')\n self.db = self._get_db(ann_file)\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n data = np.load(ann_file)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_2d=None,\n joints_2d_visible=None,\n joints_3d=None,\n joints_3d_visible=None,\n gender=None,\n pose=None,\n beta=None,\n has_smpl=0,\n iuv_file=None,\n has_iuv=0)\n gt_db = []\n\n _imgnames = data['imgname']\n _scales = data['scale'].astype(np.float32)\n _centers = data['center'].astype(np.float32)\n dataset_len = len(_imgnames)\n\n # Get 2D keypoints\n if 'part' in data.keys():\n _keypoints = data['part'].astype(np.float32)\n else:\n _keypoints = np.zeros((dataset_len, 24, 3), dtype=np.float32)\n\n # Get gt 3D joints, if available\n if 'S' in data.keys():\n _joints_3d = data['S'].astype(np.float32)\n else:\n _joints_3d = np.zeros((dataset_len, 24, 4), dtype=np.float32)\n\n # Get gt SMPL parameters, if available\n if 'pose' in data.keys() and 'shape' in data.keys():\n _poses = data['pose'].astype(np.float32)\n _betas = data['shape'].astype(np.float32)\n has_smpl = 1\n else:\n _poses = np.zeros((dataset_len, 72), dtype=np.float32)\n _betas = np.zeros((dataset_len, 10), dtype=np.float32)\n has_smpl = 0\n\n # Get gender data, if available\n if 'gender' in data.keys():\n _genders = data['gender']\n _genders = np.array([str(g) != 'm' for g in _genders]).astype(int)\n else:\n _genders = -1 * np.ones(dataset_len).astype(int)\n\n # Get IUV image, if available\n if 'iuv_names' in data.keys():\n _iuv_names = data['iuv_names']\n 
has_iuv = has_smpl\n else:\n _iuv_names = [''] * dataset_len\n has_iuv = 0\n\n for i in range(len(_imgnames)):\n newitem = cp.deepcopy(tmpl)\n newitem['image_file'] = os.path.join(self.img_prefix, _imgnames[i])\n newitem['scale'] = np.array([_scales[i], _scales[i]])\n newitem['center'] = _centers[i]\n newitem['joints_2d'] = _keypoints[i, :, :2]\n newitem['joints_2d_visible'] = _keypoints[i, :, -1][:, None]\n newitem['joints_3d'] = _joints_3d[i, :, :3]\n newitem['joints_3d_visible'] = _joints_3d[i, :, -1][:, None]\n newitem['pose'] = _poses[i]\n newitem['beta'] = _betas[i]\n newitem['has_smpl'] = has_smpl\n newitem['gender'] = _genders[i]\n newitem['iuv_file'] = os.path.join(self.iuv_prefix, _iuv_names[i])\n newitem['has_iuv'] = has_iuv\n gt_db.append(newitem)\n return gt_db\n\n def __len__(self, ):\n \"\"\"Get the size of the dataset.\"\"\"\n return len(self.db)\n\n def __getitem__(self, idx):\n \"\"\"Get the sample given index.\"\"\"\n results = cp.deepcopy(self.db[idx])\n results['ann_info'] = self.ann_info\n return self.pipeline(results)\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\n\nfrom mmpose.core import (affine_transform, flip_back, fliplr_joints,\n fliplr_regression, get_affine_transform, rotate_point,\n transform_preds)\n\n\ndef test_affine_transform():\n pt = np.array([0, 1])\n trans = np.array([[1, 0, 1], [0, 1, 0]])\n result = affine_transform(pt, trans)\n assert_array_almost_equal(result, np.array([1, 1]), decimal=4)\n assert isinstance(result, np.ndarray)\n\n\ndef test_rotate_point():\n src_point = np.array([0, 1])\n rot_rad = np.pi / 2.\n result = rotate_point(src_point, rot_rad)\n assert_array_almost_equal(result, np.array([-1, 0]), decimal=4)\n assert isinstance(result, list)\n\n\ndef test_fliplr_joints():\n joints = np.array([[0, 0, 0], [1, 1, 0]])\n joints_vis = np.array([[1], [1]])\n joints_flip, _ = fliplr_joints(joints, joints_vis, 5, [[0, 1]])\n res = np.array([[3, 1, 0], [4, 0, 0]])\n assert_array_almost_equal(joints_flip, res)\n\n\ndef test_flip_back():\n heatmaps = np.random.random([1, 2, 32, 32])\n flipped_heatmaps = flip_back(heatmaps, [[0, 1]])\n heatmaps_new = flip_back(flipped_heatmaps, [[0, 1]])\n assert_array_almost_equal(heatmaps, heatmaps_new)\n\n heatmaps = np.random.random([1, 2, 32, 32])\n flipped_heatmaps = flip_back(heatmaps, [[0, 1]])\n heatmaps_new = flipped_heatmaps[..., ::-1]\n assert_array_almost_equal(heatmaps[:, 0], heatmaps_new[:, 1])\n assert_array_almost_equal(heatmaps[:, 1], heatmaps_new[:, 0])\n\n ori_heatmaps = heatmaps.copy()\n # test in-place flip\n heatmaps = heatmaps[:, :, :, ::-1]\n assert_array_almost_equal(ori_heatmaps[:, :, :, ::-1], heatmaps)\n\n\ndef test_transform_preds():\n coords = np.random.random([2, 2])\n center = np.array([50, 50])\n scale = np.array([100 / 200.0, 100 / 200.0])\n size = np.array([100, 100])\n result = transform_preds(coords, center, scale, size)\n assert_array_almost_equal(coords, result)\n\n coords = np.random.random([2, 2])\n center = np.array([50, 50])\n scale = np.array([100 / 200.0, 100 / 200.0])\n size = np.array([101, 101])\n result = transform_preds(coords, center, scale, size, use_udp=True)\n assert_array_almost_equal(coords, result)\n\n\ndef test_get_affine_transform():\n center = np.array([50, 50])\n scale = np.array([100 / 200.0, 100 / 200.0])\n size = np.array([100, 100])\n result = get_affine_transform(center, scale, 0, size)\n trans = np.array([[1, 0, 0], [0, 1, 0]])\n assert_array_almost_equal(trans, 
result)\n\n\ndef test_flip_regression():\n coords = np.random.rand(3, 3)\n flip_pairs = [[1, 2]]\n root = coords[:1]\n coords_flipped = coords.copy()\n coords_flipped[1] = coords[2]\n coords_flipped[2] = coords[1]\n coords_flipped[..., 0] = 2 * root[..., 0] - coords_flipped[..., 0]\n\n # static mode\n res_static = fliplr_regression(\n coords, flip_pairs, center_mode='static', center_x=root[0, 0])\n assert_array_almost_equal(res_static, coords_flipped)\n\n # root mode\n res_root = fliplr_regression(\n coords, flip_pairs, center_mode='root', center_index=0)\n assert_array_almost_equal(res_root, coords_flipped)\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom mmpose.models.backbones import ResNet, ResNetV1d\nfrom mmpose.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer,\n get_expansion)\n\n\ndef is_block(modules):\n \"\"\"Check if is ResNet building block.\"\"\"\n if isinstance(modules, (BasicBlock, Bottleneck)):\n return True\n return False\n\n\ndef all_zeros(modules):\n \"\"\"Check if the weight(and bias) is all zero.\"\"\"\n weight_zero = torch.equal(modules.weight.data,\n torch.zeros_like(modules.weight.data))\n if hasattr(modules, 'bias'):\n bias_zero = torch.equal(modules.bias.data,\n torch.zeros_like(modules.bias.data))\n else:\n bias_zero = True\n\n return weight_zero and bias_zero\n\n\ndef check_norm_state(modules, train_state):\n \"\"\"Check if norm layer is in correct train state.\"\"\"\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True\n\n\ndef test_get_expansion():\n assert get_expansion(Bottleneck, 2) == 2\n assert get_expansion(BasicBlock) == 1\n assert get_expansion(Bottleneck) == 4\n\n class MyResBlock(nn.Module):\n\n expansion = 8\n\n assert get_expansion(MyResBlock) == 8\n\n # expansion must be an integer or None\n with pytest.raises(TypeError):\n get_expansion(Bottleneck, '0')\n\n # expansion is not specified and cannot be inferred\n with pytest.raises(TypeError):\n\n class SomeModule(nn.Module):\n pass\n\n get_expansion(SomeModule)\n\n\ndef test_basic_block():\n # expansion must be 1\n with pytest.raises(AssertionError):\n BasicBlock(64, 64, expansion=2)\n\n # BasicBlock with stride 1, out_channels == in_channels\n block = BasicBlock(64, 64)\n assert block.in_channels == 64\n assert block.mid_channels == 64\n assert block.out_channels == 64\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 64\n assert block.conv1.kernel_size == (3, 3)\n assert block.conv1.stride == (1, 1)\n assert block.conv2.in_channels == 64\n assert block.conv2.out_channels == 64\n assert block.conv2.kernel_size == (3, 3)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # BasicBlock with stride 1 and downsample\n downsample = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128))\n block = BasicBlock(64, 128, downsample=downsample)\n assert block.in_channels == 64\n assert block.mid_channels == 128\n assert block.out_channels == 128\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 128\n assert block.conv1.kernel_size == (3, 3)\n assert block.conv1.stride == (1, 1)\n assert block.conv2.in_channels == 128\n assert block.conv2.out_channels == 128\n assert block.conv2.kernel_size == (3, 3)\n x = torch.randn(1, 64, 56, 56)\n x_out = 
block(x)\n assert x_out.shape == torch.Size([1, 128, 56, 56])\n\n # BasicBlock with stride 2 and downsample\n downsample = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),\n nn.BatchNorm2d(128))\n block = BasicBlock(64, 128, stride=2, downsample=downsample)\n assert block.in_channels == 64\n assert block.mid_channels == 128\n assert block.out_channels == 128\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 128\n assert block.conv1.kernel_size == (3, 3)\n assert block.conv1.stride == (2, 2)\n assert block.conv2.in_channels == 128\n assert block.conv2.out_channels == 128\n assert block.conv2.kernel_size == (3, 3)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 128, 28, 28])\n\n # forward with checkpointing\n block = BasicBlock(64, 64, with_cp=True)\n assert block.with_cp\n x = torch.randn(1, 64, 56, 56, requires_grad=True)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_bottleneck():\n # style must be in ['pytorch', 'caffe']\n with pytest.raises(AssertionError):\n Bottleneck(64, 64, style='tensorflow')\n\n # expansion must be divisible by out_channels\n with pytest.raises(AssertionError):\n Bottleneck(64, 64, expansion=3)\n\n # Test Bottleneck style\n block = Bottleneck(64, 64, stride=2, style='pytorch')\n assert block.conv1.stride == (1, 1)\n assert block.conv2.stride == (2, 2)\n block = Bottleneck(64, 64, stride=2, style='caffe')\n assert block.conv1.stride == (2, 2)\n assert block.conv2.stride == (1, 1)\n\n # Bottleneck with stride 1\n block = Bottleneck(64, 64, style='pytorch')\n assert block.in_channels == 64\n assert block.mid_channels == 16\n assert block.out_channels == 64\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 16\n assert block.conv1.kernel_size == (1, 1)\n assert block.conv2.in_channels == 16\n assert block.conv2.out_channels == 16\n assert block.conv2.kernel_size == (3, 3)\n assert block.conv3.in_channels == 16\n assert block.conv3.out_channels == 64\n assert block.conv3.kernel_size == (1, 1)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == (1, 64, 56, 56)\n\n # Bottleneck with stride 1 and downsample\n downsample = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128))\n block = Bottleneck(64, 128, style='pytorch', downsample=downsample)\n assert block.in_channels == 64\n assert block.mid_channels == 32\n assert block.out_channels == 128\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 32\n assert block.conv1.kernel_size == (1, 1)\n assert block.conv2.in_channels == 32\n assert block.conv2.out_channels == 32\n assert block.conv2.kernel_size == (3, 3)\n assert block.conv3.in_channels == 32\n assert block.conv3.out_channels == 128\n assert block.conv3.kernel_size == (1, 1)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == (1, 128, 56, 56)\n\n # Bottleneck with stride 2 and downsample\n downsample = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128))\n block = Bottleneck(\n 64, 128, stride=2, style='pytorch', downsample=downsample)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == (1, 128, 28, 28)\n\n # Bottleneck with expansion 2\n block = Bottleneck(64, 64, style='pytorch', expansion=2)\n assert block.in_channels == 64\n assert block.mid_channels == 32\n assert block.out_channels == 64\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 
32\n assert block.conv1.kernel_size == (1, 1)\n assert block.conv2.in_channels == 32\n assert block.conv2.out_channels == 32\n assert block.conv2.kernel_size == (3, 3)\n assert block.conv3.in_channels == 32\n assert block.conv3.out_channels == 64\n assert block.conv3.kernel_size == (1, 1)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == (1, 64, 56, 56)\n\n # Test Bottleneck with checkpointing\n block = Bottleneck(64, 64, with_cp=True)\n block.train()\n assert block.with_cp\n x = torch.randn(1, 64, 56, 56, requires_grad=True)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_basicblock_reslayer():\n # 3 BasicBlock w/o downsample\n layer = ResLayer(BasicBlock, 3, 32, 32)\n assert len(layer) == 3\n for i in range(3):\n assert layer[i].in_channels == 32\n assert layer[i].out_channels == 32\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 32, 56, 56)\n\n # 3 BasicBlock w/ stride 1 and downsample\n layer = ResLayer(BasicBlock, 3, 32, 64)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].downsample is not None and len(layer[0].downsample) == 2\n assert isinstance(layer[0].downsample[0], nn.Conv2d)\n assert layer[0].downsample[0].stride == (1, 1)\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 56, 56)\n\n # 3 BasicBlock w/ stride 2 and downsample\n layer = ResLayer(BasicBlock, 3, 32, 64, stride=2)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].stride == 2\n assert layer[0].downsample is not None and len(layer[0].downsample) == 2\n assert isinstance(layer[0].downsample[0], nn.Conv2d)\n assert layer[0].downsample[0].stride == (2, 2)\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].stride == 1\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 28, 28)\n\n # 3 BasicBlock w/ stride 2 and downsample with avg pool\n layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].stride == 2\n assert layer[0].downsample is not None and len(layer[0].downsample) == 3\n assert isinstance(layer[0].downsample[0], nn.AvgPool2d)\n assert layer[0].downsample[0].stride == 2\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].stride == 1\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 28, 28)\n\n\ndef test_bottleneck_reslayer():\n # 3 Bottleneck w/o downsample\n layer = ResLayer(Bottleneck, 3, 32, 32)\n assert len(layer) == 3\n for i in range(3):\n assert layer[i].in_channels == 32\n assert layer[i].out_channels == 32\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 32, 56, 56)\n\n # 3 Bottleneck w/ stride 1 and downsample\n layer = ResLayer(Bottleneck, 3, 32, 64)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].stride == 1\n assert layer[0].conv1.out_channels == 16\n assert 
layer[0].downsample is not None and len(layer[0].downsample) == 2\n assert isinstance(layer[0].downsample[0], nn.Conv2d)\n assert layer[0].downsample[0].stride == (1, 1)\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].conv1.out_channels == 16\n assert layer[i].stride == 1\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 56, 56)\n\n # 3 Bottleneck w/ stride 2 and downsample\n layer = ResLayer(Bottleneck, 3, 32, 64, stride=2)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].stride == 2\n assert layer[0].conv1.out_channels == 16\n assert layer[0].downsample is not None and len(layer[0].downsample) == 2\n assert isinstance(layer[0].downsample[0], nn.Conv2d)\n assert layer[0].downsample[0].stride == (2, 2)\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].conv1.out_channels == 16\n assert layer[i].stride == 1\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 28, 28)\n\n # 3 Bottleneck w/ stride 2 and downsample with avg pool\n layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True)\n assert len(layer) == 3\n assert layer[0].in_channels == 32\n assert layer[0].out_channels == 64\n assert layer[0].stride == 2\n assert layer[0].conv1.out_channels == 16\n assert layer[0].downsample is not None and len(layer[0].downsample) == 3\n assert isinstance(layer[0].downsample[0], nn.AvgPool2d)\n assert layer[0].downsample[0].stride == 2\n for i in range(1, 3):\n assert layer[i].in_channels == 64\n assert layer[i].out_channels == 64\n assert layer[i].conv1.out_channels == 16\n assert layer[i].stride == 1\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 64, 28, 28)\n\n # 3 Bottleneck with custom expansion\n layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2)\n assert len(layer) == 3\n for i in range(3):\n assert layer[i].in_channels == 32\n assert layer[i].out_channels == 32\n assert layer[i].stride == 1\n assert layer[i].conv1.out_channels == 16\n assert layer[i].downsample is None\n x = torch.randn(1, 32, 56, 56)\n x_out = layer(x)\n assert x_out.shape == (1, 32, 56, 56)\n\n\ndef test_resnet():\n \"\"\"Test resnet backbone.\"\"\"\n with pytest.raises(KeyError):\n # ResNet depth should be in [18, 34, 50, 101, 152]\n ResNet(20)\n\n with pytest.raises(AssertionError):\n # In ResNet: 1 <= num_stages <= 4\n ResNet(50, num_stages=0)\n\n with pytest.raises(AssertionError):\n # In ResNet: 1 <= num_stages <= 4\n ResNet(50, num_stages=5)\n\n with pytest.raises(AssertionError):\n # len(strides) == len(dilations) == num_stages\n ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)\n\n with pytest.raises(TypeError):\n # pretrained must be a string path\n model = ResNet(50)\n model.init_weights(pretrained=0)\n\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n ResNet(50, style='tensorflow')\n\n # Test ResNet50 norm_eval=True\n model = ResNet(50, norm_eval=True)\n model.init_weights()\n model.train()\n assert check_norm_state(model.modules(), False)\n\n # Test ResNet50 with torchvision pretrained weight\n model = ResNet(depth=50, norm_eval=True)\n model.init_weights('torchvision://resnet50')\n model.train()\n assert check_norm_state(model.modules(), False)\n\n # Test ResNet50 
with first stage frozen\n frozen_stages = 1\n model = ResNet(50, frozen_stages=frozen_stages)\n model.init_weights()\n model.train()\n assert model.norm1.training is False\n for layer in [model.conv1, model.norm1]:\n for param in layer.parameters():\n assert param.requires_grad is False\n for i in range(1, frozen_stages + 1):\n layer = getattr(model, f'layer{i}')\n for mod in layer.modules():\n if isinstance(mod, _BatchNorm):\n assert mod.training is False\n for param in layer.parameters():\n assert param.requires_grad is False\n\n # Test ResNet18 forward\n model = ResNet(18, out_indices=(0, 1, 2, 3))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == (1, 64, 56, 56)\n assert feat[1].shape == (1, 128, 28, 28)\n assert feat[2].shape == (1, 256, 14, 14)\n assert feat[3].shape == (1, 512, 7, 7)\n\n # Test ResNet50 with BatchNorm forward\n model = ResNet(50, out_indices=(0, 1, 2, 3))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == (1, 256, 56, 56)\n assert feat[1].shape == (1, 512, 28, 28)\n assert feat[2].shape == (1, 1024, 14, 14)\n assert feat[3].shape == (1, 2048, 7, 7)\n\n # Test ResNet50 with layers 1, 2, 3 out forward\n model = ResNet(50, out_indices=(0, 1, 2))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 3\n assert feat[0].shape == (1, 256, 56, 56)\n assert feat[1].shape == (1, 512, 28, 28)\n assert feat[2].shape == (1, 1024, 14, 14)\n\n # Test ResNet50 with layers 3 (top feature maps) out forward\n model = ResNet(50, out_indices=(3, ))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat.shape == (1, 2048, 7, 7)\n\n # Test ResNet50 with checkpoint forward\n model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True)\n for m in model.modules():\n if is_block(m):\n assert m.with_cp\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == (1, 256, 56, 56)\n assert feat[1].shape == (1, 512, 28, 28)\n assert feat[2].shape == (1, 1024, 14, 14)\n assert feat[3].shape == (1, 2048, 7, 7)\n\n # zero initialization of residual blocks\n model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True)\n model.init_weights()\n for m in model.modules():\n if isinstance(m, Bottleneck):\n assert all_zeros(m.norm3)\n elif isinstance(m, BasicBlock):\n assert all_zeros(m.norm2)\n\n # non-zero initialization of residual blocks\n model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False)\n model.init_weights()\n for m in model.modules():\n if isinstance(m, Bottleneck):\n assert not all_zeros(m.norm3)\n elif isinstance(m, BasicBlock):\n assert not all_zeros(m.norm2)\n\n\ndef test_resnet_v1d():\n model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3))\n model.init_weights()\n model.train()\n\n assert len(model.stem) == 3\n for i in range(3):\n assert isinstance(model.stem[i], ConvModule)\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model.stem(imgs)\n assert feat.shape == (1, 64, 112, 112)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == (1, 256, 56, 56)\n assert feat[1].shape == (1, 512, 28, 28)\n assert feat[2].shape == (1, 1024, 14, 14)\n assert feat[3].shape == (1, 2048, 7, 7)\n\n # Test ResNet50V1d with first stage frozen\n frozen_stages = 1\n 
model = ResNetV1d(depth=50, frozen_stages=frozen_stages)\n assert len(model.stem) == 3\n for i in range(3):\n assert isinstance(model.stem[i], ConvModule)\n model.init_weights()\n model.train()\n assert check_norm_state(model.stem, False)\n for param in model.stem.parameters():\n assert param.requires_grad is False\n for i in range(1, frozen_stages + 1):\n layer = getattr(model, f'layer{i}')\n for mod in layer.modules():\n if isinstance(mod, _BatchNorm):\n assert mod.training is False\n for param in layer.parameters():\n assert param.requires_grad is False\n\n\ndef test_resnet_half_channel():\n model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == (1, 128, 56, 56)\n assert feat[1].shape == (1, 256, 28, 28)\n assert feat[2].shape == (1, 512, 14, 14)\n assert feat[3].shape == (1, 1024, 7, 7)\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport warnings\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\nfrom PIL import Image\n\nfrom mmpose.core.post_processing import oks_nms\nfrom mmpose.datasets.dataset_info import DatasetInfo\nfrom mmpose.datasets.pipelines import Compose\nfrom mmpose.models import build_posenet\nfrom mmpose.utils.hooks import OutputHook\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n\n\ndef init_pose_model(config, checkpoint=None, device='cuda:0'):\n \"\"\"Initialize a pose model from config file.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n\n Returns:\n nn.Module: The constructed pose model.\n \"\"\"\n if isinstance(config, str):\n config = mmcv.Config.fromfile(config)\n elif not isinstance(config, mmcv.Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(config)}')\n config.model.pretrained = None\n model = build_posenet(config.model)\n if checkpoint is not None:\n # load model checkpoint\n load_checkpoint(model, checkpoint, map_location=device)\n # save the config in the model for convenience\n model.cfg = config\n model.to(device)\n model.eval()\n return model\n\n\ndef _xyxy2xywh(bbox_xyxy):\n \"\"\"Transform the bbox format from x1y1x2y2 to xywh.\n\n Args:\n bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or\n (n, 5). (left, top, right, bottom, [score])\n\n Returns:\n np.ndarray: Bounding boxes (with scores),\n shaped (n, 4) or (n, 5). (left, top, width, height, [score])\n \"\"\"\n bbox_xywh = bbox_xyxy.copy()\n bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1\n bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1\n\n return bbox_xywh\n\n\ndef _xywh2xyxy(bbox_xywh):\n \"\"\"Transform the bbox format from xywh to x1y1x2y2.\n\n Args:\n bbox_xywh (ndarray): Bounding boxes (with scores),\n shaped (n, 4) or (n, 5). (left, top, width, height, [score])\n Returns:\n np.ndarray: Bounding boxes (with scores), shaped (n, 4) or\n (n, 5). 
(left, top, right, bottom, [score])\n \"\"\"\n bbox_xyxy = bbox_xywh.copy()\n bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] - 1\n bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] - 1\n\n return bbox_xyxy\n\n\ndef _box2cs(cfg, box):\n \"\"\"This encodes bbox(x,y,w,h) into (center, scale)\n\n Args:\n x, y, w, h\n\n Returns:\n tuple: A tuple containing center and scale.\n\n - np.ndarray[float32](2,): Center of the bbox (x, y).\n - np.ndarray[float32](2,): Scale of the bbox w & h.\n \"\"\"\n\n x, y, w, h = box[:4]\n input_size = cfg.data_cfg['image_size']\n aspect_ratio = input_size[0] / input_size[1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n\n scale = scale * 1.25\n\n return center, scale\n\n\nclass LoadImage:\n \"\"\"A simple pipeline to load image.\"\"\"\n\n def __init__(self, color_type='color', channel_order='rgb'):\n self.color_type = color_type\n self.channel_order = channel_order\n\n def __call__(self, results):\n \"\"\"Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the img_or_path.\n\n Returns:\n dict: ``results`` will be returned containing loaded image.\n \"\"\"\n if isinstance(results['img_or_path'], str):\n results['image_file'] = results['img_or_path']\n img = mmcv.imread(results['img_or_path'], self.color_type,\n self.channel_order)\n elif isinstance(results['img_or_path'], np.ndarray):\n results['image_file'] = ''\n if self.color_type == 'color' and self.channel_order == 'rgb':\n img = cv2.cvtColor(results['img_or_path'], cv2.COLOR_BGR2RGB)\n else:\n img = results['img_or_path']\n else:\n raise TypeError('\"img_or_path\" must be a numpy array or a str or '\n 'a pathlib.Path object')\n\n results['img'] = img\n return results\n\n\ndef _inference_single_pose_model(model,\n img_or_path,\n bboxes,\n dataset='TopDownCocoDataset',\n dataset_info=None,\n return_heatmap=False):\n \"\"\"Inference human bounding boxes.\n\n num_bboxes: N\n num_keypoints: K\n\n Args:\n model (nn.Module): The loaded pose model.\n img_or_path (str | np.ndarray): Image filename or loaded image.\n bboxes (list | np.ndarray): All bounding boxes (with scores),\n shaped (N, 4) or (N, 5). (left, top, width, height, [score])\n where N is number of bounding boxes.\n dataset (str): Dataset name. 
Deprecated.\n dataset_info (DatasetInfo): A class containing all dataset info.\n outputs (list[str] | tuple[str]): Names of layers whose output is\n to be returned, default: None\n\n Returns:\n ndarray[NxKx3]: Predicted pose x, y, score.\n heatmap[N, K, H, W]: Model output heatmap.\n \"\"\"\n\n cfg = model.cfg\n device = next(model.parameters()).device\n\n # build the data pipeline\n channel_order = cfg.test_pipeline[0].get('channel_order', 'rgb')\n test_pipeline = [LoadImage(channel_order=channel_order)\n ] + cfg.test_pipeline[1:]\n test_pipeline = Compose(test_pipeline)\n\n assert len(bboxes[0]) in [4, 5]\n\n if dataset_info is not None:\n dataset_name = dataset_info.dataset_name\n flip_pairs = dataset_info.flip_pairs\n else:\n warnings.warn(\n 'dataset is deprecated.'\n 'Please set `dataset_info` in the config.'\n 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',\n DeprecationWarning)\n # TODO: These will be removed in the later versions.\n if dataset in ('TopDownCocoDataset', 'TopDownOCHumanDataset',\n 'AnimalMacaqueDataset'):\n flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],\n [13, 14], [15, 16]]\n elif dataset == 'TopDownCocoWholeBodyDataset':\n body = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],\n [13, 14], [15, 16]]\n foot = [[17, 20], [18, 21], [19, 22]]\n\n face = [[23, 39], [24, 38], [25, 37], [26, 36], [27, 35], [28, 34],\n [29, 33], [30, 32], [40, 49], [41, 48], [42, 47], [43, 46],\n [44, 45], [54, 58], [55, 57], [59, 68], [60, 67], [61, 66],\n [62, 65], [63, 70], [64, 69], [71, 77], [72, 76], [73, 75],\n [78, 82], [79, 81], [83, 87], [84, 86], [88, 90]]\n\n hand = [[91, 112], [92, 113], [93, 114], [94, 115], [95, 116],\n [96, 117], [97, 118], [98, 119], [99, 120], [100, 121],\n [101, 122], [102, 123], [103, 124], [104, 125], [105, 126],\n [106, 127], [107, 128], [108, 129], [109, 130], [110, 131],\n [111, 132]]\n flip_pairs = body + foot + face + hand\n elif dataset == 'TopDownAicDataset':\n flip_pairs = [[0, 3], [1, 4], [2, 5], [6, 9], [7, 10], [8, 11]]\n elif dataset == 'TopDownMpiiDataset':\n flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]\n elif dataset == 'TopDownMpiiTrbDataset':\n flip_pairs = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11],\n [14, 15], [16, 22], [28, 34], [17, 23], [29, 35],\n [18, 24], [30, 36], [19, 25], [31, 37], [20, 26],\n [32, 38], [21, 27], [33, 39]]\n elif dataset in ('OneHand10KDataset', 'FreiHandDataset',\n 'PanopticDataset', 'InterHand2DDataset'):\n flip_pairs = []\n elif dataset in 'Face300WDataset':\n flip_pairs = [[0, 16], [1, 15], [2, 14], [3, 13], [4, 12], [5, 11],\n [6, 10], [7, 9], [17, 26], [18, 25], [19, 24],\n [20, 23], [21, 22], [31, 35], [32, 34], [36, 45],\n [37, 44], [38, 43], [39, 42], [40, 47], [41, 46],\n [48, 54], [49, 53], [50, 52], [61, 63], [60, 64],\n [67, 65], [58, 56], [59, 55]]\n\n elif dataset in 'FaceAFLWDataset':\n flip_pairs = [[0, 5], [1, 4], [2, 3], [6, 11], [7, 10], [8, 9],\n [12, 14], [15, 17]]\n\n elif dataset in 'FaceCOFWDataset':\n flip_pairs = [[0, 1], [4, 6], [2, 3], [5, 7], [8, 9], [10, 11],\n [12, 14], [16, 17], [13, 15], [18, 19], [22, 23]]\n\n elif dataset in 'FaceWFLWDataset':\n flip_pairs = [[0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27],\n [6, 26], [7, 25], [8, 24], [9, 23], [10, 22],\n [11, 21], [12, 20], [13, 19], [14, 18], [15, 17],\n [33, 46], [34, 45], [35, 44], [36, 43], [37, 42],\n [38, 50], [39, 49], [40, 48], [41, 47], [60, 72],\n [61, 71], [62, 70], [63, 69], [64, 68], [65, 75],\n [66, 74], [67, 73], [55, 59], [56, 
58], [76, 82],\n [77, 81], [78, 80], [87, 83], [86, 84], [88, 92],\n [89, 91], [95, 93], [96, 97]]\n\n elif dataset in 'AnimalFlyDataset':\n flip_pairs = [[1, 2], [6, 18], [7, 19], [8, 20], [9, 21], [10, 22],\n [11, 23], [12, 24], [13, 25], [14, 26], [15, 27],\n [16, 28], [17, 29], [30, 31]]\n elif dataset in 'AnimalHorse10Dataset':\n flip_pairs = []\n\n elif dataset in 'AnimalLocustDataset':\n flip_pairs = [[5, 20], [6, 21], [7, 22], [8, 23], [9, 24],\n [10, 25], [11, 26], [12, 27], [13, 28], [14, 29],\n [15, 30], [16, 31], [17, 32], [18, 33], [19, 34]]\n\n elif dataset in 'AnimalZebraDataset':\n flip_pairs = [[3, 4], [5, 6]]\n\n elif dataset in 'AnimalPoseDataset':\n flip_pairs = [[0, 1], [2, 3], [8, 9], [10, 11], [12, 13], [14, 15],\n [16, 17], [18, 19]]\n else:\n raise NotImplementedError()\n dataset_name = dataset\n\n batch_data = []\n for bbox in bboxes:\n center, scale = _box2cs(cfg, bbox)\n\n # prepare data\n data = {\n 'img_or_path':\n img_or_path,\n 'center':\n center,\n 'scale':\n scale,\n 'bbox_score':\n bbox[4] if len(bbox) == 5 else 1,\n 'bbox_id':\n 0, # need to be assigned if batch_size > 1\n 'dataset':\n dataset_name,\n 'joints_3d':\n np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),\n 'joints_3d_visible':\n np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),\n 'rotation':\n 0,\n 'ann_info': {\n 'image_size': np.array(cfg.data_cfg['image_size']),\n 'num_joints': cfg.data_cfg['num_joints'],\n 'flip_pairs': flip_pairs\n }\n }\n data = test_pipeline(data)\n batch_data.append(data)\n\n batch_data = collate(batch_data, samples_per_gpu=1)\n\n if next(model.parameters()).is_cuda:\n # scatter not work so just move image to cuda device\n batch_data['img'] = batch_data['img'].to(device)\n # get all img_metas of each bounding box\n batch_data['img_metas'] = [\n img_metas[0] for img_metas in batch_data['img_metas'].data\n ]\n\n # forward the model\n with torch.no_grad():\n result = model(\n img=batch_data['img'],\n img_metas=batch_data['img_metas'],\n return_loss=False,\n return_heatmap=return_heatmap)\n\n return result['preds'], result['output_heatmap']\n\n\ndef inference_top_down_pose_model(model,\n img_or_path,\n person_results=None,\n bbox_thr=None,\n format='xywh',\n dataset='TopDownCocoDataset',\n dataset_info=None,\n return_heatmap=False,\n outputs=None):\n \"\"\"Inference a single image with a list of person bounding boxes.\n\n num_people: P\n num_keypoints: K\n bbox height: H\n bbox width: W\n\n Args:\n model (nn.Module): The loaded pose model.\n img_or_path (str| np.ndarray): Image filename or loaded image.\n person_results (List(dict), optional): a list of detected persons that\n contains following items:\n - 'bbox' and/or 'track_id'.\n - 'bbox' (4, ) or (5, ): The person bounding box, which contains\n 4 box coordinates (and score).\n - 'track_id' (int): The unique id for each human instance.\n If not provided, a dummy person result with a bbox covering the\n entire image will be used. Default: None.\n bbox_thr: Threshold for bounding boxes. Only bboxes with higher scores\n will be fed into the pose detector. If bbox_thr is None, ignore it.\n format: bbox format ('xyxy' | 'xywh'). Default: 'xywh'.\n 'xyxy' means (left, top, right, bottom),\n 'xywh' means (left, top, width, height).\n dataset (str): Dataset name, e.g. 'TopDownCocoDataset'.\n It is deprecated. 
Please use dataset_info instead.\n dataset_info (DatasetInfo): A class containing all dataset info.\n return_heatmap (bool) : Flag to return heatmap, default: False\n outputs (list(str) | tuple(str)) : Names of layers whose outputs\n need to be returned, default: None\n\n Returns:\n list[dict]: The bbox & pose info,\n Each item in the list is a dictionary,\n containing the bbox: (left, top, right, bottom, [score])\n and the pose (ndarray[Kx3]): x, y, score\n list[dict[np.ndarray[N, K, H, W] | torch.tensor[N, K, H, W]]]:\n Output feature maps from layers specified in `outputs`.\n Includes 'heatmap' if `return_heatmap` is True.\n \"\"\"\n # get dataset info\n if (dataset_info is None and hasattr(model, 'cfg')\n and 'dataset_info' in model.cfg):\n dataset_info = DatasetInfo(model.cfg.dataset_info)\n if dataset_info is None:\n warnings.warn(\n 'dataset is deprecated.'\n 'Please set `dataset_info` in the config.'\n 'Check https://github.com/open-mmlab/mmpose/pull/663'\n ' for details.', DeprecationWarning)\n\n # only two kinds of bbox format is supported.\n assert format in ['xyxy', 'xywh']\n\n pose_results = []\n returned_outputs = []\n\n if person_results is None:\n # create dummy person results\n if isinstance(img_or_path, str):\n width, height = Image.open(img_or_path).size\n else:\n height, width = img_or_path.shape[:2]\n person_results = [{'bbox': np.array([0, 0, width, height])}]\n\n if len(person_results) == 0:\n return pose_results, returned_outputs\n\n # Change for-loop preprocess each bbox to preprocess all bboxes at once.\n bboxes = np.array([box['bbox'] for box in person_results])\n\n # Select bboxes by score threshold\n if bbox_thr is not None:\n assert bboxes.shape[1] == 5\n valid_idx = np.where(bboxes[:, 4] > bbox_thr)[0]\n bboxes = bboxes[valid_idx]\n person_results = [person_results[i] for i in valid_idx]\n\n if format == 'xyxy':\n bboxes_xyxy = bboxes\n bboxes_xywh = _xyxy2xywh(bboxes)\n else:\n # format is already 'xywh'\n bboxes_xywh = bboxes\n bboxes_xyxy = _xywh2xyxy(bboxes)\n\n # if bbox_thr remove all bounding box\n if len(bboxes_xywh) == 0:\n return [], []\n\n with OutputHook(model, outputs=outputs, as_tensor=False) as h:\n # poses is results['pred'] # N x 17x 3\n poses, heatmap = _inference_single_pose_model(\n model,\n img_or_path,\n bboxes_xywh,\n dataset=dataset,\n dataset_info=dataset_info,\n return_heatmap=return_heatmap)\n\n if return_heatmap:\n h.layer_outputs['heatmap'] = heatmap\n\n returned_outputs.append(h.layer_outputs)\n\n assert len(poses) == len(person_results), print(\n len(poses), len(person_results), len(bboxes_xyxy))\n for pose, person_result, bbox_xyxy in zip(poses, person_results,\n bboxes_xyxy):\n pose_result = person_result.copy()\n pose_result['keypoints'] = pose\n pose_result['bbox'] = bbox_xyxy\n pose_results.append(pose_result)\n\n return pose_results, returned_outputs\n\n\ndef inference_bottom_up_pose_model(model,\n img_or_path,\n dataset='BottomUpCocoDataset',\n dataset_info=None,\n pose_nms_thr=0.9,\n return_heatmap=False,\n outputs=None):\n \"\"\"Inference a single image.\n\n num_people: P\n num_keypoints: K\n bbox height: H\n bbox width: W\n\n Args:\n model (nn.Module): The loaded pose model.\n img_or_path (str| np.ndarray): Image filename or loaded image.\n dataset (str): Dataset name, e.g. 'BottomUpCocoDataset'.\n It is deprecated. 
Please use dataset_info instead.\n dataset_info (DatasetInfo): A class containing all dataset info.\n pose_nms_thr (float): retain oks overlap < pose_nms_thr, default: 0.9.\n return_heatmap (bool) : Flag to return heatmap, default: False.\n outputs (list(str) | tuple(str)) : Names of layers whose outputs\n need to be returned, default: None.\n\n Returns:\n list[ndarray]: The predicted pose info.\n The length of the list is the number of people (P).\n Each item in the list is a ndarray, containing each person's\n pose (ndarray[Kx3]): x, y, score.\n list[dict[np.ndarray[N, K, H, W] | torch.tensor[N, K, H, W]]]:\n Output feature maps from layers specified in `outputs`.\n Includes 'heatmap' if `return_heatmap` is True.\n \"\"\"\n # get dataset info\n if (dataset_info is None and hasattr(model, 'cfg')\n and 'dataset_info' in model.cfg):\n dataset_info = DatasetInfo(model.cfg.dataset_info)\n\n if dataset_info is not None:\n dataset_name = dataset_info.dataset_name\n flip_index = dataset_info.flip_index\n else:\n warnings.warn(\n 'dataset is deprecated.'\n 'Please set `dataset_info` in the config.'\n 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',\n DeprecationWarning)\n assert (dataset == 'BottomUpCocoDataset')\n dataset_name = dataset\n flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n\n pose_results = []\n returned_outputs = []\n\n cfg = model.cfg\n device = next(model.parameters()).device\n\n # build the data pipeline\n channel_order = cfg.test_pipeline[0].get('channel_order', 'rgb')\n test_pipeline = [LoadImage(channel_order=channel_order)\n ] + cfg.test_pipeline[1:]\n test_pipeline = Compose(test_pipeline)\n\n # prepare data\n data = {\n 'img_or_path': img_or_path,\n 'dataset': dataset_name,\n 'ann_info': {\n 'image_size': np.array(cfg.data_cfg['image_size']),\n 'num_joints': cfg.data_cfg['num_joints'],\n 'flip_index': flip_index,\n }\n }\n\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n else:\n # just get the actual data from DataContainer\n data['img_metas'] = data['img_metas'].data[0]\n\n with OutputHook(model, outputs=outputs, as_tensor=False) as h:\n # forward the model\n with torch.no_grad():\n result = model(\n img=data['img'],\n img_metas=data['img_metas'],\n return_loss=False,\n return_heatmap=return_heatmap)\n\n if return_heatmap:\n h.layer_outputs['heatmap'] = result['output_heatmap']\n\n returned_outputs.append(h.layer_outputs)\n\n for idx, pred in enumerate(result['preds']):\n area = (np.max(pred[:, 0]) - np.min(pred[:, 0])) * (\n np.max(pred[:, 1]) - np.min(pred[:, 1]))\n pose_results.append({\n 'keypoints': pred[:, :3],\n 'score': result['scores'][idx],\n 'area': area,\n })\n\n # pose nms\n keep = oks_nms(pose_results, pose_nms_thr, sigmas=None)\n pose_results = [pose_results[_keep] for _keep in keep]\n\n return pose_results, returned_outputs\n\n\ndef vis_pose_result(model,\n img,\n result,\n radius=4,\n thickness=1,\n kpt_score_thr=0.3,\n bbox_color='green',\n dataset='TopDownCocoDataset',\n dataset_info=None,\n show=False,\n out_file=None):\n \"\"\"Visualize the detection results on the image.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str | np.ndarray): Image filename or loaded image.\n result (list[dict]): The results to draw over `img`\n (bbox_result, pose_result).\n radius (int): Radius of circles.\n thickness (int): Thickness of lines.\n kpt_score_thr (float): The threshold to 
visualize the keypoints.\n skeleton (list[tuple()]): Default None.\n show (bool): Whether to show the image. Default True.\n out_file (str|None): The filename of the output visualization image.\n \"\"\"\n\n # get dataset info\n if (dataset_info is None and hasattr(model, 'cfg')\n and 'dataset_info' in model.cfg):\n dataset_info = DatasetInfo(model.cfg.dataset_info)\n\n if dataset_info is not None:\n skeleton = dataset_info.skeleton\n pose_kpt_color = dataset_info.pose_kpt_color\n pose_link_color = dataset_info.pose_link_color\n else:\n warnings.warn(\n 'dataset is deprecated.'\n 'Please set `dataset_info` in the config.'\n 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',\n DeprecationWarning)\n # TODO: These will be removed in the later versions.\n palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],\n [230, 230, 0], [255, 153, 255], [153, 204, 255],\n [255, 102, 255], [255, 51, 255], [102, 178, 255],\n [51, 153, 255], [255, 153, 153], [255, 102, 102],\n [255, 51, 51], [153, 255, 153], [102, 255, 102],\n [51, 255, 51], [0, 255, 0], [0, 0, 255],\n [255, 0, 0], [255, 255, 255]])\n\n if dataset in ('TopDownCocoDataset', 'BottomUpCocoDataset',\n 'TopDownOCHumanDataset', 'AnimalMacaqueDataset'):\n # show the results\n skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12],\n [5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9],\n [8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4],\n [3, 5], [4, 6]]\n\n pose_link_color = palette[[\n 0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16\n ]]\n pose_kpt_color = palette[[\n 16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0\n ]]\n\n elif dataset == 'TopDownCocoWholeBodyDataset':\n # show the results\n skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12],\n [5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9],\n [8, 10], [1, 2], [0, 1], [0, 2],\n [1, 3], [2, 4], [3, 5], [4, 6], [15, 17], [15, 18],\n [15, 19], [16, 20], [16, 21], [16, 22], [91, 92],\n [92, 93], [93, 94], [94, 95], [91, 96], [96, 97],\n [97, 98], [98, 99], [91, 100], [100, 101], [101, 102],\n [102, 103], [91, 104], [104, 105], [105, 106],\n [106, 107], [91, 108], [108, 109], [109, 110],\n [110, 111], [112, 113], [113, 114], [114, 115],\n [115, 116], [112, 117], [117, 118], [118, 119],\n [119, 120], [112, 121], [121, 122], [122, 123],\n [123, 124], [112, 125], [125, 126], [126, 127],\n [127, 128], [112, 129], [129, 130], [130, 131],\n [131, 132]]\n\n pose_link_color = palette[[\n 0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16\n ] + [16, 16, 16, 16, 16, 16] + [\n 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,\n 16\n ] + [\n 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,\n 16\n ]]\n pose_kpt_color = palette[\n [16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0] +\n [0, 0, 0, 0, 0, 0] + [19] * (68 + 42)]\n\n elif dataset == 'TopDownAicDataset':\n skeleton = [[2, 1], [1, 0], [0, 13], [13, 3], [3, 4], [4, 5],\n [8, 7], [7, 6], [6, 9], [9, 10], [10, 11], [12, 13],\n [0, 6], [3, 9]]\n\n pose_link_color = palette[[\n 9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 0, 7, 7\n ]]\n pose_kpt_color = palette[[\n 9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 0, 0\n ]]\n\n elif dataset == 'TopDownMpiiDataset':\n skeleton = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7],\n [7, 8], [8, 9], [8, 12], [12, 11], [11, 10], [8, 13],\n [13, 14], [14, 15]]\n\n pose_link_color = palette[[\n 16, 16, 16, 16, 16, 16, 7, 7, 0, 9, 9, 9, 9, 9, 9\n ]]\n pose_kpt_color = palette[[\n 16, 16, 16, 16, 16, 16, 7, 7, 0, 0, 9, 
9, 9, 9, 9, 9\n ]]\n\n elif dataset == 'TopDownMpiiTrbDataset':\n skeleton = [[12, 13], [13, 0], [13, 1], [0, 2], [1, 3], [2, 4],\n [3, 5], [0, 6], [1, 7], [6, 7], [6, 8], [7,\n 9], [8, 10],\n [9, 11], [14, 15], [16, 17], [18, 19], [20, 21],\n [22, 23], [24, 25], [26, 27], [28, 29], [30, 31],\n [32, 33], [34, 35], [36, 37], [38, 39]]\n\n pose_link_color = palette[[16] * 14 + [19] * 13]\n pose_kpt_color = palette[[16] * 14 + [0] * 26]\n\n elif dataset in ('OneHand10KDataset', 'FreiHandDataset',\n 'PanopticDataset'):\n skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7],\n [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13],\n [13, 14], [14, 15], [15, 16], [0, 17], [17, 18],\n [18, 19], [19, 20]]\n\n pose_link_color = palette[[\n 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,\n 16\n ]]\n pose_kpt_color = palette[[\n 0, 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16,\n 16, 16\n ]]\n\n elif dataset == 'InterHand2DDataset':\n skeleton = [[0, 1], [1, 2], [2, 3], [4, 5], [5, 6], [6, 7], [8, 9],\n [9, 10], [10, 11], [12, 13], [13, 14], [14, 15],\n [16, 17], [17, 18], [18, 19], [3, 20], [7, 20],\n [11, 20], [15, 20], [19, 20]]\n\n pose_link_color = palette[[\n 0, 0, 0, 4, 4, 4, 8, 8, 8, 12, 12, 12, 16, 16, 16, 0, 4, 8, 12,\n 16\n ]]\n pose_kpt_color = palette[[\n 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,\n 16, 0\n ]]\n\n elif dataset == 'Face300WDataset':\n # show the results\n skeleton = []\n\n pose_link_color = palette[[]]\n pose_kpt_color = palette[[19] * 68]\n kpt_score_thr = 0\n\n elif dataset == 'FaceAFLWDataset':\n # show the results\n skeleton = []\n\n pose_link_color = palette[[]]\n pose_kpt_color = palette[[19] * 19]\n kpt_score_thr = 0\n\n elif dataset == 'FaceCOFWDataset':\n # show the results\n skeleton = []\n\n pose_link_color = palette[[]]\n pose_kpt_color = palette[[19] * 29]\n kpt_score_thr = 0\n\n elif dataset == 'FaceWFLWDataset':\n # show the results\n skeleton = []\n\n pose_link_color = palette[[]]\n pose_kpt_color = palette[[19] * 98]\n kpt_score_thr = 0\n\n elif dataset == 'AnimalHorse10Dataset':\n skeleton = [[0, 1], [1, 12], [12, 16], [16, 21], [21, 17],\n [17, 11], [11, 10], [10, 8], [8, 9], [9, 12], [2, 3],\n [3, 4], [5, 6], [6, 7], [13, 14], [14, 15], [18, 19],\n [19, 20]]\n\n pose_link_color = palette[[4] * 10 + [6] * 2 + [6] * 2 + [7] * 2 +\n [7] * 2]\n pose_kpt_color = palette[[\n 4, 4, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 7, 7, 7, 4, 4, 7, 7, 7,\n 4\n ]]\n\n elif dataset == 'AnimalFlyDataset':\n skeleton = [[1, 0], [2, 0], [3, 0], [4, 3], [5, 4], [7, 6], [8, 7],\n [9, 8], [11, 10], [12, 11], [13, 12], [15, 14],\n [16, 15], [17, 16], [19, 18], [20, 19], [21, 20],\n [23, 22], [24, 23], [25, 24], [27, 26], [28, 27],\n [29, 28], [30, 3], [31, 3]]\n\n pose_link_color = palette[[0] * 25]\n pose_kpt_color = palette[[0] * 32]\n\n elif dataset == 'AnimalLocustDataset':\n skeleton = [[1, 0], [2, 1], [3, 2], [4, 3], [6, 5], [7, 6], [9, 8],\n [10, 9], [11, 10], [13, 12], [14, 13], [15, 14],\n [17, 16], [18, 17], [19, 18], [21, 20], [22, 21],\n [24, 23], [25, 24], [26, 25], [28, 27], [29, 28],\n [30, 29], [32, 31], [33, 32], [34, 33]]\n\n pose_link_color = palette[[0] * 26]\n pose_kpt_color = palette[[0] * 35]\n\n elif dataset == 'AnimalZebraDataset':\n skeleton = [[1, 0], [2, 1], [3, 2], [4, 2], [5, 7], [6, 7], [7, 2],\n [8, 7]]\n\n pose_link_color = palette[[0] * 8]\n pose_kpt_color = palette[[0] * 9]\n\n elif dataset in 'AnimalPoseDataset':\n skeleton = [[0, 1], [0, 2], [1, 3], [0, 4], [1, 4], [4, 5], [5, 7],\n [6, 
7], [5, 8], [8, 12], [12, 16], [5, 9], [9, 13],\n [13, 17], [6, 10], [10, 14], [14, 18], [6, 11],\n [11, 15], [15, 19]]\n\n pose_link_color = palette[[0] * 20]\n pose_kpt_color = palette[[0] * 20]\n else:\n raise NotImplementedError()\n\n if hasattr(model, 'module'):\n model = model.module\n\n img = model.show_result(\n img,\n result,\n skeleton,\n radius=radius,\n thickness=thickness,\n pose_kpt_color=pose_kpt_color,\n pose_link_color=pose_link_color,\n kpt_score_thr=kpt_score_thr,\n bbox_color=bbox_color,\n show=show,\n out_file=out_file)\n\n return img\n\n\ndef process_mmdet_results(mmdet_results, cat_id=1):\n \"\"\"Process mmdet results, and return a list of bboxes.\n\n Args:\n mmdet_results (list|tuple): mmdet results.\n cat_id (int): category id (default: 1 for human)\n Returns:\n person_results (list): a list of detected bounding boxes\n \"\"\"\n if isinstance(mmdet_results, tuple):\n det_results = mmdet_results[0]\n else:\n det_results = mmdet_results\n\n bboxes = det_results[cat_id - 1]\n\n person_results = []\n for bbox in bboxes:\n person = {}\n person['bbox'] = bbox\n person_results.append(person)\n\n return person_results\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom torch.nn.modules import GroupNorm\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmpose.models.backbones import MobileNetV3\nfrom mmpose.models.backbones.utils import InvertedResidual\n\n\ndef is_norm(modules):\n \"\"\"Check if is one of the norms.\"\"\"\n if isinstance(modules, (GroupNorm, _BatchNorm)):\n return True\n return False\n\n\ndef check_norm_state(modules, train_state):\n \"\"\"Check if norm layer is in correct train state.\"\"\"\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True\n\n\ndef test_mobilenetv3_backbone():\n with pytest.raises(TypeError):\n # pretrained must be a string path\n model = MobileNetV3()\n model.init_weights(pretrained=0)\n\n with pytest.raises(AssertionError):\n # arch must be in [small, big]\n MobileNetV3(arch='others')\n\n with pytest.raises(ValueError):\n # frozen_stages must be less than 12 when arch is small\n MobileNetV3(arch='small', frozen_stages=12)\n\n with pytest.raises(ValueError):\n # frozen_stages must be less than 16 when arch is big\n MobileNetV3(arch='big', frozen_stages=16)\n\n with pytest.raises(ValueError):\n # max out_indices must be less than 11 when arch is small\n MobileNetV3(arch='small', out_indices=(11, ))\n\n with pytest.raises(ValueError):\n # max out_indices must be less than 15 when arch is big\n MobileNetV3(arch='big', out_indices=(15, ))\n\n # Test MobileNetv3\n model = MobileNetV3()\n model.init_weights()\n model.train()\n\n # Test MobileNetv3 with first stage frozen\n frozen_stages = 1\n model = MobileNetV3(frozen_stages=frozen_stages)\n model.init_weights()\n model.train()\n for param in model.conv1.parameters():\n assert param.requires_grad is False\n for i in range(1, frozen_stages + 1):\n layer = getattr(model, f'layer{i}')\n for mod in layer.modules():\n if isinstance(mod, _BatchNorm):\n assert mod.training is False\n for param in layer.parameters():\n assert param.requires_grad is False\n\n # Test MobileNetv3 with norm eval\n model = MobileNetV3(norm_eval=True, out_indices=range(0, 11))\n model.init_weights()\n model.train()\n assert check_norm_state(model.modules(), False)\n\n # Test MobileNetv3 forward with small arch\n model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))\n model.init_weights()\n model.train()\n\n imgs = 
torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 11\n assert feat[0].shape == torch.Size([1, 16, 56, 56])\n assert feat[1].shape == torch.Size([1, 24, 28, 28])\n assert feat[2].shape == torch.Size([1, 24, 28, 28])\n assert feat[3].shape == torch.Size([1, 40, 14, 14])\n assert feat[4].shape == torch.Size([1, 40, 14, 14])\n assert feat[5].shape == torch.Size([1, 40, 14, 14])\n assert feat[6].shape == torch.Size([1, 48, 14, 14])\n assert feat[7].shape == torch.Size([1, 48, 14, 14])\n assert feat[8].shape == torch.Size([1, 96, 7, 7])\n assert feat[9].shape == torch.Size([1, 96, 7, 7])\n assert feat[10].shape == torch.Size([1, 96, 7, 7])\n\n # Test MobileNetv3 forward with small arch and GroupNorm\n model = MobileNetV3(\n out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),\n norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))\n for m in model.modules():\n if is_norm(m):\n assert isinstance(m, GroupNorm)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 11\n assert feat[0].shape == torch.Size([1, 16, 56, 56])\n assert feat[1].shape == torch.Size([1, 24, 28, 28])\n assert feat[2].shape == torch.Size([1, 24, 28, 28])\n assert feat[3].shape == torch.Size([1, 40, 14, 14])\n assert feat[4].shape == torch.Size([1, 40, 14, 14])\n assert feat[5].shape == torch.Size([1, 40, 14, 14])\n assert feat[6].shape == torch.Size([1, 48, 14, 14])\n assert feat[7].shape == torch.Size([1, 48, 14, 14])\n assert feat[8].shape == torch.Size([1, 96, 7, 7])\n assert feat[9].shape == torch.Size([1, 96, 7, 7])\n assert feat[10].shape == torch.Size([1, 96, 7, 7])\n\n # Test MobileNetv3 forward with big arch\n model = MobileNetV3(\n arch='big',\n out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 15\n assert feat[0].shape == torch.Size([1, 16, 112, 112])\n assert feat[1].shape == torch.Size([1, 24, 56, 56])\n assert feat[2].shape == torch.Size([1, 24, 56, 56])\n assert feat[3].shape == torch.Size([1, 40, 28, 28])\n assert feat[4].shape == torch.Size([1, 40, 28, 28])\n assert feat[5].shape == torch.Size([1, 40, 28, 28])\n assert feat[6].shape == torch.Size([1, 80, 14, 14])\n assert feat[7].shape == torch.Size([1, 80, 14, 14])\n assert feat[8].shape == torch.Size([1, 80, 14, 14])\n assert feat[9].shape == torch.Size([1, 80, 14, 14])\n assert feat[10].shape == torch.Size([1, 112, 14, 14])\n assert feat[11].shape == torch.Size([1, 112, 14, 14])\n assert feat[12].shape == torch.Size([1, 160, 14, 14])\n assert feat[13].shape == torch.Size([1, 160, 7, 7])\n assert feat[14].shape == torch.Size([1, 160, 7, 7])\n\n # Test MobileNetv3 forward with big arch\n model = MobileNetV3(arch='big', out_indices=(0, ))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat.shape == torch.Size([1, 16, 112, 112])\n\n # Test MobileNetv3 with checkpoint forward\n model = MobileNetV3(with_cp=True)\n for m in model.modules():\n if isinstance(m, InvertedResidual):\n assert m.with_cp\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat.shape == torch.Size([1, 96, 7, 7])\n" ]
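The ResNet and MobileNetV3 tests above all exercise one freezing contract: after `model.train()`, frozen stages must keep `requires_grad == False` with their BatchNorm layers in eval mode, and `norm_eval=True` must keep every BatchNorm in eval mode. A minimal sketch of a backbone honouring that contract follows; it is illustrative only, and the `layer{i}` stage naming is an assumption carried over from the tests, not the mmpose implementation.

import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm

class FreezableBackbone(nn.Module):
    def __init__(self, frozen_stages=-1, norm_eval=False):
        super().__init__()
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval

    def _freeze_stages(self):
        # Stop gradients and fix BN statistics for every frozen stage.
        for i in range(1, self.frozen_stages + 1):
            layer = getattr(self, f'layer{i}')  # hypothetical stage attribute
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            # Keep running statistics fixed; this is what check_norm_state verifies.
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
        return self

Overriding `train()` is what makes the contract survive later `model.train()` calls, which is exactly the sequence the tests run.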
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.ReLU" ], [ "numpy.load", "numpy.array", "numpy.zeros", "numpy.ones" ], [ "numpy.array", "numpy.random.random", "numpy.random.rand", "numpy.testing.assert_array_almost_equal" ], [ "torch.Size", "torch.randn", "torch.nn.Conv2d", "torch.zeros_like", "torch.nn.BatchNorm2d" ], [ "numpy.min", "numpy.max", "torch.no_grad", "numpy.array", "numpy.where", "numpy.zeros" ], [ "torch.randn", "torch.Size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yutliu/betterSAT
[ "db7c1b323f26ec19533a4b19804cf2c8a52643e5", "fb983f43b12352f9ee6ae40b4e0954f6ba502fb8" ]
[ "videoanalyst/data/target/target_impl/utils/make_densebox_target.py", "main/test.py" ]
[ "# encoding: utf-8\nfrom typing import Dict, Tuple\n\nimport numpy as np\n\n\ndef make_densebox_target(gt_boxes: np.array, config: Dict) -> Tuple:\n \"\"\"\n Model training target generation function for densebox\n\n Arguments\n ---------\n gt_boxes : np.array\n ground truth bounding boxes with class, shape=(N, 5), order=(x0, y0, x1, y1, class)\n config: configuration of target making (old format)\n Keys\n ----\n x_size : int\n search image size\n score_size : int\n score feature map size\n total_stride : int\n total stride of backbone\n score_offset : int\n offset between the edge of score map and the border of the search image\n\n Returns\n -------\n Tuple\n cls_res_final : np.array\n class\n shape=(N, 1)\n ctr_res_final : np.array\n shape=(N, 1)\n gt_boxes_res_final : np.array\n shape=(N, 4)\n # previous format\n # shape=(N, 6), order=(class, center-ness, left_offset, top_offset, right_offset, bottom_offset)\n \"\"\"\n x_size = config[\"x_size\"]\n score_size = config[\"score_size\"]\n total_stride = config[\"total_stride\"]\n score_offset = config[\"score_offset\"]\n eps = 1e-5\n raw_height, raw_width = x_size, x_size\n\n if gt_boxes.shape[1] == 4:\n gt_boxes = np.concatenate(\n [gt_boxes, np.ones(\n (gt_boxes.shape[0], 1))], axis=1) # boxes_cnt x 5\n # l, t, r, b\n gt_boxes = np.concatenate([np.zeros((1, 5)), gt_boxes]) # boxes_cnt x 5\n gt_boxes_area = (np.abs(\n (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])))\n gt_boxes = gt_boxes[np.argsort(gt_boxes_area)]\n boxes_cnt = len(gt_boxes)\n\n shift_x = np.arange(0, raw_width).reshape(-1, 1)\n shift_y = np.arange(0, raw_height).reshape(-1, 1)\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n off_l = (shift_x[:, :, np.newaxis, np.newaxis] -\n gt_boxes[np.newaxis, np.newaxis, :, 0, np.newaxis])\n off_t = (shift_y[:, :, np.newaxis, np.newaxis] -\n gt_boxes[np.newaxis, np.newaxis, :, 1, np.newaxis])\n off_r = -(shift_x[:, :, np.newaxis, np.newaxis] -\n gt_boxes[np.newaxis, np.newaxis, :, 2, np.newaxis])\n off_b = -(shift_y[:, :, np.newaxis, np.newaxis] -\n gt_boxes[np.newaxis, np.newaxis, :, 3, np.newaxis])\n\n center = ((np.minimum(off_l, off_r) * np.minimum(off_t, off_b)) /\n (np.maximum(off_l, off_r) * np.maximum(off_t, off_b) + eps))\n center = np.squeeze(np.sqrt(np.abs(center)))\n center[:, :, 0] = 0\n\n offset = np.concatenate([off_l, off_t, off_r, off_b],\n axis=3) # h x w x boxes_cnt * 4\n cls = gt_boxes[:, 4]\n\n cls_res_list = []\n ctr_res_list = []\n gt_boxes_res_list = []\n\n fm_height, fm_width = score_size, score_size\n\n fm_size_list = []\n fm_strides = [total_stride]\n fm_offsets = [score_offset]\n for fm_i in range(len(fm_strides)):\n fm_size_list.append([fm_height, fm_width])\n fm_height = int(np.ceil(fm_height / 2))\n fm_width = int(np.ceil(fm_width / 2))\n\n fm_size_list = fm_size_list[::-1]\n for fm_i, (stride, fm_offset) in enumerate(zip(fm_strides, fm_offsets)):\n fm_height = fm_size_list[fm_i][0]\n fm_width = fm_size_list[fm_i][1]\n\n shift_x = np.arange(0, fm_width)\n shift_y = np.arange(0, fm_height)\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n xy = np.vstack(\n (shift_y.ravel(), shift_x.ravel())).transpose() # (hxw) x 2\n # floor(stride / 2) + x * stride?\n off_xy = offset[fm_offset + xy[:, 0] * stride,\n fm_offset + xy[:, 1] * stride] # will reduce dim by 1\n # off_max_xy = off_xy.max(axis=2) # max of l,t,r,b\n off_valid = np.zeros((fm_height, fm_width, boxes_cnt))\n\n is_in_boxes = (off_xy > 0).all(axis=2)\n # is_in_layer = (off_max_xy <=\n # config.sep_win[fm_i]) & 
(off_max_xy >= config.sep_win[fm_i + 1])\n off_valid[\n xy[:, 0],\n xy[:,\n 1], :] = is_in_boxes #& is_in_layer # xy[:, 0], xy[:, 1] reduce dim by 1 to match is_in_boxes.shape & is_in_layer.shape\n off_valid[:, :, 0] = 0 # h x w x boxes_cnt\n\n hit_gt_ind = np.argmax(off_valid, axis=2) # h x w\n\n # gt_boxes\n gt_boxes_res = np.zeros((fm_height, fm_width, 4))\n gt_boxes_res[xy[:, 0],\n xy[:, 1]] = gt_boxes[hit_gt_ind[xy[:, 0], xy[:, 1]], :4]\n gt_boxes_res_list.append(gt_boxes_res.reshape(-1, 4))\n\n # cls\n cls_res = np.zeros((fm_height, fm_width))\n cls_res[xy[:, 0], xy[:, 1]] = cls[hit_gt_ind[xy[:, 0], xy[:, 1]]]\n cls_res_list.append(cls_res.reshape(-1))\n\n # center\n center_res = np.zeros((fm_height, fm_width))\n center_res[xy[:, 0], xy[:, 1]] = center[fm_offset +\n xy[:, 0] * stride, fm_offset +\n xy[:, 1] * stride,\n hit_gt_ind[xy[:, 0], xy[:, 1]]]\n ctr_res_list.append(center_res.reshape(-1))\n\n cls_res_final = np.concatenate(cls_res_list,\n axis=0)[:, np.newaxis].astype(np.float32)\n ctr_res_final = np.concatenate(ctr_res_list,\n axis=0)[:, np.newaxis].astype(np.float32)\n gt_boxes_res_final = np.concatenate(gt_boxes_res_list,\n axis=0).astype(np.float32)\n\n # choose pos and neg point\n # labels = np.empty((len(cls_res_final),), dtype=np.float32)\n # labels.fill(-1)\n #\n # pos_index= np.where(cls_res_final > 0)\n # neg_index = np.where(cls_res_final == 0)\n # if len(pos_index[0]) > config.rpn_pos_samples:\n # np.random.shuffle(pos_index[0])\n # selected_pos = pos_index[0][:config.rpn_pos_samples]\n # else:\n # selected_pos = pos_index[0]\n #\n # neg_num = config.rpn_total_samples - len(selected_pos)\n # np.random.shuffle(neg_index[0])\n # selected_neg = neg_index[0][:neg_num]\n #\n # labels[selected_pos] = 1\n # labels[selected_neg] = 0\n # labels = labels[:, np.newaxis]\n\n # return np.concatenate([cls_res_final, ctr_res_final, gt_boxes_res_final], axis=1)\n return cls_res_final, ctr_res_final, gt_boxes_res_final\n\n\nif __name__ == '__main__':\n # gt_boxes\n gt_boxes = np.asarray([[13, 25, 100, 140, 1]])\n # illustrative config for a 255x255 search image; real values come from the experiment configs\n config = {\n 'x_size': 255,\n 'score_size': 17,\n 'total_stride': 8,\n 'score_offset': (255 - 1 - (17 - 1) * 8) // 2,\n }\n make_densebox_target(gt_boxes, config)\n", "# -*- coding: utf-8 -*-\nfrom paths import ROOT_PATH # isort:skip\n\nimport argparse\nimport os.path as osp\n\nfrom loguru import logger\n\nimport torch\n\nfrom videoanalyst.config.config import cfg as root_cfg\nfrom videoanalyst.config.config import specify_task\nfrom videoanalyst.engine.builder import build as tester_builder\nfrom videoanalyst.model import builder as model_builder\nfrom videoanalyst.pipeline import builder as pipeline_builder\nfrom videoanalyst.utils import complete_path_wt_root_in_cfg\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(description='Test')\n parser.add_argument('-cfg',\n '--config',\n # default='/home/jaffe/PycharmProjects/state aware VOS/experiments/sat/test/sat_res50-davis17.yaml',\n default='experiments/sat/test/sat_res50-ALIVOS1.yaml',\n type=str,\n help='experiment configuration')\n\n return parser\n\n\ndef build_siamfcpp_tester(task_cfg):\n # build model\n model = model_builder.build(\"track\", task_cfg.model)\n # build pipeline\n pipeline = pipeline_builder.build(\"track\", task_cfg.pipeline, model)\n # build tester\n testers = tester_builder(\"track\", task_cfg.tester, \"tester\", pipeline)\n return testers\n\n\ndef build_sat_tester(task_cfg):\n # build model\n tracker_model = model_builder.build(\"track\", task_cfg.tracker_model)\n tracker = pipeline_builder.build(\"track\",\n task_cfg.tracker_pipeline,\n model=tracker_model)\n 
segmenter = model_builder.build('vos', task_cfg.segmenter)\n # build pipeline\n pipeline = pipeline_builder.build('vos',\n task_cfg.pipeline,\n segmenter=segmenter,\n tracker=tracker)\n # build tester\n testers = tester_builder('vos', task_cfg.tester, \"tester\", pipeline)\n return testers\n\n\nif __name__ == '__main__':\n # parsing\n parser = make_parser()\n parsed_args = parser.parse_args()\n\n # experiment config\n exp_cfg_path = osp.realpath(parsed_args.config)\n root_cfg.merge_from_file(exp_cfg_path)\n logger.info(\"Load experiment configuration at: %s\" % exp_cfg_path)\n\n # resolve config\n root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)\n root_cfg = root_cfg.test\n task, task_cfg = specify_task(root_cfg)\n task_cfg.freeze()\n\n torch.multiprocessing.set_start_method('spawn', force=True)\n\n if task == 'track':\n testers = build_siamfcpp_tester(task_cfg)\n elif task == 'vos':\n testers = build_sat_tester(task_cfg)\n for tester in testers:\n tester.test()\n" ]
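make_densebox_target documents its four config keys (`x_size`, `score_size`, `total_stride`, `score_offset`) in its docstring above, and the offset ties the other three together. A minimal call sketch with illustrative, self-consistent values (not taken from the repo's experiment configs); each of the score_size x score_size feature-map cells yields one class row, one center-ness row and one box row:

import numpy as np
from videoanalyst.data.target.target_impl.utils.make_densebox_target import make_densebox_target

# 255x255 search image, 17x17 score map, stride 8; offset centers the map.
config = {'x_size': 255, 'score_size': 17, 'total_stride': 8,
          'score_offset': (255 - 1 - (17 - 1) * 8) // 2}
gt_boxes = np.asarray([[13, 25, 100, 140, 1]], dtype=np.float32)
cls_t, ctr_t, box_t = make_densebox_target(gt_boxes, config)
assert cls_t.shape == (17 * 17, 1) and ctr_t.shape == (17 * 17, 1) and box_t.shape == (17 * 17, 4)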
[ [ "numpy.minimum", "numpy.abs", "numpy.maximum", "numpy.asarray", "numpy.arange", "numpy.ones", "numpy.concatenate", "numpy.ceil", "numpy.argmax", "numpy.argsort", "numpy.meshgrid", "numpy.zeros" ], [ "torch.multiprocessing.set_start_method" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lhcorralo/image-utils
[ "87e8fb9c1a6763d0dfa7a441ac105ccb9b6cc065" ]
[ "image_utils/image_utils.py" ]
[ "import argparse\nimport os\nimport rasterio as rio\nimport numpy as np\nimport cv2 as cv\nimport importlib.resources as pkg_resources\nfrom . import resources\n\n\n\ndef get_castellon_image_path():\n path_ctx = pkg_resources.path(resources, 'castellon.tif')\n return path_ctx\n\n\ndef generate(base_name, count, output_folder):\n os.makedirs(output_folder, exist_ok=True)\n with get_castellon_image_path() as castellon_path:\n with rio.Env():\n with rio.open(castellon_path) as src:\n for i in range(count):\n name = os.path.join(output_folder, f'{base_name}_{i}.tif')\n print(f\"Generating {name}\")\n with rio.open(name, 'w', **src.profile) as dst:\n for i in range(src.count):\n data = src.read(i + 1)\n dst.write(data, i + 1)\n\n\ndef blur(images, output_folder):\n os.makedirs(output_folder, exist_ok=True)\n for image in images:\n _, file_name = os.path.split(image)\n file_name, extension = os.path.splitext(file_name)\n blur_file = os.path.join(output_folder, f'{file_name}.blur{extension}')\n kernel = np.ones((5, 5), np.float32) / 25\n with rio.Env():\n with rio.open(image) as src:\n with rio.open(blur_file, 'w', **src.profile) as dst:\n print(f\"blurring {blur_file}\")\n for i in range(src.count):\n data = src.read(i + 1)\n data = cv.filter2D(data, -1, kernel)\n data = data.astype(src.profile['dtype'])\n dst.write(data, i + 1)\n pass\n\n\ndef parse_args():\n import argparse\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"action\", help='Actions available')\n\n generate_parser = subparsers.add_parser(\"generate\")\n generate_parser.add_argument(\"-c\", \"--count\", type=int, default=1, help=\"Number to images to export\")\n generate_parser.add_argument(\"-n\", \"--base-name\", default=\"image\", help=\"Base name for the generated images\")\n generate_parser.add_argument(\"-o\", \"--output-folder\")\n\n blur_parser = subparsers.add_parser(\"blur\")\n blur_parser.add_argument('-i', '--image', action='append', help='images to blur')\n blur_parser.add_argument(\"-o\", \"--output-folder\")\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n if args.action == 'generate':\n generate(args.base_name, args.count, args.output_folder)\n elif args.action == 'blur':\n blur(args.image, args.output_folder)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VictoriaLouiseS/improver
[ "a5c31be3430df429ae38e7c16e267fcbc2af1858" ]
[ "improver_tests/psychrometric_calculations/precip_phase_probability/test_PrecipPhaseProbability.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for psychrometric_calculations PrecipPhaseProbability plugin.\"\"\"\n\nimport unittest\n\nimport iris\nimport numpy as np\nfrom cf_units import Unit\nfrom iris.tests import IrisTest\n\nfrom improver.nbhood.nbhood import GeneratePercentilesFromANeighbourhood\nfrom improver.psychrometric_calculations.precip_phase_probability import (\n PrecipPhaseProbability,\n)\nfrom improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n\n\nclass Test__init__(IrisTest):\n\n \"\"\"Test the init method.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the __init__ method configures the plugin as expected.\"\"\"\n\n plugin = PrecipPhaseProbability()\n self.assertTrue(\n plugin.percentile_plugin is GeneratePercentilesFromANeighbourhood\n )\n self.assertEqual(plugin._nbhood_shape, \"circular\")\n self.assertAlmostEqual(plugin.radius, 10000.0)\n\n\nclass Test_process(IrisTest):\n\n \"\"\"Test the PhaseChangeLevel processing works\"\"\"\n\n def setUp(self):\n \"\"\"Set up orography cube (as zeros) and falling_phase_level cube with\n multiple realizations designed to return snow, sleet and rain. 
The\n middle realization gives both not-snow and not-rain because both the\n 20th percentile is <= zero and the 80th percentile is >= zero.\"\"\"\n\n # cubes for testing have a grid-length of 2000m.\n self.plugin = PrecipPhaseProbability(radius=2100.0)\n self.mandatory_attributes = {\n \"title\": \"mandatory title\",\n \"source\": \"mandatory_source\",\n \"institution\": \"mandatory_institution\",\n }\n\n data = np.zeros((3, 3), dtype=np.float32)\n\n orog_cube = set_up_variable_cube(\n data,\n name=\"surface_altitude\",\n units=\"m\",\n spatial_grid=\"equalarea\",\n attributes=self.mandatory_attributes,\n )\n\n falling_level_data = np.array(\n [\n [[-1, -1, -1], [-1, -1, -1], [-1, -1, -1]],\n [[0, -1, 0], [0, 1, 0], [0, -1, 0]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n ],\n dtype=np.float32,\n )\n\n falling_level_cube = set_up_variable_cube(\n falling_level_data,\n units=\"m\",\n spatial_grid=\"equalarea\",\n name=\"altitude_of_snow_falling_level\",\n realizations=[0, 1, 2],\n attributes=self.mandatory_attributes,\n )\n\n self.cubes = iris.cube.CubeList([falling_level_cube, orog_cube])\n\n def test_prob_snow(self):\n \"\"\"Test that process returns a cube with the right name, units and\n values. In this instance the phase change is from snow to sleet.\"\"\"\n result = self.plugin.process(self.cubes)\n expected = np.zeros((3, 3, 3), dtype=np.float32)\n expected[0] = 1.0\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(result.name(), \"probability_of_snow_at_surface\")\n self.assertEqual(result.units, Unit(\"1\"))\n self.assertDictEqual(result.attributes, self.mandatory_attributes)\n self.assertArrayAlmostEqual(result.data, expected)\n\n def test_prob_rain(self):\n \"\"\"Test that process returns a cube with the right name, units and\n values. 
In this instance the phase change is from sleet to rain.\"\"\"\n self.cubes[0].rename(\"altitude_of_rain_falling_level\")\n result = self.plugin.process(self.cubes)\n expected = np.zeros((3, 3, 3), dtype=np.float32)\n expected[2] = 1.0\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(result.name(), \"probability_of_rain_at_surface\")\n self.assertEqual(result.units, Unit(\"1\"))\n self.assertArrayAlmostEqual(result.data, expected)\n\n def test_unit_conversion(self):\n \"\"\"Test that process returns the same as test_prob_rain when the\n orography cube units are in feet.\"\"\"\n self.cubes[1].units = Unit(\"feet\")\n self.cubes[0].rename(\"altitude_of_rain_falling_level\")\n result = self.plugin.process(self.cubes)\n expected = np.zeros((3, 3, 3), dtype=np.float32)\n expected[2] = 1.0\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(result.name(), \"probability_of_rain_at_surface\")\n self.assertEqual(result.units, Unit(\"1\"))\n self.assertArrayAlmostEqual(result.data, expected)\n\n def test_unit_synonyms(self):\n \"\"\"Test that process returns the same as test_prob_rain when the\n orography cube units are \"metres\" (a synonym of \"m\").\"\"\"\n self.cubes[1].units = Unit(\"metres\")\n self.cubes[0].rename(\"altitude_of_rain_falling_level\")\n result = self.plugin.process(self.cubes)\n expected = np.zeros((3, 3, 3), dtype=np.float32)\n expected[2] = 1.0\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(result.name(), \"probability_of_rain_at_surface\")\n self.assertEqual(result.units, Unit(\"1\"))\n self.assertArrayAlmostEqual(result.data, expected)\n\n def test_bad_phase_cube(self):\n \"\"\"Test that process raises an exception when the input phase cube is\n incorrectly named.\"\"\"\n self.cubes[0].rename(\"altitude_of_kittens\")\n msg = \"Could not extract a rain or snow falling-level cube from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin.process(self.cubes)\n\n def test_bad_orography_cube(self):\n \"\"\"Test that process raises an exception when the input orography\n cube is incorrectly named.\"\"\"\n self.cubes[1].rename(\"altitude_of_kittens\")\n msg = \"Could not extract surface_altitude cube from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin.process(self.cubes)\n\n def test_bad_units(self):\n \"\"\"Test that process raises an exception when the input cubes cannot\n be coerced into the same units.\"\"\"\n self.cubes[1].units = Unit(\"seconds\")\n msg = \"Unable to convert from \"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin.process(self.cubes)\n\n def test_spatial_mismatch(self):\n \"\"\"Test that process raises an exception when the input cubes have\n different spatial coordinates.\"\"\"\n self.cubes[1] = set_up_variable_cube(\n self.cubes[1].data,\n name=\"surface_altitude\",\n units=\"m\",\n spatial_grid=\"latlon\",\n attributes=self.mandatory_attributes,\n )\n msg = \"Spatial coords mismatch between\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin.process(self.cubes)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
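The setUp docstring above encodes the plugin's decision rule in terms of neighbourhood percentiles: the middle realization is neither snow nor rain because its 20th percentile is <= zero and its 80th percentile is >= zero. A scalar caricature of that rule follows; the strict boundary handling here is an assumption, and the real plugin operates on percentile cubes generated over the 2100 m neighbourhood rather than on flat arrays:

import numpy as np

def phase_probability(falling_level, orography, phase):
    # Illustrative only: snow where the 80th percentile of
    # (falling level - orography) is below zero, rain where the
    # 20th percentile is above zero; neither test passes for the
    # mixed realization.
    diff = np.asarray(falling_level, dtype=np.float32) - orography
    if phase == 'snow':
        return float(np.percentile(diff, 80) < 0)
    return float(np.percentile(diff, 20) > 0)

snow_like = -np.ones(9)                            # realization 0 in setUp
mixed = np.array([0, -1, 0, 0, 1, 0, 0, -1, 0])    # realization 1
rain_like = np.ones(9)                             # realization 2
for r in (snow_like, mixed, rain_like):
    print(phase_probability(r, 0.0, 'snow'), phase_probability(r, 0.0, 'rain'))
# expected: 1.0 0.0 / 0.0 0.0 / 0.0 1.0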
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DrMatters/ParlAI
[ "755b9dcb778deb5a82029d69ae3260579c6450f1" ]
[ "parlai/agents/rag/retrievers.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nRetrievers for RAG.\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport copy\nimport csv\nimport gzip\nimport numpy as np\nimport os\nfrom parlai.core.message import Message\nimport torch\nimport torch.cuda\nimport torch.nn\nimport transformers\nfrom tqdm import tqdm\n\ntry:\n from transformers import BertTokenizerFast as BertTokenizer\nexcept ImportError:\n from transformers import BertTokenizer\nfrom typing import Tuple, List, Dict, Union, Optional, Any\nfrom typing_extensions import final\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom parlai.agents.tfidf_retriever.tfidf_retriever import TfidfRetrieverAgent\nfrom parlai.core.agents import create_agent, create_agent_from_model_file\nfrom parlai.core.build_data import modelzoo_path\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.core.loader import register_agent\nfrom parlai.core.opt import Opt\nfrom parlai.core.torch_generator_agent import TorchGeneratorAgent\nfrom parlai.core.torch_ranker_agent import TorchRankerAgent\nfrom parlai.tasks.wizard_of_internet.mutators import chunk_docs_in_message\nimport parlai.tasks.wizard_of_internet.constants as CONST\nimport parlai.utils.logging as logging\nfrom parlai.utils.torch import padded_tensor\nfrom parlai.utils.typing import TShared\nfrom parlai.utils.io import PathManager\n\nfrom parlai.agents.rag.dpr import DprQueryEncoder\nfrom parlai.agents.rag.polyfaiss import RagDropoutPolyWrapper\nfrom parlai.agents.rag.indexers import DenseHNSWFlatIndexer, indexer_factory\nfrom parlai.agents.rag.args import (\n RetrieverType,\n WOW_INDEX_PATH,\n WOW_PASSAGES_PATH,\n POLYENCODER_OPT_KEYS,\n TRANSFORMER_RANKER_BASE_OPT,\n WOW_COMPRESSED_INDEX_PATH,\n)\nfrom parlai.agents.rag.retrieve_api import SearchEngineRetriever\n\n\ndef load_passage_reader(\n ctx_file: str, return_dict: bool = True\n) -> Union[Dict[str, Tuple[str, str]], List[Tuple[str, str, str]]]:\n \"\"\"\n Load passages from file, corresponding to a FAISS index.\n\n We attempt to read the passages with a csv reader.\n\n If passage files are not saved correctly with a csv reader,\n reads can fail.\n\n :param ctxt_file:\n file to read\n\n :return reader:\n return a reader over the passages\n \"\"\"\n logging.info(f'Reading data from: {ctx_file}')\n f_open = gzip.open if ctx_file.endswith(\".gz\") else open\n try:\n passages = {} if return_dict else []\n with f_open(ctx_file) as tsvfile:\n _reader = csv.reader(tsvfile, delimiter='\\t') # type: ignore\n ids = []\n for idx, row in tqdm(enumerate(_reader)):\n if idx == 0:\n assert row[0] == 'id'\n ids.append(-1)\n elif idx <= 1:\n ids.append(row[0])\n if return_dict:\n passages[row[0]] = (row[1], row[2]) # type: ignore\n else:\n passages.append((row[0], row[1], row[2])) # type: ignore\n continue\n else:\n assert int(row[0]) == int(ids[idx - 1]) + 1, \"invalid load\"\n if return_dict:\n passages[row[0]] = (row[1], row[2]) # type: ignore\n else:\n passages.append((row[0], row[1], row[2])) # type: ignore\n ids.append(row[0])\n\n del ids\n except (csv.Error, AssertionError) as e:\n passages = {} if return_dict else []\n logging.error(f'Exception: {e}')\n logging.warning('Error in loading csv; loading via readlines')\n with f_open(ctx_file) as tsvfile:\n for idx, l in tqdm(enumerate(tsvfile.readlines())):\n line = l.replace('\\n', '').split('\\t') # type: ignore\n 
assert len(line) == 3\n if idx == 0:\n assert line[0] == 'id'\n if line[0] != 'id':\n if return_dict:\n passages[line[0]] = (line[1], line[2]) # type: ignore\n else:\n passages.append((line[0], line[1], line[2])) # type: ignore\n return passages\n\n\ndef load_passages_dict(ctx_file: str) -> Dict[str, Tuple[str, str]]:\n \"\"\"\n Load passages as a dict.\n\n :param ctx_file:\n file to read\n\n :return passages_dict:\n return a dict mapping passage id to a tuple of (text, title)\n \"\"\"\n psgs_dict = load_passage_reader(ctx_file, return_dict=True)\n assert isinstance(psgs_dict, dict)\n return psgs_dict\n\n\ndef load_passages_list(ctx_file: str) -> List[Tuple[str, str, str]]:\n \"\"\"\n Load passages as a list.\n\n :param ctx_file:\n file to read\n\n :return passages_dict:\n return a list of 3-tuples (id, text, title)\n \"\"\"\n psgs_list = load_passage_reader(ctx_file, return_dict=False)\n assert isinstance(psgs_list, list)\n return psgs_list\n\n\nclass Document:\n \"\"\"\n A Document used in retrieval.\n \"\"\"\n\n TITLE_DELIM = ' / '\n PASSAGE_DELIM = ' // '\n\n def __init__(self, title: str, text: str, docid: Union[int, str]):\n assert all(isinstance(t, str) for t in [title, text])\n self._title = title\n self._text = text\n self._id = str(docid)\n\n def get_title(self) -> str:\n return self._title\n\n def get_text(self) -> str:\n return self._text\n\n def get_id(self) -> str:\n return self._id\n\n def __repr__(self):\n return f\"ID: {self._id}\\nTitle: {self._title}\\nText: {self._text}\"\n\n def __str__(self):\n return f\"{self._title} | {self._text}\"\n\n def get_passage_str(self):\n return f\"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}{self.PASSAGE_DELIM}\"\n\n def get_tokenization_str(self):\n return f\"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}\"\n\n\nBLANK_DOC = Document('', '', '')\n\n\ndef argsort_scores_and_docs(\n scores: torch.Tensor, docs: List[Document], n_docs: int\n) -> Tuple[List[Document], torch.Tensor]:\n \"\"\"\n Sort scores and documents by score, return n_docs ranked docs/scores.\n\n :param scores:\n scores with which to rank\n :param docs:\n docs to argsort\n :param n_docs:\n number of docs to return\n\n :return:\n (docs, scores) --> sorted documents, according to scores.\n \"\"\"\n scores_sorter = scores.sort(descending=True)\n ranked_docs = [docs[idx] for idx in scores_sorter.indices[:n_docs]]\n ranked_scores = scores_sorter.values[:n_docs]\n return ranked_docs, ranked_scores\n\n\ndef clean_vec(\n vec: torch.LongTensor, end_idx: int, special_toks: List[int] = None\n) -> List[int]:\n \"\"\"\n Remove special tokens from a tensor prior to text conversion.\n \"\"\"\n new_vec = []\n for i in vec:\n if i == end_idx:\n break\n elif special_toks and i in special_toks:\n continue\n new_vec.append(i)\n return new_vec\n\n\nclass RagRetrieverTokenizer:\n \"\"\"\n Wrapper for various tokenizers used by RAG Query Model.\n \"\"\"\n\n VOCAB_PATH = 'vocab.txt'\n\n def __init__(\n self,\n datapath: str,\n query_model: str,\n dictionary: DictionaryAgent,\n max_length: int = 256,\n delimiter='\\n',\n ):\n \"\"\"\n :param query_model:\n query model type (e.g. 
bert)\n :param dictionary:\n ParlAI dictionary agent\n :param fast:\n whether to instantiate fast BertTokenizer\n :param max_length:\n maximum length of encoding.\n \"\"\"\n self.datapath = datapath\n self.query_model = query_model\n self.tokenizer = self._init_tokenizer(dictionary)\n self.max_length = max_length\n self._delimiter = delimiter\n\n def _init_tokenizer(\n self, dictionary: DictionaryAgent\n ) -> Union[BertTokenizer, DictionaryAgent]:\n \"\"\"\n If a regular parlai model, use the regular dictionary.\n\n Otherwise, build as necessary\n\n :param dictionary:\n ParlAI dictionary agent\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n try:\n return BertTokenizer.from_pretrained('bert-base-uncased')\n except (ImportError, OSError):\n vocab_path = PathManager.get_local_path(\n os.path.join(self.datapath, \"bert_base_uncased\", self.VOCAB_PATH)\n )\n return transformers.BertTokenizer.from_pretrained(vocab_path)\n else:\n return dictionary\n\n def get_pad_idx(self) -> int:\n \"\"\"\n Return pad token idx.\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n return self.tokenizer.pad_token_id\n else:\n return self.tokenizer[self.tokenizer.null_token]\n\n def get_delimiter(self) -> str:\n \"\"\"\n Return delimiter.\n \"\"\"\n return self._delimiter\n\n def get_bos_idx(self) -> int:\n \"\"\"\n Return start token idx.\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n return self.tokenizer.bos_token_id or 1\n else:\n return self.tokenizer[self.tokenizer.start_token]\n\n def get_eos_idx(self) -> int:\n \"\"\"\n Return start token idx.\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n return self.tokenizer.eos_token_id or 2\n else:\n return self.tokenizer[self.tokenizer.end_token]\n\n def encode(self, txt: str, txt_pair: Optional[str] = None) -> List[int]:\n \"\"\"\n Encode text.\n\n :param txt:\n text to encode\n :param txt_pair:\n Optional additional text to encode.\n Useful if encoding two parts of a text, e.g. 
title & text.\n\n :return encoding:\n return encoded text.\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n txt = txt.lower().strip()\n if txt_pair:\n txt_pair = txt_pair.lower().strip()\n return self.tokenizer.encode(\n txt,\n text_pair=txt_pair,\n add_special_tokens=True,\n max_length=self.max_length,\n pad_to_max_length=False,\n truncation='longest_first',\n )\n else:\n return self.tokenizer.txt2vec(txt)\n\n def decode(self, vec: torch.LongTensor) -> str:\n \"\"\"\n Decode a token vector into a string.\n \"\"\"\n if self.query_model in ['bert', 'bert_from_parlai_rag']:\n return self.tokenizer.decode(\n clean_vec(vec, self.get_eos_idx()), skip_special_tokens=True\n )\n else:\n return self.tokenizer.vec2txt(\n clean_vec(\n vec,\n self.get_eos_idx(),\n special_toks=[\n self.get_pad_idx(),\n self.get_bos_idx(),\n self.get_eos_idx(),\n ],\n )\n )\n\n\nclass RagRetriever(torch.nn.Module, ABC):\n \"\"\"\n RAG Retriever.\n\n Provides an interface to the RagModel for retrieving documents.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n super().__init__()\n self.retriever_type = RetrieverType(opt['rag_retriever_type'])\n if not (\n (\n self.retriever_type\n in (\n RetrieverType.SEARCH_ENGINE,\n RetrieverType.OBSERVATION_ECHO_RETRIEVER,\n )\n )\n or (opt.get('retriever_debug_index') in [None, 'none'])\n ):\n if opt.get('retriever_debug_index') == 'exact':\n opt['path_to_index'] = WOW_INDEX_PATH\n else:\n opt['path_to_index'] = WOW_COMPRESSED_INDEX_PATH\n opt['path_to_dpr_passages'] = WOW_PASSAGES_PATH\n self.opt = opt\n self.print_docs = opt.get('print_docs', False)\n self.max_doc_len = opt['max_doc_token_length']\n self.max_query_len = opt['rag_query_truncate'] or 1024\n self.end_idx = dictionary[dictionary.end_token]\n self._tokenizer = RagRetrieverTokenizer(\n datapath=opt['datapath'],\n query_model=opt['query_model'],\n dictionary=dictionary,\n delimiter=opt.get('delimiter', '\\n') or '\\n',\n )\n self.fp16 = (\n not opt['no_cuda']\n and torch.cuda.is_available()\n and self.opt.get('fp16', False)\n )\n\n @final\n def retrieve(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Retrieve documents, given a query vector.\n\n :param query:\n tokenized query\n\n :return (docs, scores):\n docs: list of Documents for each batch example.\n scores: [bsz, n_docs] document scores\n \"\"\"\n docs, scores = self.retrieve_and_score(query)\n if self.print_docs:\n self.display_docs(docs)\n self.top_docs = [[str(d) for d in ds] for ds in docs]\n return docs, scores\n\n @abstractmethod\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Retrieve documents for a given query.\n\n :param query:\n tokenized query\n\n :return (docs, scores):\n docs: list of Documents for each batch example.\n scores: [bsz, n_docs] document scores\n \"\"\"\n\n def tokenize_query(self, query: str) -> List[int]:\n \"\"\"\n Tokenize the query.\n\n :param query:\n query to tokenize\n\n :return tokenized_query:\n return list of tokens\n \"\"\"\n return self._tokenizer.encode(query)\n\n def vectorize_texts(\n self,\n input_text: List[str],\n tokenizer: RagRetrieverTokenizer,\n max_len: Optional[int] = None,\n ) -> torch.LongTensor:\n \"\"\"\n Vectorize a set of input texts with an arbitrary RagRetrieverTokenizer.\n\n :param input_text:\n list of input strings\n :param tokenizer:\n tokenizer that encodes the input strings\n :param max_len:\n max length to 
tokenize\n\n :return vecs:\n returns a stacked padded tensor of tokens.\n \"\"\"\n vecs = [tokenizer.encode(q) for q in input_text]\n if max_len:\n vecs = [v[:max_len] for v in vecs]\n vecs, _ = padded_tensor(\n vecs,\n fp16friendly=self.fp16,\n pad_idx=tokenizer.get_pad_idx(),\n max_len=max_len,\n )\n return vecs\n\n def get_delimiter(self) -> str:\n \"\"\"\n Return the tokenizer's delimiter.\n \"\"\"\n return self._tokenizer.get_delimiter()\n\n def display_docs(self, top_docs: List[List[Document]]):\n \"\"\"\n Prints documents.\n\n :param top_docs:\n list of documents for each batch item\n \"\"\"\n for docs in top_docs:\n for rank, doc in enumerate(docs):\n print(f\"Rank: {rank}\\n{doc}\")\n\n def share(self) -> TShared:\n \"\"\"\n Share retriever stuff.\n\n Share anything that can be handily used by other retrievers.\n\n This is primarily to share things that take up substantial RAM\n (indices, passages)\n \"\"\"\n return {}\n\n\nclass RagRetrieverReranker(RagRetriever, ABC):\n \"\"\"\n Trait that carries methods for Reranker-based retrievers.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n super().__init__(opt, dictionary, shared=shared)\n self.n_final_docs = opt['n_docs']\n\n @final\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Perform two-stage retrieval; rescore initial set of docs.\n\n :param query:\n query tokens\n\n :return (docs, scores):\n docs: list of Documents for each batch example\n scores: doc scores\n \"\"\"\n # 1. Get Initial documents\n initial_docs, initial_scores = self._retrieve_initial(query)\n new_scores = self._rescore(query, initial_docs)\n\n # 2. Get new scores\n final_docs: List[List[Document]] = []\n final_scores: List[torch.Tensor] = []\n new_score_lambda = self._get_new_score_lambda()\n\n for i in range(len(initial_docs)):\n docs_i = initial_docs[i]\n initial_scores_i = initial_scores[i]\n scores_i = torch.mul(initial_scores_i, (1 - new_score_lambda)) + torch.mul(\n new_scores[i], new_score_lambda\n )\n docs_i, scores_i = argsort_scores_and_docs(\n scores_i, docs_i, self.n_final_docs\n )\n final_docs.append(docs_i)\n final_scores.append(scores_i)\n\n return final_docs, torch.stack(final_scores)\n\n @abstractmethod\n def _retrieve_initial(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Perform initial stage of retrieval.\n\n :param query:\n tokenized query\n\n :return (docs, scores):\n docs: list of Documents for each batch example\n scores: doc scores\n \"\"\"\n\n @abstractmethod\n def _rescore(\n self, query: torch.LongTensor, docs: List[List[Document]]\n ) -> torch.Tensor:\n \"\"\"\n Rescore retrieved documents.\n\n :param query:\n tokenized query\n :param docs:\n List of initially retrieved top docs for each batch example\n\n :return scores:\n return new doc scores.\n \"\"\"\n\n @abstractmethod\n def _get_new_score_lambda(self) -> torch.nn.Parameter:\n \"\"\"\n Return the lambda used for computing the new score.\n \"\"\"\n\n\nclass DPRRetriever(RagRetriever):\n \"\"\"\n DPR Retriever.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared=None):\n \"\"\"\n Initialize DPR Retriever.\n \"\"\"\n super().__init__(opt, dictionary, shared=shared)\n self.load_index(opt, shared)\n self.n_docs = opt['n_docs']\n self.query_encoder = DprQueryEncoder(\n opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file']\n )\n\n def load_index(self, opt, shared):\n if not 
shared:\n self.indexer = indexer_factory(opt)\n index_path = modelzoo_path(opt['datapath'], opt['path_to_index'])\n passages_path = modelzoo_path(opt['datapath'], opt['path_to_dpr_passages'])\n embeddings_path = None\n if opt['path_to_dense_embeddings'] is not None:\n embeddings_path = modelzoo_path(\n opt['datapath'], opt['path_to_dense_embeddings']\n )\n self.indexer.deserialize_from(index_path, embeddings_path)\n self.passages = load_passages_dict(passages_path)\n elif shared:\n self.indexer = shared['indexer']\n self.passages = shared['passages']\n\n def share(self) -> TShared:\n \"\"\"\n Share FAISS retriever and passages.\n \"\"\"\n shared = super().share()\n shared['indexer'] = self.indexer\n shared['passages'] = self.passages\n return shared\n\n def index_retrieve(\n self, query: torch.Tensor, n_docs: int\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Retrieve over FAISS index.\n\n :param query:\n bsz x embed_dim query tensor\n :param n_docs:\n number of docs to retrieve\n\n :return (ids, scores):\n ids: [bsz, n_docs] tensor of document IDs\n scores: [bsz, n_docs] tensor of document scores\n \"\"\"\n # retrieve docs and scores, reconstruct document embeddings & scores\n # NOTE: important that detach occurs _for retrieval only_, as we use the\n # query encodings to compute scores later in this function; if detached,\n # gradient will not flow to the query encoder.\n top_docs_and_scores = self.indexer.search(\n query.cpu().detach().to(torch.float32).numpy(), n_docs\n )\n ids, np_vectors = zip(*top_docs_and_scores)\n vectors = torch.tensor(np.array(np_vectors)).to(query)\n if isinstance(self.indexer, DenseHNSWFlatIndexer):\n vectors = vectors[:, :, :-1]\n # recompute exact FAISS scores\n scores = torch.bmm(query.unsqueeze(1), vectors.transpose(1, 2)).squeeze(1)\n if torch.isnan(scores).sum().item():\n raise RuntimeError(\n '\\n[ Document scores are NaN; please look into the built index. ]\\n'\n '[ This generally happens if FAISS cannot separate vectors appropriately. ]\\n'\n '[ If using a compressed index, try building an exact index: ]\\n'\n '[ $ python index_dense_embeddings --indexer-type exact... 
]'\n )\n ids = torch.tensor([[int(s) for s in ss] for ss in ids])\n\n return ids, scores\n\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Retrieve and score.\n\n For DPR, we encode query tokens and retrieve from FAISS index.\n\n :param query:\n query tokens\n\n :return (docs, scores):\n docs: list of (text, title) tuples for each batch example\n scores: doc scores\n \"\"\"\n query_enc = self.query_encoder(query)\n top_doc_ids_tensor, top_doc_scores = self.index_retrieve(query_enc, self.n_docs)\n top_docs, top_doc_ids = [], []\n for i in range(query.size(0)):\n ids_i = []\n docs_i = []\n for int_id in top_doc_ids_tensor[i]:\n doc_id = str(int_id.item())\n passage = self.passages[doc_id]\n\n ids_i.append(doc_id)\n docs_i.append(Document(title=passage[1], text=passage[0], docid=doc_id))\n top_docs.append(docs_i)\n top_doc_ids.append(ids_i)\n return top_docs, top_doc_scores\n\n\nclass TFIDFRetriever(RagRetriever):\n \"\"\"\n Use TFIDF to retrieve wikipedia documents.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n \"\"\"\n Init a TFIDFRetrieverAgent.\n \"\"\"\n opt['query_model'] = 'tfidf'\n super().__init__(opt, dictionary, shared=shared)\n tfidf_opt = {\n 'model': 'rag_tfidf_retriever',\n 'model_file': (opt['tfidf_model_path']),\n 'tfidf_model_path': opt['tfidf_model_path'],\n 'retriever_num_retrieved': opt['n_docs'],\n 'retriever_mode': 'keys',\n 'override': {'model': 'rag_tfidf_retriever', 'remove_title': False},\n }\n self.n_docs = opt['n_docs']\n self.max_doc_paragraphs = opt['tfidf_max_doc_paragraphs']\n assert self.max_doc_paragraphs != 0\n if not shared:\n self.tfidf_retriever = create_agent(tfidf_opt)\n self.query_encoder = DprQueryEncoder(\n opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file']\n )\n else:\n self.tfidf_retriever = shared['tfidf_retriever']\n self.query_encoder = shared['query_encoder']\n\n def share(self) -> TShared:\n shared = super().share()\n shared['tfidf_retriever'] = self.tfidf_retriever\n shared['query_encoder'] = self.query_encoder\n return shared\n\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], Union[torch.Tensor, List[torch.Tensor]]]:\n \"\"\"\n Retrieve and score using TFIDF.\n\n :param query:\n query tokens\n\n :return (docs, scores):\n docs: list of (text, title) tuples for each batch example\n scores: doc scores\n \"\"\"\n\n def _build_doc(idx, cand):\n title = cand.split('\\n\\n')[0]\n paragraphs = cand.split('\\n\\n')[1:]\n if self.max_doc_paragraphs > 0:\n paragraphs = paragraphs[: self.max_doc_paragraphs]\n return Document(title=title, text=' '.join(paragraphs), docid=ids_i[idx])\n\n docs = []\n scores = []\n\n for q in query:\n query_text = self._tokenizer.decode(q)\n self.tfidf_retriever.observe({'text': query_text, 'episode_done': True})\n act = self.tfidf_retriever.act()\n if 'candidate_scores' not in act:\n scores_i = [0] * self.n_docs\n docs_i = [BLANK_DOC] * self.n_docs\n else:\n scores_i = act['candidate_scores']\n candidate_docs = act['text_candidates']\n ids_i = act['candidate_ids']\n candidate_docs = [\n _build_doc(j, c) for j, c in enumerate(act['text_candidates'])\n ]\n docs_i = candidate_docs[: self.n_docs]\n scores_i = scores_i[: self.n_docs]\n if len(docs_i) < self.n_docs:\n # Something went wrong with TFIDF here; need to add null docs\n logging.warning(\n f'Ex has less than {self.n_docs} TFIDF docs: {len(docs_i)}'\n )\n num_null = self.n_docs - 
len(docs_i)\n docs_i += [BLANK_DOC] * num_null\n scores_i = np.append(scores_i, [0] * num_null)\n docs.append(docs_i)\n scores.append(torch.FloatTensor(scores_i).to(query.device))\n\n scores = torch.stack(scores)\n return docs, scores\n\n\nclass DPRThenTorchReranker(RagRetrieverReranker, DPRRetriever, ABC):\n \"\"\"\n Base Class for DPR --> TorchRanker Retrievers.\n\n Handles some shared functionality.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n \"\"\"\n Initialize DPR model.\n\n It is up to subclasses to initialize rerankers.\n \"\"\"\n RagRetrieverReranker.__init__(self, opt, dictionary, shared=shared)\n self.dpr_num_docs = opt['dpr_num_docs']\n assert self.dpr_num_docs\n dpr_opt = copy.deepcopy(opt)\n dpr_opt['n_docs'] = self.dpr_num_docs\n DPRRetriever.__init__(self, dpr_opt, dictionary, shared=shared)\n\n def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]:\n \"\"\"\n Provide options used when building the rerankers.\n\n Base class ensures that various optimizations (cuda, fp16, parallel)\n are accounted for.\n\n :param opt:\n base opt\n\n :return options_dict:\n return a dictionary mapping options to values.\n \"\"\"\n return {\n 'no_cuda': opt['no_cuda'],\n 'fp16': opt['fp16'],\n 'model_parallel': opt['model_parallel'],\n 'data_parallel': opt['data_parallel'],\n }\n\n def _build_reranker(\n self, opt: Opt\n ) -> Tuple[torch.nn.Module, RagRetrieverTokenizer]:\n \"\"\"\n Builds reranker.\n\n :param opt:\n original opt\n\n :return (module, dict)\n module: the model from the agent created via the options\n dict: A RagRetrieverTokenizer, dictionary for the created model.\n \"\"\"\n rerank_opt = copy.deepcopy(opt)\n rerank_opt = {**TRANSFORMER_RANKER_BASE_OPT, **self.get_reranker_opts(opt)}\n logging.disable()\n agent = create_agent(rerank_opt)\n logging.enable()\n assert isinstance(agent, TorchRankerAgent)\n\n return (\n agent.model,\n RagRetrieverTokenizer(opt['datapath'], '', agent.dict, max_length=360),\n )\n\n def _retrieve_initial(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Initial DPR retrieval.\n\n Just call superclass to retrieve first stage.\n\n :param query:\n encoding of query\n :param mask:\n optional query mask\n\n :return (docs, scores):\n docs: list of (text, title) tuples for each batch example\n scores: doc scores\n \"\"\"\n return DPRRetriever.retrieve_and_score(self, query)\n\n\nclass DPRThenPolyRetriever(DPRThenTorchReranker):\n \"\"\"\n 2 Stage Retrieval with DPR and Poly-encoder.\n\n 1. Retrieve N Docs with DPR\n 2. Rescore docs with polyencoder\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n \"\"\"\n Initialize a Poly-Encoder Agent.\n \"\"\"\n # 1. Call super to init DPR\n super().__init__(opt, dictionary, shared=shared)\n\n # 2. 
Poly-encoder\n self.polyencoder, self.poly_tokenizer = self._build_reranker(opt)\n self.register_parameter(\n \"poly_score_lambda\",\n torch.nn.Parameter(torch.tensor([float(opt['poly_score_initial_lambda'])])),\n )\n\n def _get_new_score_lambda(self) -> torch.nn.Parameter:\n \"\"\"\n Return the lambda used for computing the new score.\n \"\"\"\n return self.poly_score_lambda # type: ignore\n\n def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]:\n \"\"\"\n Provide options used when building the polyencoder.\n\n :param opt:\n base opt\n\n :return options_dict:\n return a dictionary mapping options to values.\n \"\"\"\n from parlai.agents.rag.args import PRETRAINED_RANKER_TYPES\n\n init_path = opt['polyencoder_init_model']\n if init_path in PRETRAINED_RANKER_TYPES:\n init_model = f\"zoo:pretrained_transformers/poly_model_huge_{opt['polyencoder_init_model']}/model\"\n dict_file = f\"zoo:pretrained_transformers/poly_model_huge_{opt['polyencoder_init_model']}/model.dict\"\n else:\n assert os.path.exists(init_path)\n init_model = init_path\n dict_file = f'{init_path}.dict'\n\n return {\n 'model': 'transformer/polyencoder',\n 'init_model': init_model,\n 'dict_file': dict_file,\n # necessary opt args\n 'multitask_weights': [1],\n **{k: opt[k] for k in POLYENCODER_OPT_KEYS},\n **super().get_reranker_opts(opt),\n }\n\n def _rescore(\n self, query: torch.LongTensor, docs: List[List[Document]]\n ) -> torch.Tensor:\n \"\"\"\n Compute Poly-encoder score with initial set of Documents.\n\n Scoring taken from PolyencoderAgent.score_candidates\n\n :param query:\n query tokens, used in DPR retrieval.\n :param docs:\n List of initially retrieved top docs for each batch example\n\n :return new_scores:\n return scored documents.\n \"\"\"\n poly_query_vec = self.vectorize_texts(\n [self._tokenizer.decode(q) for q in query],\n self.poly_tokenizer,\n self.max_query_len,\n ).to(query.device)\n\n doc_vecs = torch.stack(\n [\n self.vectorize_texts(\n [d.get_tokenization_str() for d in docs_i],\n self.poly_tokenizer,\n self.max_doc_len,\n )\n for docs_i in docs\n ]\n ).to(query.device)\n\n ctxt_rep, ctxt_rep_mask, _ = self.polyencoder(ctxt_tokens=poly_query_vec)\n _, _, cand_rep = self.polyencoder(cand_tokens=doc_vecs)\n scores = self.polyencoder(\n ctxt_rep=ctxt_rep, ctxt_rep_mask=ctxt_rep_mask, cand_rep=cand_rep\n )\n return scores\n\n\nclass PolyFaissRetriever(DPRThenPolyRetriever):\n \"\"\"\n Poly-encoder Retriever, using FAISS.\n\n Performs FAISS retrieval to retrieve N initial docs; re-ranks according to Poly-\n encoder score to narrow down to K docs.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n assert opt['query_model'] == 'dropout_poly'\n super().__init__(opt, dictionary, shared=shared)\n self.dropout_poly = RagDropoutPolyWrapper(opt)\n self.polyencoder = self.dropout_poly.model\n\n self.poly_tokenizer = RagRetrieverTokenizer(\n opt['datapath'], opt['query_model'], self.dropout_poly.dict, max_length=360\n )\n\n model = (\n self.polyencoder.module\n if hasattr(self.polyencoder, 'module')\n else self.polyencoder\n )\n for param in model.encoder_cand.parameters(): # type: ignore\n # freeze document encoding for PolyFAISS.\n param.requires_grad = False\n\n\n@register_agent(\"rag_tfidf_retriever\")\nclass RagTfidfRetrieverAgent(TfidfRetrieverAgent):\n \"\"\"\n Wrapper around TFIDF Retriever to cache retrieved documents.\n \"\"\"\n\n def __init__(self, opt: Opt, shared: TShared = None):\n super().__init__(opt, shared)\n if not shared:\n self.docid_to_text = 
{}\n else:\n self.docid_to_text = shared.get('docid_to_text', {})\n\n def share(self) -> TShared:\n shared = super().share()\n shared['docid_to_text'] = self.docid_to_text\n return shared\n\n def doc2txt(self, docid):\n \"\"\"\n Cache document texts during train/eval.\n \"\"\"\n if docid not in self.docid_to_text:\n text = super().doc2txt(docid)\n self.docid_to_text[docid] = text\n else:\n text = self.docid_to_text[docid]\n return text\n\n\nBLANK_SEARCH_DOC = {'url': None, 'content': '', 'title': ''}\nNO_SEARCH_QUERY = 'no_passages_used'\n\n\nclass SearchQueryRetriever(RagRetriever):\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared):\n RagRetriever.__init__(self, opt, dictionary, shared=shared)\n opt['skip_retrieval_token'] = NO_SEARCH_QUERY\n self.n_docs = opt['n_docs']\n self.len_chunk = opt['splitted_chunk_length']\n self.doc_chunk_split_mode = opt['doc_chunk_split_mode']\n n_doc_chunks = opt['n_ranked_doc_chunks']\n chunk_ranker_type = opt['doc_chunks_ranker']\n if chunk_ranker_type == 'tfidf':\n self.chunk_reranker = TfidfChunkRanker(n_doc_chunks)\n elif chunk_ranker_type == 'head':\n self.chunk_reranker = HeadChunkRanker(n_doc_chunks)\n else:\n self.chunk_reranker = RetrievedChunkRanker(\n n_doc_chunks, opt['woi_doc_chunk_size']\n )\n\n if not shared:\n self.query_generator = self.init_search_query_generator(opt)\n else:\n self.query_generator = shared['query_generator']\n self.dict = dictionary\n self.init_query_encoder(opt)\n\n def share(self) -> TShared:\n shared = super().share()\n shared['query_generator'] = self.query_generator\n return shared\n\n def init_search_query_generator(self, opt) -> TorchGeneratorAgent:\n model_file = opt['search_query_generator_model_file']\n logging.info('Loading search generator model')\n logging.disable()\n search_query_gen_agent = create_agent_from_model_file(\n model_file,\n opt_overrides={\n 'skip_generation': False,\n 'inference': opt['search_query_generator_inference'],\n 'beam_min_length': opt['search_query_generator_beam_min_length'],\n 'beam_size': opt['search_query_generator_beam_size'],\n 'text_truncate': opt['search_query_generator_text_truncate'],\n },\n )\n logging.enable()\n logging.info('Search query generator model loading completed!')\n return search_query_gen_agent\n\n def generate_search_query(self, query: torch.LongTensor) -> List[str]:\n \"\"\"\n Generates a list of queries for the encoded query (context) tensor.\n \"\"\"\n texts = [self._tokenizer.decode(q) for q in query]\n obs_list = []\n for t in texts:\n msg = Message({'text': t, 'episode_done': True})\n obs_list.append(self.query_generator.observe(msg))\n self.query_generator.reset() # Erase the history\n search_quries = [r['text'] for r in self.query_generator.batch_act(obs_list)]\n logging.debug(f'Generated search queries {search_quries}')\n return search_quries\n\n def init_query_encoder(self, opt):\n if hasattr(self, 'query_encoder'):\n # It is already instantiated\n return\n self.query_encoder = DprQueryEncoder(\n opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file']\n )\n\n def text2tokens(self, txt: str) -> Union[List[str], List[int]]:\n if self.doc_chunk_split_mode == 'word':\n return txt.split(' ')\n else:\n return self.dict.txt2vec(txt)\n\n def tokens2text(self, tokens: Union[List[int], List[str]]) -> str:\n if self.doc_chunk_split_mode == 'word':\n return ' '.join(tokens)\n else:\n return self.dict.vec2txt(tokens)\n\n def pick_chunk(self, query: str, doc_title: str, doc_text: str, doc_url: str):\n \"\"\"\n Splits 
the document and returns the selected chunks.\n\n The number of returned chunks is controlled by `n_ranked_doc_chunks` in opt. The\n chunk selection is determined by `doc_chunks_ranker` in the opt.\n \"\"\"\n if not doc_text:\n # When there is no search query for the context\n return [(\"\", 0)]\n tokens = self.text2tokens(doc_text)\n if self.opt['doc_chunks_ranker'] != 'woi_chunk_retrieved_docs':\n doc_chunks = [\n self.tokens2text(tokens[i : i + self.len_chunk])\n for i in range(0, len(tokens), self.len_chunk)\n ]\n else:\n doc_chunks = self.tokens2text(tokens)\n return self.chunk_reranker.get_top_chunks(query, doc_title, doc_chunks, doc_url)\n\n\nclass SearchQuerySearchEngineRetriever(SearchQueryRetriever):\n \"\"\"\n A retriever that uses a search engine server for retrieving documents.\n\n It instantiates a `SearchEngineRetriever` object that in turn sends search queries\n to an external server for retrieving documents.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared):\n super().__init__(opt, dictionary, shared)\n if not shared:\n self.search_client = self.initiate_retriever_api(opt)\n else:\n self.search_client = shared['search_client']\n\n def share(self) -> TShared:\n shared = super().share()\n shared['search_client'] = self.search_client\n return shared\n\n def initiate_retriever_api(self, opt) -> SearchEngineRetriever:\n logging.info('Creating the search engine retriever.')\n return SearchEngineRetriever(opt)\n\n def _empty_docs(self, num: int):\n \"\"\"\n Generates the requested number of empty documents.\n \"\"\"\n return [BLANK_SEARCH_DOC for _ in range(num)]\n\n def rank_score(self, rank_id: int):\n \"\"\"\n Scores the chunks of the retrieved document based on their rank.\n\n Note that this is the score for the retrieved document and applies to all its\n chunks.\n \"\"\"\n return 1 / (1 + rank_id)\n\n def _display_urls(self, search_results):\n \"\"\"\n Generates a string that lists retrieved URLs (document IDs).\n \"\"\"\n return '\\n'.join([d['url'] for d in search_results if d['url']])\n\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Retrieves relevant documents for the query (the conversation context). This\n method conducts three main steps that are flagged in the main code as well.\n\n Step 1: generate search queries for the conversation context batch. This step\n uses the query generator model (self.query_generator).\n\n Step 2: use the search client to retrieve documents. This step uses the retrieval\n API agent (self.search_client).\n\n Step 3: generate the list of Document objects from the\n retrieved content. 
Here, if the documents are too long, the code splits them and\n chooses a chunk based on the selected `doc_chunks_ranker` in the opt.\n \"\"\"\n # step 1\n search_queries = self.generate_search_query(query)\n\n # step 2\n search_results_batch = self.search_client.retrieve(search_queries, self.n_docs)\n\n # step 3\n top_docs = []\n top_doc_scores = []\n max_n_docs: int = self.n_docs\n for sq, search_results in zip(search_queries, search_results_batch):\n if not search_results:\n search_results = self._empty_docs(self.n_docs)\n elif len(search_results) < self.n_docs:\n remain_docs = self.n_docs - len(search_results)\n search_results.extend(self._empty_docs(remain_docs))\n docs_i = []\n scores_i = []\n # Change this debug later\n logging.debug(f'URLS:\\n{self._display_urls(search_results)}')\n for i, doc in enumerate(search_results):\n url = doc['url']\n title = doc['title']\n dcontent = doc['content']\n assert type(dcontent) in (\n str,\n list,\n ), f'Unrecognized retrieved doc: {dcontent}'\n full_text = (\n dcontent if isinstance(dcontent, str) else '\\n'.join(doc['content'])\n )\n doc_chunks = [\n dc[0] for dc in self.pick_chunk(sq, title, full_text, url)\n ]\n for splt_id, splt_content in enumerate(doc_chunks):\n docs_i.append(\n Document(\n docid=url, text=splt_content, title=f'{title}_{splt_id}'\n )\n )\n scores_i.append(self.rank_score(i))\n max_n_docs = max(max_n_docs, len(docs_i))\n top_docs.append(docs_i)\n top_doc_scores.append(scores_i)\n # Pad with empty docs\n for i in range(len(top_docs)):\n n_empty = max_n_docs - len(top_docs[i])\n if n_empty:\n top_docs[i] = top_docs[i] + [BLANK_DOC] * n_empty\n top_doc_scores[i] = top_doc_scores[i] + [0] * n_empty\n self.top_docs = top_docs\n return top_docs, torch.Tensor(top_doc_scores).to(query.device)\n\n\nclass SearchQueryFAISSIndexRetriever(SearchQueryRetriever, DPRRetriever):\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared):\n SearchQueryRetriever.__init__(self, opt, dictionary, shared=shared)\n self.load_index(opt, shared)\n\n def share(self) -> TShared:\n shared = SearchQueryRetriever.share(self)\n shared.update(DPRRetriever.share(self))\n return shared\n\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n \"\"\"\n Retrieves from the FAISS index using a search query.\n\n This method relies on the `retrieve_and_score` method in the `RagRetriever`\n ancestor class. It receives the query (conversation context) and generates the\n search term queries based on it. It then uses those search queries (instead of\n the query text itself) to retrieve from the FAISS index.\n \"\"\"\n\n search_queries = self.generate_search_query(query)\n tokenized_search_queries, _ = padded_tensor(\n [self._tokenizer.encode(sq) for sq in search_queries]\n )\n top_docs, top_doc_scores = DPRRetriever.retrieve_and_score(\n self, tokenized_search_queries.to(query.device)\n )\n for query_id in range(len(top_docs)):\n if search_queries[query_id] == NO_SEARCH_QUERY:\n top_docs[query_id] = [BLANK_DOC for _ in range(self.n_docs)]\n return top_docs, top_doc_scores\n\n\nclass ObservationEchoRetriever(RagRetriever):\n \"\"\"\n This retriever returns (echoes) documents that have already been passed to it.\n\n Use this only with GoldFiD agents. 
It relies on the retrieved docs being included in\n the observed example of the agent.\n \"\"\"\n\n def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):\n self._delimiter = '\\n'\n self.n_docs = opt['n_docs']\n self._query_ids = dict()\n self._saved_docs = dict()\n self._largest_seen_idx = -1\n super().__init__(opt, dictionary, shared=shared)\n\n def add_retrieve_doc(self, query: str, retrieved_docs: List[Document]):\n self._largest_seen_idx += 1\n new_idx = self._largest_seen_idx\n if new_idx in self._query_ids.values() or new_idx in self._saved_docs:\n raise RuntimeError(\n \"Nonunique new_idx created in add_retrieve_doc in ObservationEchoRetriever \\n\"\n \"this might return the same set of docs for two distinct queries\"\n )\n self._query_ids[query] = new_idx\n self._saved_docs[new_idx] = retrieved_docs or [\n BLANK_DOC for _ in range(self.n_docs)\n ]\n\n def tokenize_query(self, query: str) -> List[int]:\n return [self._query_ids[query]]\n\n def get_delimiter(self) -> str:\n return self._delimiter\n\n def _clear_mapping(self):\n self._query_ids = dict()\n self._saved_docs = dict()\n self._largest_seen_idx = -1\n\n def retrieve_and_score(\n self, query: torch.LongTensor\n ) -> Tuple[List[List[Document]], torch.Tensor]:\n batch_size = query.size(0)\n\n retrieved_docs = []\n for encoded_query in query.tolist():\n docs_retrieve_idx = encoded_query[0]\n retrieved_docs.append(self._saved_docs[docs_retrieve_idx])\n\n # Some arbitrary scoring of docs\n max_num_docs = max([len(rtds) for rtds in retrieved_docs])\n retrieved_doc_scores = torch.Tensor([1 / (1 + i) for i in range(max_num_docs)])\n retrieved_doc_scores = retrieved_doc_scores.repeat(batch_size, 1).to(\n query.device\n )\n\n # empty the 2 mappings after each retrieval\n self._clear_mapping()\n\n return retrieved_docs, retrieved_doc_scores\n\n\nclass DocumentChunkRanker:\n \"\"\"\n Base class for controlling splitting long documents and selecting relevant chunks.\n \"\"\"\n\n def __init__(self, n_retrieved_chunks):\n self.n_ret_chunks = n_retrieved_chunks\n\n @abstractmethod\n def get_top_chunks(\n self,\n query: str,\n doc_title: str,\n doc_chunks: Union[List[str], str],\n doc_url: str,\n ):\n \"\"\"\n Ranks documents (chunks) based on their relevance to `query`\n \"\"\"\n\n\nclass HeadChunkRanker(DocumentChunkRanker):\n \"\"\"\n Returns the head chunks only.\n \"\"\"\n\n def get_top_chunks(\n self,\n query: str,\n doc_title: str,\n doc_chunks: Union[List[str], str],\n doc_url: str,\n ):\n \"\"\"\n Return chunks in doc-present order.\n \"\"\"\n return [(c,) for c in doc_chunks[: self.n_ret_chunks]]\n\n\nclass RetrievedChunkRanker(DocumentChunkRanker):\n \"\"\"\n Utilize retrieved doc chunk mutator.\n \"\"\"\n\n def __init__(self, n_retrieved_chunks, chunk_size: int = 500):\n super().__init__(n_retrieved_chunks)\n self.chunk_size = chunk_size\n\n def get_top_chunks(\n self,\n query: str,\n doc_title: str,\n doc_chunks: Union[List[str], str],\n doc_url: str,\n ):\n \"\"\"\n Return chunks according to the woi_chunk_retrieved_docs_mutator\n \"\"\"\n if isinstance(doc_chunks, list):\n docs = ''.join(doc_chunks)\n else:\n assert isinstance(doc_chunks, str)\n docs = doc_chunks\n chunks = chunk_docs_in_message(\n Message(\n {\n CONST.RETRIEVED_DOCS: [docs],\n CONST.RETRIEVED_DOCS_TITLES: [doc_title],\n CONST.RETRIEVED_DOCS_URLS: [doc_url],\n CONST.SELECTED_SENTENCES: [CONST.NO_SELECTED_SENTENCES_TOKEN],\n }\n ),\n self.chunk_size,\n )[CONST.RETRIEVED_DOCS]\n return [(c,) for c in chunks[: 
self.n_ret_chunks]]\n\n\nclass TfidfChunkRanker(DocumentChunkRanker):\n \"\"\"\n Uses TF-IDF to compare chunks to the original search query.\n \"\"\"\n\n def __init__(self, n_retrieved_chunks):\n super().__init__(n_retrieved_chunks)\n self._vectorizer = TfidfVectorizer()\n\n def get_top_chunks(\n self,\n query: str,\n doc_title: str,\n doc_chunks: Union[List[str], str],\n doc_url: str,\n ):\n assert isinstance(doc_chunks, list)\n vectorized_corpus = self._vectorizer.fit_transform(doc_chunks + [query])\n docs_vec = vectorized_corpus[:-1, :]\n q_vec = vectorized_corpus[-1, :]\n scores = np.hstack((q_vec * docs_vec.transpose()).toarray())\n top_chunk_ids = np.argsort(-scores)[: self.n_ret_chunks]\n return [(doc_chunks[i], scores[i]) for i in top_chunk_ids]\n\n\ndef retriever_factory(\n opt: Opt, dictionary: DictionaryAgent, shared=None\n) -> Optional[RagRetriever]:\n \"\"\"\n Build retriever.\n\n :param opt:\n ParlAI Opt\n :param dictionary:\n dictionary agent\n :param shared:\n shared objects.\n\n :return retriever:\n return a retriever for RAG.\n \"\"\"\n if opt.get('converting'):\n return None\n # only build retriever when not converting a BART model\n retriever = RetrieverType(opt['rag_retriever_type'])\n if retriever is RetrieverType.DPR:\n return DPRRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.TFIDF:\n return TFIDFRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.DPR_THEN_POLY:\n return DPRThenPolyRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.POLY_FAISS:\n return PolyFaissRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.SEARCH_ENGINE:\n return SearchQuerySearchEngineRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.SEARCH_TERM_FAISS:\n return SearchQueryFAISSIndexRetriever(opt, dictionary, shared=shared)\n elif retriever is RetrieverType.OBSERVATION_ECHO_RETRIEVER:\n return ObservationEchoRetriever(opt, dictionary, shared=shared)\n" ]
[ [ "torch.Tensor", "torch.isnan", "numpy.append", "torch.mul", "torch.FloatTensor", "torch.cuda.is_available", "torch.stack", "numpy.argsort", "numpy.array", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mvernacc/lvreuse
[ "e2ac6aca334b49b0d4f5f881861cb42ce86dd130", "e2ac6aca334b49b0d4f5f881861cb42ce86dd130" ]
[ "lvreuse/cost/indirect_ops.py", "lvreuse/performance/payload.py" ]
[ "import numpy as np\n\ndef indirect_ops_cost(launch_rate, launch_provider_type):\n \"\"\"Find the indirect operations cost per launch.\n\n Arguments:\n launch_rate (positive scalar): launch rate [units: launches/year]\n launch_provider_type: letter designation indicating the launch provider type\n\n Returns:\n Indirect operations cost per flight [units: Work-Year]\"\"\"\n\n ioc_data = {'A': [65, 49, 42, 38, 35, 33, 31, 30, 29, 27, 26, 25],\n 'B': [45, 34, 29, 27, 24, 23, 22, 21, 20, 19, 18, 17],\n 'C': [32, 24, 22, 19, 18, 17, 16, 15, 14, 13, 12, 11]}\n\n ioc_line = ioc_data[launch_provider_type]\n launch_rate_list = range(1, 13)\n\n ioc_cost = np.interp(launch_rate, launch_rate_list, ioc_line)\n\n return ioc_cost\n", "\"\"\"Payload capability of a 2-stage launch vehicle.\"\"\"\nimport numpy as np\nfrom scipy.optimize import fsolve\n\n\ndef payload_fixed_stages(c_1, c_2, e_1, e_2, y, dv_mission, return_all_pis=False):\n \"\"\"Get the payload capacity for a 2-stage launcher with fixed stagne mass ratio.\n\n Arguments:\n c_1 (scalar): 1st stage effective exhaust velocity, e.g. Isp * g_0 \n (average over recovery burns) [units: meter second**-1].\n c_2 (scalar): 2nd stage effective exhaust velocity, e.g. Isp * g_0 \n (average over recovery burns) [units: meter second**-1].\n e_1 (scalar): 1st stage unaviaible mass ratio [units: dimensionless].\n e_2 (scalar): 2nd stage unaviaible mass ratio [units: dimensionless].\n y (scalar): Stage mass ratio (m_p2 + m_s2)/(m_p1 + m_s1) [units: dimensionless].\n\n Returns:\n scalar: Overall payload mass ratio pi* [units: dimensionless].\n \"\"\"\n # Solve for the 1st stage payload fraction which matches the mission dv\n def root_fun(pi_1):\n \"\"\"Delta-v capability error as a function of 1st stage paylaod fraction.\"\"\"\n pi_2 = (y + 1) - y / pi_1\n dv = - c_1 * np.log(e_1 + (1 - e_1) * pi_1) - c_2 * np.log(e_2 + (1 - e_2) * pi_2)\n if np.isnan(dv):\n return dv_mission\n return dv_mission - dv\n pi_1_guess = y / (y + 1)\n x, infodict, ier, mesg = fsolve(root_fun, pi_1_guess, full_output=True)\n if ier != 1:\n return np.nan\n pi_1 = x[0]\n pi_2 = (y + 1) - y / pi_1\n pi_star = pi_1 * pi_2\n\n if pi_star < 0:\n return np.nan\n if return_all_pis:\n return (pi_star, pi_1, pi_2)\n return pi_star\n\n" ]
[ [ "numpy.interp" ], [ "numpy.isnan", "numpy.log", "scipy.optimize.fsolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
leitro/research-WriterAdaptation-HTR
[ "224f7843b7c848cd7f04957168f7f8f5e4386afa" ]
[ "utils.py" ]
[ "import os\nimport numpy as np\nimport cv2\nimport loadData6_vgg as loadData\n\nHEIGHT = loadData.IMG_HEIGHT\nWIDTH = loadData.IMG_WIDTH\noutput_max_len = loadData.OUTPUT_MAX_LEN\ntokens = loadData.tokens\nnum_tokens = loadData.num_tokens\nvocab_size = loadData.num_classes + num_tokens\nindex2letter = loadData.index2letter\nFLIP = loadData.FLIP\n\nload_data_func = loadData.loadData\n\ndef visualizeAttn(img, first_img_real_len, attn, epoch, count_n, name):\n folder_name = 'imgs'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n img = img[:, :first_img_real_len]\n img = img.cpu().numpy()\n img -= img.min()\n img *= 255./img.max()\n img = img.astype(np.uint8)\n weights = [img] # (80, 460)\n for m in attn[:count_n]:\n mask_img = np.vstack([m]*10) # (10, 55)\n mask_img *= 255./mask_img.max()\n mask_img = mask_img.astype(np.uint8)\n mask_img = cv2.resize(mask_img, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)\n weights.append(mask_img)\n output = np.vstack(weights)\n if loadData.FLIP:\n output = np.flip(output, 1)\n cv2.imwrite(folder_name+'/'+name+'_'+str(epoch)+'.jpg', output)\n\ndef writePredict(epoch, index, pred, flag): # [batch_size, vocab_size] * max_output_len\n folder_name = 'pred_logs'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n file_prefix = folder_name+'/'+flag+'_predict_seq.'\n\n pred = pred.data\n pred2 = pred.topk(1)[1].squeeze(2) # (15, 32)\n pred2 = pred2.transpose(0, 1) # (32, 15)\n pred2 = pred2.cpu().numpy()\n\n batch_count_n = []\n with open(file_prefix+str(epoch)+'.log', 'a') as f:\n for n, seq in zip(index, pred2):\n f.write(n+' ')\n count_n = 0\n for i in seq:\n if i ==tokens['END_TOKEN']:\n #f.write('<END>')\n break\n else:\n if i ==tokens['GO_TOKEN']:\n f.write('<GO>')\n elif i ==tokens['PAD_TOKEN']:\n f.write('<PAD>')\n else:\n f.write(index2letter[i-num_tokens])\n count_n += 1\n batch_count_n.append(count_n)\n f.write('\\n')\n return batch_count_n\n\ndef writeLoss(loss_value, flag): #flag: train, valid, test, domain\n folder_name = 'pred_logs'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n file_name = folder_name + '/loss_' + flag + '.log'\n with open(file_name, 'a') as f:\n f.write(str(loss_value))\n f.write(' ')\n" ]
[ [ "numpy.flip", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thewtex/itkwidgets
[ "c8ac608aebf38479bc45ec20700f82456a26b2fc" ]
[ "itkwidgets/_transform_types.py" ]
[ "__all__ = ['to_itk_image', 'to_point_set', 'to_geometry', 'vtkjs_to_zarr', 'zarr_to_vtkjs']\n\nimport itk\nimport numpy as np\n\ndef is_arraylike(arr):\n return hasattr(arr, 'shape') and \\\n hasattr(arr, 'dtype') and \\\n hasattr(arr, '__array__') and \\\n hasattr(arr, 'ndim')\n\n# from IPython.core.debugger import set_trace\n\nhave_imagej = False\ntry:\n import imagej # noqa: F401\n have_imagej = True\nexcept ImportError:\n pass\nhave_vtk = False\ntry:\n import vtk\n have_vtk = True\nexcept ImportError:\n pass\nhave_dask = False\ntry:\n import dask.array\n have_dask = True\nexcept ImportError:\n pass\nhave_simpleitk = False\ntry:\n import SimpleITK as sitk\n have_simpleitk = True\nexcept ImportError:\n pass\nhave_skan = False\ntry:\n import skan\n have_skan = True\nexcept ImportError:\n pass\nhave_zarr = False\ntry:\n import zarr\n have_zarr = True\nexcept ImportError:\n pass\n\ndef vtkjs_to_zarr(vtkjs, group, chunks=True):\n \"\"\"Convert a vtk.js-like Python object to a Zarr Group.\n\n Parameters\n ----------\n\n vtkjs: dictionary, required\n The vtk.js-like data structure to convert.\n\n group: zarr.Group, required\n The Zarr group to store the result.\n\n chunks: bool or int or tuple of ints, optional\n The chunk size passed to zarr.creation.create.\n \"\"\"\n for key, value in vtkjs.items():\n if key == 'vtkClass':\n group.attrs[key] = value\n elif key == 'arrays':\n for index, arr in enumerate(value):\n vtkjs_to_zarr(arr,\n group.create_group('arrays/' + str(index), True),\n chunks=chunks)\n elif isinstance(value, dict):\n vtkjs_to_zarr(value,\n group.create_group(key, True),\n chunks=chunks)\n elif isinstance(value, np.ndarray):\n group.array(key, value, chunks=chunks)\n else:\n group.attrs[key] = value\n return group\n\ndef zarr_to_vtkjs(group):\n \"\"\"Convert Zarr Group that contains vtk.js data structure to a Python-like object.\n\n Parameters\n ----------\n\n group: zarr.Group, required\n The Zarr group to convert.\n \"\"\"\n\n def process_group(group, result):\n for key, value in group.attrs.items():\n result[key] = value\n for name, value in group.arrays():\n result[name] = np.asarray(value)\n for name, value in group.groups():\n if name == 'arrays':\n nested = []\n for index, subgroup in value.groups():\n subresult = dict()\n process_group(subgroup, subresult)\n nested.append(subresult)\n result[name] = nested\n else:\n nested = dict()\n process_group(value, nested)\n result[name] = nested\n result = dict()\n process_group(group, result)\n return result\n\n\n_itk_pixel_to_vtkjs_type_components = {\n itk.SC: ('Int8Array', 1),\n itk.UC: ('Uint8Array', 1),\n itk.SS: ('Int16Array', 1),\n itk.US: ('Uint16Array', 1),\n itk.SI: ('Int32Array', 1),\n itk.UI: ('Uint32Array', 1),\n itk.F: ('Float32Array', 1),\n itk.D: ('Float64Array', 1),\n}\n\ndef _vtk_to_vtkjs(data_array):\n from vtk.util.numpy_support import vtk_to_numpy\n # From vtkType.h\n _vtk_data_type_to_vtkjs_type = {\n 2: 'Int8Array',\n 15: 'Int8Array',\n 3: 'Uint8Array',\n 4: 'Int16Array',\n 5: 'Uint16Array',\n 6: 'Int32Array',\n 7: 'Uint32Array',\n 8: 'BigInt64Array',\n 9: 'BigUint64Array',\n 10: 'Float32Array',\n 11: 'Float64Array',\n 12: 'BigInt64Array',\n 16: 'BigInt64Array',\n 17: 'BigUint64Array',\n }\n vtk_data_type = data_array.GetDataType()\n data_type = _vtk_data_type_to_vtkjs_type[vtk_data_type]\n numpy_array = vtk_to_numpy(data_array)\n if vtk_data_type == 8 or vtk_data_type == 16:\n ii32 = np.iinfo(np.int32)\n value_range = data_array.GetValueRange()\n if value_range[0] < ii32.min or value_range[1] > 
ii32.max:\n raise ValueError(\n '64-bit integers are not supported yet by WebGL / vtk.js')\n numpy_array = numpy_array.astype(np.int32)\n data_type = 'Int32Array'\n elif vtk_data_type == 9 or vtk_data_type == 17:\n ui32 = np.iinfo(np.uint32)\n value_range = data_array.GetValueRange()\n if value_range[0] < ui32.min or value_range[1] > ui32.max:\n raise ValueError(\n '64-bit integers are not supported by WebGL / vtk.js yet')\n numpy_array = numpy_array.astype(np.uint32)\n data_type = 'Uint32Array'\n\n return data_type, numpy_array\n\n\ndef _vtk_data_attributes_to_vtkjs(attributes):\n vtkjs_attributes = {\"vtkClass\": \"vtkDataSetAttributes\"}\n arrays = []\n for array_index in range(attributes.GetNumberOfArrays()):\n vtk_array = attributes.GetArray(array_index)\n data_type, values = _vtk_to_vtkjs(vtk_array)\n array = {\"data\": {\n 'vtkClass': 'vtkDataArray',\n 'name': vtk_array.GetName(),\n 'numberOfComponents': vtk_array.GetNumberOfComponents(),\n 'size': vtk_array.GetSize(),\n 'dataType': data_type,\n 'values': values}}\n scalars = attributes.GetScalars()\n if scalars and scalars.GetName() == vtk_array.GetName():\n vtkjs_attributes['activeScalars'] = array_index\n globalIds = attributes.GetGlobalIds()\n if globalIds and globalIds.GetName() == vtk_array.GetName():\n vtkjs_attributes['activeGlobalIds'] = array_index\n normals = attributes.GetNormals()\n if normals and normals.GetName() == vtk_array.GetName():\n vtkjs_attributes['activeNormals'] = array_index\n pedigreeIds = attributes.GetPedigreeIds()\n if pedigreeIds and pedigreeIds.GetName() == vtk_array.GetName():\n vtkjs_attributes['activePedigreeIds'] = array_index\n tCoords = attributes.GetTCoords()\n if tCoords and tCoords.GetName() == vtk_array.GetName():\n vtkjs_attributes['activeTCoords'] = array_index\n vectors = attributes.GetVectors()\n if vectors and vectors.GetName() == vtk_array.GetName():\n vtkjs_attributes['activeVectors'] = array_index\n arrays.append(array)\n vtkjs_attributes[\"arrays\"] = arrays\n return vtkjs_attributes\n\ndef _numpy_array_to_point_set(point_set_like):\n point_values = np.asarray(point_set_like).astype(np.float32)\n if len(\n point_values.shape) > 1 and point_values.shape[1] == 2 or point_values.shape[1] == 3:\n if not point_values.flags['CONTIGUOUS']:\n point_values = np.ascontiguousarray(point_values)\n if point_values.shape[1] == 2:\n point_values = np.hstack(\n (point_values, -5.0e-6 * np.ones((point_values.shape[0], 1)))).astype(np.float32)\n point_set = {'vtkClass': 'vtkPolyData'}\n points = {'vtkClass': 'vtkPoints',\n 'name': '_points',\n 'numberOfComponents': 3,\n 'dataType': 'Float32Array',\n 'size': point_values.size,\n 'values': point_values}\n point_set['points'] = points\n vert_values = np.ones((point_values.size * 2,), dtype=np.uint32)\n vert_values[1::2] = np.arange(point_values.size)\n verts = {'vtkClass': 'vtkCellArray',\n 'name': '_verts',\n 'numberOfComponents': 1,\n 'dataType': 'Uint32Array',\n 'size': vert_values.size,\n 'values': vert_values}\n point_set['verts'] = verts\n return point_set\n else:\n return None\n\ndef to_itk_image(image_like):\n\n if isinstance(image_like, (itk.Image, itk.VectorImage)):\n return image_like\n\n if is_arraylike(image_like):\n array = np.asarray(image_like)\n can_use_view = array.flags['OWNDATA']\n if have_dask and isinstance(image_like, dask.array.core.Array):\n can_use_view = False\n array = np.ascontiguousarray(array)\n # JavaScript does not support 64-bit integers\n if array.dtype == np.int64:\n array = array.astype(np.float32)\n elif array.dtype == 
np.uint64:\n array = array.astype(np.float32)\n if can_use_view:\n image_from_array = itk.image_view_from_array(array)\n else:\n image_from_array = itk.image_from_array(array)\n return image_from_array\n\n elif have_vtk and isinstance(image_like, vtk.vtkImageData):\n from vtk.util import numpy_support as vtk_numpy_support\n array = vtk_numpy_support.vtk_to_numpy(\n image_like.GetPointData().GetScalars())\n dims = list(image_like.GetDimensions())\n spacing = list(image_like.GetSpacing())\n origin = list(image_like.GetOrigin())\n\n # Check for zdim==1\n zdim = dims.pop()\n if zdim>1:\n # zdim>1, put it back in the dims array\n dims.append(zdim)\n else:\n #zdim==1, remove z-spacing and z-origin\n spacing.pop()\n origin.pop()\n\n array.shape = dims[::-1]\n image_from_array = itk.image_view_from_array(array)\n image_from_array.SetSpacing(spacing)\n image_from_array.SetOrigin(origin)\n return image_from_array\n\n elif have_simpleitk and isinstance(image_like, sitk.Image):\n array = sitk.GetArrayViewFromImage(image_like)\n image_from_array = itk.image_view_from_array(array)\n image_from_array.SetSpacing(image_like.GetSpacing())\n image_from_array.SetOrigin(image_like.GetOrigin())\n direction = image_like.GetDirection()\n npdirection = np.asarray(direction)\n npdirection = np.reshape(npdirection, (-1, image_like.GetDimension()))\n itkdirection = itk.matrix_from_array(npdirection)\n image_from_array.SetDirection(itkdirection)\n return image_from_array\n\n elif have_imagej:\n import imglyb\n if isinstance(image_like,\n imglyb.util.ReferenceGuardingRandomAccessibleInterval):\n array = imglyb.to_numpy(image_like)\n image_from_array = itk.image_view_from_array(array)\n return image_from_array\n\n elif isinstance(image_like, itk.ProcessObject):\n return itk.output(image_like)\n\n return None\n\n\ndef to_point_set(point_set_like): # noqa: C901\n if isinstance(point_set_like, itk.PointSet):\n if not hasattr(itk, 'PyVectorContainer'):\n raise ImportError(\n 'itk.MeshToPolyDataFilter is not available -- install the itk-meshtopolydata package')\n itk_polydata = itk.mesh_to_poly_data_filter(point_set_like)\n\n point_set = {'vtkClass': 'vtkPolyData'}\n\n points = itk_polydata.GetPoints()\n \n point_template = itk.template(points)\n element_type = point_template[1][1]\n \n # todo: test array_view here and below\n point_values = itk.array_from_vector_container(points)\n\n if len(point_values.shape) > 1 and point_values.shape[1] == 2 or point_values.shape[1] == 3:\n if point_values.shape[1] == 2:\n point_values = np.hstack(\n (point_values, -5.0e-6 * np.ones((point_values.shape[0], 1)))).astype(np.float32)\n points = {'vtkClass': 'vtkPoints',\n 'numberOfComponents': 3,\n 'dataType': 'Float32Array',\n 'size': point_values.size,\n 'values': point_values}\n point_set['points'] = points\n else:\n return None\n\n itk_point_data = itk_polydata.GetPointData()\n if itk_point_data and itk_point_data.Size():\n point_data_template = itk.template(itk_point_data)\n element_type = point_data_template[1][1]\n\n # Make use of functional interface if available\n data = itk.array_from_vector_container(itk_point_data)\n\n data_type, number_of_components = _itk_pixel_to_vtkjs_type_components[element_type]\n point_data = {\n \"vtkClass\": \"vtkDataSetAttributes\",\n \"activeScalars\": 0,\n \"arrays\": [\n {\"data\": {\n 'vtkClass': 'vtkDataArray',\n 'name': 'Point Data',\n 'numberOfComponents': number_of_components,\n 'size': data.size,\n 'dataType': data_type,\n 'values': data}\n }],\n }\n point_set['pointData'] = point_data\n\n 
return point_set\n    elif isinstance(point_set_like, itk.GroupSpatialObject):\n        children = point_set_like.GetChildren()\n\n        point_set = {'vtkClass': 'vtkPolyData'}\n\n        points_list = []\n        for ii in range(len(children)):\n            child = children[ii]\n            down_casted = itk.down_cast(child)\n            if isinstance(down_casted, itk.PointBasedSpatialObject):\n                n_points = down_casted.GetNumberOfPoints()\n                for jj in range(n_points):\n                    point = down_casted.GetPoint(jj)\n                    point.SetSpatialObject(down_casted)\n                    position = point.GetPositionInWorldSpace()\n                    points_list.append(list(position))\n        return _numpy_array_to_point_set(points_list)\n    elif is_arraylike(point_set_like):\n        return _numpy_array_to_point_set(point_set_like)\n    elif have_vtk and isinstance(point_set_like, vtk.vtkPolyData):\n        from vtk.util.numpy_support import vtk_to_numpy\n        point_set = {'vtkClass': 'vtkPolyData'}\n\n        points_data = vtk_to_numpy(point_set_like.GetPoints().GetData())\n        points_data = points_data.astype(np.float32).ravel()\n        points = {'vtkClass': 'vtkPoints',\n                  'name': '_points',\n                  'numberOfComponents': 3,\n                  'dataType': 'Float32Array',\n                  'size': points_data.size,\n                  'values': points_data}\n        point_set['points'] = points\n\n        vtk_verts = point_set_like.GetVerts()\n        if vtk_verts.GetNumberOfCells():\n            data = vtk_to_numpy(vtk_verts.GetData())\n            data = data.astype(np.uint32).ravel()\n            cells = {'vtkClass': 'vtkCellArray',\n                     'name': '_verts',\n                     'numberOfComponents': 1,\n                     'size': data.size,\n                     'dataType': 'Uint32Array',\n                     'values': data}\n            point_set['verts'] = cells\n        vtk_point_data = point_set_like.GetPointData()\n        if vtk_point_data and vtk_point_data.GetNumberOfArrays():\n            vtkjs_point_data = _vtk_data_attributes_to_vtkjs(vtk_point_data)\n            point_set['pointData'] = vtkjs_point_data\n\n        vtk_cell_data = point_set_like.GetCellData()\n        if vtk_cell_data and vtk_cell_data.GetNumberOfArrays():\n            vtkjs_cell_data = _vtk_data_attributes_to_vtkjs(vtk_cell_data)\n            point_set['cellData'] = vtkjs_cell_data\n\n        return point_set\n    elif isinstance(point_set_like, zarr.Group):\n        return zarr_to_vtkjs(point_set_like)\n\n    return None\n\n\ndef to_geometry(geometry_like):  # noqa: C901\n    if isinstance(geometry_like, itk.Mesh):\n        if not hasattr(itk, 'PyVectorContainer'):\n            raise ImportError(\n                'itk.MeshToPolyDataFilter is not available -- install the itk-meshtopolydata package')\n        itk_polydata = itk.mesh_to_poly_data_filter(geometry_like)\n\n        geometry = {'vtkClass': 'vtkPolyData'}\n\n        points = itk_polydata.GetPoints()\n        point_template = itk.template(points)\n        element_type = point_template[1][1]\n\n        # todo: test array_view here and below\n        # Make use of functional interface if available\n        point_values = itk.array_from_vector_container(points)\n\n        if len(point_values.shape) > 1 and (\n                point_values.shape[1] == 2 or point_values.shape[1] == 3):\n            if point_values.shape[1] == 2:\n                point_values.resize((point_values.shape[0], 3))\n                point_values[:, 2] = 0.0\n            points = {'vtkClass': 'vtkPoints',\n                      'numberOfComponents': 3,\n                      'dataType': 'Float32Array',\n                      'size': point_values.size,\n                      'values': point_values}\n            geometry['points'] = points\n        else:\n            return None\n\n        itk_verts = itk_polydata.GetVertices()\n        itk_lines = itk_polydata.GetLines()\n        itk_polys = itk_polydata.GetPolygons()\n        itk_strips = itk_polydata.GetTriangleStrips()\n        for cell_type, itk_cells in [('verts', itk_verts), ('lines', itk_lines),\n                                     ('polys', itk_polys), ('strips', itk_strips)]:\n            if itk_cells.Size():\n                # Make use of functional interface if available\n                data = itk.array_from_vector_container(itk_cells)\n                cells = 
{'vtkClass': 'vtkCellArray',\n 'name': '_' + cell_type,\n 'numberOfComponents': 1,\n 'size': data.size,\n 'dataType': 'Uint32Array',\n 'values': data}\n geometry[cell_type] = cells\n \n itk_point_data = itk_polydata.GetPointData()\n if itk_point_data and itk_point_data.Size():\n \n # Template parameter list [identifier_type, element_type]\n point_data_template = itk.template(itk_point_data)\n element_type = point_data_template[1][1]\n\n # Make use of functional interface if available\n data = itk.array_from_vector_container(itk_point_data)\n\n data_type, number_of_components = _itk_pixel_to_vtkjs_type_components[element_type]\n point_data = {\n \"vtkClass\": \"vtkDataSetAttributes\",\n \"activeScalars\": 0,\n \"arrays\": [\n {\"data\": {\n 'vtkClass': 'vtkDataArray',\n 'name': 'Point Data',\n 'numberOfComponents': number_of_components,\n 'size': data.size,\n 'dataType': data_type,\n 'values': data}\n }],\n }\n geometry['pointData'] = point_data\n itk_cell_data = itk_polydata.GetCellData()\n if itk_cell_data and itk_cell_data.Size():\n point_data_template = itk.template(itk_point_data)\n element_type = point_data_template[1][1]\n\n data = itk.array_from_vector_container(itk_cell_data)\n\n data_type, number_of_components = _itk_pixel_to_vtkjs_type_components[element_type]\n cell_data = {\n \"vtkClass\": \"vtkDataSetAttributes\",\n \"activeScalars\": 0,\n \"arrays\": [\n {\"data\": {\n 'vtkClass': 'vtkDataArray',\n 'name': 'Cell Data',\n 'numberOfComponents': number_of_components,\n 'size': data.size,\n 'dataType': data_type,\n 'values': data}\n }],\n }\n geometry['cellData'] = cell_data\n\n return geometry\n elif isinstance(geometry_like, itk.PolyLineParametricPath):\n vertex_list = geometry_like.GetVertexList()\n number_of_points = vertex_list.Size()\n geometry = {'vtkClass': 'vtkPolyData'}\n\n points_data = -5.0e-6 * \\\n np.ones((number_of_points, 3), dtype=np.float64)\n dimension = len(vertex_list.GetElement(0))\n # Todo: replace with itk.PyVectorContainer direct NumPy conversion\n for index in range(number_of_points):\n points_data[index, :dimension] = vertex_list.GetElement(index)\n points_data = points_data.astype(np.float32).ravel()\n points = {'vtkClass': 'vtkPoints',\n 'name': '_points',\n 'numberOfComponents': 3,\n 'dataType': 'Float32Array',\n 'size': points_data.size,\n 'values': points_data}\n geometry['points'] = points\n\n verts_data = np.ones((2 * number_of_points,), dtype=np.uint32)\n verts_data[1::2] = np.arange(number_of_points, dtype=np.uint32)\n\n lines_data = 2 * \\\n np.ones((3 * (number_of_points - 1),), dtype=np.uint32)\n lines_data[1::3] = np.arange(number_of_points - 1, dtype=np.uint32)\n lines_data[2::3] = np.arange(1, number_of_points, dtype=np.uint32)\n\n # For cell_type, cell_data in [('verts', verts_data),]:\n for cell_type, cell_data in [\n ('verts', verts_data), ('lines', lines_data)]:\n cells = {'vtkClass': 'vtkCellArray',\n 'name': '_' + cell_type,\n 'numberOfComponents': 1,\n 'size': cell_data.size,\n 'dataType': 'Uint32Array',\n 'values': cell_data}\n geometry[cell_type] = cells\n\n return geometry\n elif have_skan and isinstance(geometry_like, skan.csr.Skeleton):\n\n geometry = {'vtkClass': 'vtkPolyData'}\n\n number_of_points = geometry_like.coordinates.shape[0]\n dimension = geometry_like.coordinates.shape[1]\n\n points_data = -5.0e-6 * \\\n np.ones((number_of_points, 3), dtype=np.float64)\n points_data[:, :dimension] = np.flip(geometry_like.coordinates[:, :dimension], 1)\n points_data = points_data.astype(np.float32).ravel()\n points = {'vtkClass': 
'vtkPoints',\n 'name': '_points',\n 'numberOfComponents': 3,\n 'dataType': 'Float32Array',\n 'size': points_data.size,\n 'values': points_data}\n geometry['points'] = points\n\n verts_data = np.empty((0,), dtype=np.uint32)\n lines_data = np.empty((0,), dtype=np.uint32)\n for path in geometry_like.paths_list():\n path_number_of_points = len(path)\n verts = np.ones((2 * path_number_of_points,), dtype=np.uint32)\n verts[1::2] = np.array(path, dtype=np.uint32)\n verts_data = np.concatenate((verts_data, verts))\n\n lines = 2 * \\\n np.ones((3 * (path_number_of_points - 1),), dtype=np.uint32)\n lines[1::3] = np.array(path[:-1], dtype=np.uint32)\n lines[2::3] = np.array(path[1:], dtype=np.uint32)\n lines_data = np.concatenate((lines_data, lines))\n\n for cell_type, cell_data in [\n ('verts', verts_data), ('lines', lines_data)]:\n cells = {'vtkClass': 'vtkCellArray',\n 'name': '_' + cell_type,\n 'numberOfComponents': 1,\n 'size': cell_data.size,\n 'dataType': 'Uint32Array',\n 'values': cell_data}\n geometry[cell_type] = cells\n\n return geometry\n elif have_vtk and isinstance(geometry_like, vtk.vtkPolyData):\n from vtk.util.numpy_support import vtk_to_numpy\n\n geometry = {'vtkClass': 'vtkPolyData'}\n\n points_data = vtk_to_numpy(geometry_like.GetPoints().GetData())\n points_data = points_data.astype(np.float32).ravel()\n points = {'vtkClass': 'vtkPoints',\n 'name': '_points',\n 'numberOfComponents': 3,\n 'dataType': 'Float32Array',\n 'size': points_data.size,\n 'values': points_data}\n geometry['points'] = points\n\n vtk_verts = geometry_like.GetVerts()\n vtk_lines = geometry_like.GetLines()\n vtk_polys = geometry_like.GetPolys()\n vtk_strips = geometry_like.GetStrips()\n for cell_type, vtk_cells in [('verts', vtk_verts), ('lines', vtk_lines),\n ('polys', vtk_polys), ('strips', vtk_strips)]:\n if vtk_cells.GetNumberOfCells():\n data = vtk_to_numpy(vtk_cells.GetData())\n data = data.astype(np.uint32).ravel()\n cells = {'vtkClass': 'vtkCellArray',\n 'name': '_' + cell_type,\n 'numberOfComponents': 1,\n 'size': data.size,\n 'dataType': 'Uint32Array',\n 'values': data}\n geometry[cell_type] = cells\n vtk_point_data = geometry_like.GetPointData()\n if vtk_point_data and vtk_point_data.GetNumberOfArrays():\n vtkjs_point_data = _vtk_data_attributes_to_vtkjs(vtk_point_data)\n geometry['pointData'] = vtkjs_point_data\n\n vtk_cell_data = geometry_like.GetCellData()\n if vtk_cell_data and vtk_cell_data.GetNumberOfArrays():\n vtkjs_cell_data = _vtk_data_attributes_to_vtkjs(vtk_cell_data)\n geometry['cellData'] = vtkjs_cell_data\n\n return geometry\n elif have_vtk and isinstance(geometry_like, (vtk.vtkUnstructuredGrid,\n vtk.vtkStructuredGrid,\n vtk.vtkRectilinearGrid,\n vtk.vtkImageData)):\n geometry_filter = vtk.vtkGeometryFilter()\n geometry_filter.SetInputData(geometry_like)\n geometry_filter.Update()\n geometry = to_geometry(geometry_filter.GetOutput())\n return geometry\n elif isinstance(geometry_like, zarr.Group):\n return zarr_to_vtkjs(geometry_like)\n\n return None\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.ascontiguousarray", "numpy.ones", "numpy.concatenate", "numpy.iinfo", "numpy.array", "numpy.flip", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RyanHonwad/Two-Samples-in-GANs
[ "190853e56d6e7843c0d991dc64658cfb3311135e" ]
[ "stacked_MNIST_experiments/unrolled_GAN_experiment/D=0.5G/utils.py" ]
[ "\"\"\"\nSome codes from https://github.com/Newmu/dcgan_code\n\"\"\"\nfrom __future__ import division\nimport math\nimport json\nimport random\nimport pprint\nimport scipy.misc\nimport numpy as np\nfrom time import gmtime, strftime\nfrom six.moves import xrange\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\npp = pprint.PrettyPrinter()\n\nget_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])\n\ndef show_all_variables():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n\ndef get_image(image_path, input_height, input_width,\n resize_height=64, resize_width=64,\n crop=True, grayscale=False):\n image = imread(image_path, grayscale)\n return transform(image, input_height, input_width,\n resize_height, resize_width, crop)\n\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, grayscale = False):\n if (grayscale):\n return scipy.misc.imread(path, flatten = True).astype(np.float)\n else:\n return scipy.misc.imread(path).astype(np.float)\n\ndef merge_images(images, size):\n return inverse_transform(images)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n if (images.shape[3] in (3,4)):\n c = images.shape[3]\n img = np.zeros((h * size[0], w * size[1], c))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w, :] = image\n return img\n elif images.shape[3]==1:\n img = np.zeros((h * size[0], w * size[1]))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]\n return img\n else:\n raise ValueError('in merge(images,size) images parameter '\n 'must have dimensions: HxW or HxWx3 or HxWx4')\n\ndef imsave(images, size, path):\n image = np.squeeze(merge(images, size))\n return scipy.misc.imsave(path, image)\n\ndef center_crop(x, crop_h, crop_w,\n resize_h=64, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(\n x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])\n\ndef transform(image, input_height, input_width, \n resize_height=64, resize_width=64, crop=True):\n if crop:\n cropped_image = center_crop(\n image, input_height, input_width, \n resize_height, resize_width)\n else:\n cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])\n return np.array(cropped_image)/127.5 - 1.\n\ndef inverse_transform(images):\n return (images+1.)/2.\n\ndef to_json(output_path, *layers):\n with open(output_path, \"w\") as layer_f:\n lines = \"\"\n for w, b, bn in layers:\n layer_idx = w.name.split('/')[0].split('h')[1]\n\n B = b.eval()\n\n if \"lin/\" in w.name:\n W = w.eval()\n depth = W.shape[1]\n else:\n W = np.rollaxis(w.eval(), 2, 0)\n depth = W.shape[0]\n\n biases = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(B)]}\n if bn != None:\n gamma = bn.gamma.eval()\n beta = bn.beta.eval()\n\n gamma = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(gamma)]}\n beta = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(beta)]}\n else:\n gamma = {\"sy\": 1, \"sx\": 1, \"depth\": 0, \"w\": []}\n beta = {\"sy\": 1, \"sx\": 1, \"depth\": 0, \"w\": []}\n\n if \"lin/\" in w.name:\n fs = []\n for w in W.T:\n fs.append({\"sy\": 1, \"sx\": 1, \"depth\": W.shape[0], \"w\": ['%.2f' 
% elem for elem in list(w)]})\n\n lines += \"\"\"\n var layer_%s = {\n \"layer_type\": \"fc\", \n \"sy\": 1, \"sx\": 1, \n \"out_sx\": 1, \"out_sy\": 1,\n \"stride\": 1, \"pad\": 0,\n \"out_depth\": %s, \"in_depth\": %s,\n \"biases\": %s,\n \"gamma\": %s,\n \"beta\": %s,\n \"filters\": %s\n };\"\"\" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)\n else:\n fs = []\n for w_ in W:\n fs.append({\"sy\": 5, \"sx\": 5, \"depth\": W.shape[3], \"w\": ['%.2f' % elem for elem in list(w_.flatten())]})\n\n lines += \"\"\"\n var layer_%s = {\n \"layer_type\": \"deconv\", \n \"sy\": 5, \"sx\": 5,\n \"out_sx\": %s, \"out_sy\": %s,\n \"stride\": 2, \"pad\": 1,\n \"out_depth\": %s, \"in_depth\": %s,\n \"biases\": %s,\n \"gamma\": %s,\n \"beta\": %s,\n \"filters\": %s\n };\"\"\" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),\n W.shape[0], W.shape[3], biases, gamma, beta, fs)\n layer_f.write(\" \".join(lines.replace(\"'\",\"\").split()))\n\ndef make_gif(images, fname, duration=2, true_image=False):\n import moviepy.editor as mpy\n\n def make_frame(t):\n try:\n x = images[int(len(images)/duration*t)]\n except:\n x = images[-1]\n\n if true_image:\n return x.astype(np.uint8)\n else:\n return ((x+1)/2*255).astype(np.uint8)\n\n clip = mpy.VideoClip(make_frame, duration=duration)\n clip.write_gif(fname, fps = len(images) / duration)\n\ndef visualize(sess, dcgan, config, option):\n image_frame_dim = int(math.ceil(config.batch_size**.5))\n if option == 0:\n z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z[0]: z_sample})\n save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime(\"%Y%m%d%H%M%S\", gmtime()))\n elif option == 1:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in xrange(100):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 10))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n else:\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n\n save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))\n elif option == 2:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in [random.randint(0, 99) for _ in xrange(100)]:\n print(\" [*] %d\" % idx)\n z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))\n z_sample = np.tile(z, (config.batch_size, 1))\n #z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 10))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n else:\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n\n try:\n make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n except:\n save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime(\"%Y%m%d%H%M%S\", gmtime()))\n elif option == 3:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in xrange(100):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for 
kdx, z in enumerate(z_sample):\n                z[idx] = values[kdx]\n\n            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n            make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n    elif option == 4:\n        image_set = []\n        values = np.arange(0, 1, 1./config.batch_size)\n\n        for idx in xrange(100):\n            print(\" [*] %d\" % idx)\n            z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n            for kdx, z in enumerate(z_sample): z[idx] = values[kdx]\n\n            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))\n            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))\n\n        # wrap in list() so the two index ranges concatenate under Python 3\n        new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \\\n                         for idx in list(range(64)) + list(range(63, -1, -1))]\n        make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)\n\n\ndef image_manifold_size(num_images):\n    manifold_h = int(np.floor(np.sqrt(num_images)))\n    manifold_w = int(np.ceil(np.sqrt(num_images)))\n    assert manifold_h * manifold_w == num_images\n    return manifold_h, manifold_w\n" ]
[ [ "numpy.sqrt", "numpy.random.choice", "numpy.arange", "numpy.tile", "tensorflow.trainable_variables", "numpy.random.uniform", "numpy.array", "tensorflow.contrib.slim.model_analyzer.analyze_vars", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spencerahill/aospy
[ "6c8df45705927476e140df903bcb88e5abadae22" ]
[ "aospy/test/test_utils_vertcoord.py" ]
[ "#!/usr/bin/env python\n\"\"\"Test suite for aospy.utils module.\"\"\"\nimport sys\nimport unittest\n\nimport numpy as np\n\nimport aospy.utils.vertcoord as vertcoord\n\n\nclass AospyUtilsTestCase(unittest.TestCase):\n    def setUp(self):\n        self.p_in_hpa = np.array([1000, 925, 850, 775, 700, 600, 500, 400, 300,\n                                  200, 150, 100, 70, 50, 30, 20, 10],\n                                 dtype=np.float64)\n        self.p_in_pa = self.p_in_hpa*1e2\n        self.p_top = 0\n        self.p_bot = 1.1e5\n        # half-level edges midway between adjacent full levels\n        self.p_edges = 0.5*(self.p_in_pa[1:] + self.p_in_pa[:-1])\n        self.phalf = np.concatenate(([self.p_bot], self.p_edges, [self.p_top]))\n\n    def tearDown(self):\n        pass\n\n\nclass TestUtils(AospyUtilsTestCase):\n    def test_to_pascal_scalar_positive(self):\n        self.assertEqual(vertcoord.to_pascal(1e2), 1e4)\n        self.assertEqual(vertcoord.to_pascal(1e5), 1e5)\n\n    def test_to_pascal_scalar_negative(self):\n        self.assertEqual(vertcoord.to_pascal(-1e2), -1e4)\n        self.assertEqual(vertcoord.to_pascal(-1e5), -1e5)\n\n    def test_to_pascal_array(self):\n        np.testing.assert_array_equal(vertcoord.to_pascal(self.p_in_hpa),\n                                      self.p_in_pa)\n        np.testing.assert_array_equal(vertcoord.to_pascal(self.p_in_pa),\n                                      self.p_in_pa)\n\n\nif __name__ == '__main__':\n    sys.exit(unittest.main())\n" ]
[ [ "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prakashcc/datacamp-python-data-science-track
[ "8d35b2d78e5f923c7320e33bfc7b038556efe30a" ]
[ "10.Merging DataFrames with pandas/Chapter 1 - Preparing data.py" ]
[ "#Chapter 1 - Preparing data\r\n#--------------------------------------------------------------------------------------------#\r\n\r\n\r\n#Reading DataFrames from multiple files in a loop\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Create the list of file names: filenames\r\nfilenames = ['Gold.csv', 'Silver.csv', 'Bronze.csv']\r\n\r\n# Create the list of three DataFrames: dataframes\r\ndataframes = []\r\nfor filename in filenames :\r\n dataframes.append(pd.read_csv(filename))\r\n\r\n# Print top 5 rows of 1st DataFrame in dataframes\r\nprint(dataframes[0].head())\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n#Combining DataFrames from multiple data files\r\n\r\n\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Make a copy of gold: medals\r\nmedals = gold.copy()\r\n\r\n# Create list of new column labels: new_labels\r\nnew_labels = ['NOC', 'Country', 'Gold']\r\n\r\n# Rename the columns of medals using new_labels\r\nmedals.columns = new_labels\r\n\r\n# Add columns 'Silver' & 'Bronze' to medals\r\nmedals['Silver'] = silver['Total']\r\nmedals['Bronze'] = bronze['Total']\r\n\r\n# Print the head of medals\r\nprint(medals.head())\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n\r\n#Sorting DataFrame with the Index & columns\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Read 'monthly_max_temp.csv' into a DataFrame: weather1\r\nweather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')\r\n\r\n# Print the head of weather1\r\nprint(weather1.head())\r\n\r\n# Sort the index of weather1 in alphabetical order: weather2\r\nweather2 = weather1.sort_index()\r\n\r\n# Print the head of weather2\r\nprint(weather2.head())\r\n\r\n# Sort the index of weather1 in reverse alphabetical order: weather3\r\nweather3 = weather1.sort_index(ascending=False)\r\n\r\n# Print the head of weather3\r\nprint(weather3.head())\r\n\r\n# Sort weather1 numerically using the values of 'Max TemperatureF': weather4\r\nweather4 = weather1.sort_values('Max TemperatureF')\r\n\r\n# Print the head of weather4\r\nprint(weather4.head())\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n\r\n##Reindexing DataFrame from a list\r\n\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Reindex weather1 using the list year: weather2\r\nweather2 = weather1.reindex(year)\r\n\r\n# Print weather2\r\nprint(weather2)\r\n\r\n# Reindex weather1 using the list year with forward-fill: weather3\r\nweather3 = weather1.reindex(year).ffill()\r\n\r\n# Print weather3\r\nprint(weather3)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n\r\n#Reindexing using another DataFrame Index\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Reindex names_1981 with index of names_1881: common_names\r\ncommon_names = names_1981.reindex(names_1881.index)\r\n\r\n# Print shape of common_names\r\nprint(common_names.shape)\r\n\r\n# Drop rows with null counts: common_names\r\ncommon_names = common_names.dropna()\r\n\r\n# Print shape of new common_names\r\nprint(common_names.shape)\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n#Broadcasting in arithmetic formulas\r\n# Extract selected columns from weather as new DataFrame: temps_f\r\ntemps_f = weather[['Min TemperatureF','Mean TemperatureF','Max TemperatureF']]\r\n\r\n# Convert temps_f to celsius: 
temps_c\r\ntemps_c = (temps_f - 32) * 5/9\r\n\r\n# Rename 'F' in column names with 'C': temps_c.columns\r\ntemps_c.columns = temps_c.columns.str.replace('F', 'C')\r\n\r\n# Print first 5 rows of temps_c\r\nprint(temps_c.head())\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n#Computing percentage growth of GDP\r\nimport pandas as pd\r\n\r\n# Read 'GDP.csv' into a DataFrame: gdp\r\ngdp = pd.read_csv('GDP.csv', index_col='DATE', parse_dates=True)\r\n\r\n# Slice all the gdp data from 2008 onward: post2008\r\npost2008 = gdp['2008':]\r\n\r\n# Print the last 8 rows of post2008\r\nprint(post2008.tail(8))\r\n\r\n# Resample post2008 by year, keeping last(): yearly\r\nyearly = post2008.resample('A').last()\r\n\r\n# Print yearly\r\nprint(yearly)\r\n\r\n# Compute percentage growth of yearly: yearly['growth']\r\nyearly['growth'] = yearly.pct_change() * 100\r\n\r\n# Print yearly again\r\nprint(yearly)\r\n\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n#Converting currency of stocks\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Read 'sp500.csv' into a DataFrame: sp500\r\nsp500 = pd.read_csv('sp500.csv',index_col='Date', parse_dates=True)\r\n\r\n# Read 'exchange.csv' into a DataFrame: exchange\r\nexchange = pd.read_csv('exchange.csv',index_col='Date', parse_dates=True)\r\n\r\n# Subset 'Open' & 'Close' columns from sp500: dollars\r\ndollars = sp500[['Open','Close']]\r\n\r\n# Print the head of dollars\r\nprint(dollars.head())\r\n\r\n# Convert dollars to pounds: pounds\r\npounds = dollars.multiply(exchange['GBP/USD'], axis='rows')\r\n\r\n# Print the head of pounds\r\nprint(pounds.head())\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------------#\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
zalayo/Hierarchical-Linkage-Regression
[ "0d6eb11affb3dbf452cd6ded6d564862f9ccb55f", "0d6eb11affb3dbf452cd6ded6d564862f9ccb55f" ]
[ "HLRdata.py", "libHLR.py" ]
[ "\r\nimport numpy as np\r\nfrom libHLR import randclust as rclust\r\n\r\n## set paths\r\ninputpath = './input/'\r\n\r\n## Generate Random Clustered Data for training and testing\r\n################################################################################\r\n## Total number of clustering instances\r\nK = 100\r\n## maximum number of clusters allowed per clustering instance:\r\nmaxclust = 30\r\n## embedding dimension of data (i.e. feature dimensionality)\r\ndim = 50\r\n## maximum span of cluster centers (applied to all axes):\r\n# lower bound\r\nlb = -10\r\n# upper bound\r\nub = 10\r\n## bounds on cluster standard deviation (applied to all axes):\r\n# lower bound\r\nsdl = 0.25\r\n# upper bound\r\nsdu = 2.5\r\n## minimum number of samples allowed per cluster\r\nminsamples = 10\r\n## maximum number of samples allowed per cluster\r\nmaxsamples = 500\r\n################################################################################\r\n\r\n# seed random generator\r\nnp.random.seed()\r\n\r\n# initialize HLR random cluster object\r\nxRand = rclust(K)\r\n\r\n# set user-defined parameters\r\nxRand.maxclust = maxclust\r\nxRand.dim = dim\r\nxRand.lb = lb\r\nxRand.ub = ub\r\nxRand.sdl = sdl\r\nxRand.sdu = sdu\r\nxRand.minsamples = minsamples\r\nxRand.maxsamples = maxsamples\r\n\r\n# generate random clustered dataset\r\n# X = clustered data\r\n# y = ground truth cluster number\r\n# kx = clustering instance index\r\n# cx = cluster index which rolls over for each clustering instance\r\nX, y, kx, cx = xRand.dataset()\r\n\r\n# save generated data\r\nnp.savetxt(inputpath + 'X.txt', X, fmt='%.8f', delimiter='\\t', newline='\\n')\r\nnp.savetxt(inputpath + 'kx.txt', kx, fmt='%d')\r\nnp.savetxt(inputpath + 'cx.txt', cx, fmt='%d')\r\nnp.savetxt(inputpath + 'y.txt', y, fmt='%d')\r\n", "import os\r\nimport numpy as np\r\nfrom sklearn.cluster import AgglomerativeClustering as agglom\r\n\r\n\r\nclass randclust:\r\n ## Generate Random Clustered Data for training and testing\r\n ############################################################################\r\n ## Total number of clustering instances\r\n K = 100\r\n ## maximum number of clusters allowed per clustering instance:\r\n maxclust = 30\r\n ## embedding dimension of data (i.e. 
feature dimensionality)\r\n dim = 50\r\n ## maximum span of cluster centers (applied to all axes):\r\n # lower bound\r\n lb = -10\r\n # upper bound\r\n ub = 10\r\n ## bounds on cluster standard deviation (applied to all axes):\r\n # lower bound\r\n sdl = 0.25\r\n # upper bound\r\n sdu = 2.5\r\n ## minimum number of samples allowed per cluster\r\n minsamples = 20\r\n ## maximum number of samples allowed per cluster\r\n maxsamples = 500\r\n ############################################################################\r\n\r\n # instantiate object with total number of clustering intances\r\n def __init__(self, value):\r\n np.random.seed()\r\n self.K = value\r\n\r\n # generate ith cluster in kth instance\r\n def cluster(self, clustsz, i):\r\n xkc = np.random.uniform(self.lb, self.ub, size = (1,self.dim))\r\n sdk = np.random.uniform(self.sdl, self.sdu, size = (1,self.dim))\r\n xki = np.multiply(np.random.randn(clustsz[0], self.dim), sdk)\r\n Xki = xkc + xki\r\n indx_ki = (i+1)*np.ones((clustsz[0],))\r\n return Xki, indx_ki\r\n\r\n # generate kth clustering instance\r\n def instance(self, k):\r\n clustnum_k = np.random.randint(1, self.maxclust, size=1)\r\n Xk = np.asarray([])\r\n indx_i = np.asarray([])\r\n for i in range(clustnum_k[0]):\r\n clustsz = np.random.randint(self.minsamples,\r\n self.maxsamples, size=1)\r\n Xki, indx_ki = self.cluster(clustsz, i)\r\n if i != 0:\r\n Xk = np.vstack((Xk, Xki))\r\n if i == 0:\r\n Xk = Xki\r\n indx_i = np.concatenate((indx_i, indx_ki))\r\n indx_k = (k+1)*np.ones((np.shape(Xk)[0],))\r\n return Xk, indx_k, indx_i, clustnum_k\r\n\r\n # generate dataset of K clustering instances\r\n def dataset(self):\r\n X, y, kx, cx = np.asarray([]), np.asarray([]), \\\r\n np.asarray([]), np.asarray([])\r\n for k in range(self.K):\r\n Xk, indx_k, indx_i, clustnum_k = self.instance(k)\r\n if k != 0:\r\n X = np.vstack((X, Xk))\r\n if k == 0:\r\n X = Xk\r\n y = np.concatenate((y, clustnum_k))\r\n kx = np.concatenate((kx, indx_k))\r\n cx = np.concatenate((cx, indx_i))\r\n y = np.asarray(y)\r\n return X, y, kx, cx\r\n\r\n\r\nclass linkages:\r\n ## Generate Hierarchical Linkage Features for Regression\r\n ############################################################################\r\n # Bin count for 2D histogram\r\n R = 40\r\n # Distance metric (e.g. 'manhattan' or 'euclidean')\r\n distance = 'manhattan'\r\n # Link type (e.g. 
'complete' or 'ward')\r\n    linktype = 'complete'\r\n    # Definition of epsilon\r\n    eps = np.finfo(np.float64).eps\r\n    ###########################################################################\r\n\r\n    def __init__(self, distance, linktype):\r\n        # store the requested metric and link type so the arguments are used\r\n        self.distance = distance\r\n        self.linktype = linktype\r\n        # instantiate linkage model\r\n        self.linkage = agglom(affinity = self.distance,\r\n                              linkage = self.linktype,\r\n                              compute_full_tree = 'auto',\r\n                              connectivity = None,\r\n                              distance_threshold = None,\r\n                              memory = None,\r\n                              n_clusters = 1)\r\n\r\n    def get(self,X):\r\n        # Generate linkage hierarchy and extract feature vector\r\n        # Number of samples in X\r\n        sample_num = np.shape(X)[0]\r\n        # Generate linkage hierarchy\r\n        self.linkage.fit(X)\r\n        # Get internal node coordinates of linkage tree\r\n        Z = np.asarray(self.linkage.children_ + 1)\r\n        Z = Z.reshape((self.linkage.n_leaves_ - 1, 2))\r\n        # Generate 2D histogram of linkage coordinates\r\n        Nz, nx, ny = np.histogram2d(Z[:,0], Z[:,1], bins = self.R,\r\n                                    density = False)\r\n        # Normalize histogram and tag entries with eps\r\n        Nz = Nz/sample_num + self.eps\r\n        # Dimension reduce to R(R+1)/2 (omit zero values below diagonal of Nz)\r\n        Nz = np.triu(Nz, k = 0)\r\n        L = Nz[Nz != 0]\r\n        return L\r\n" ]
[ [ "numpy.savetxt", "numpy.random.seed" ], [ "numpy.random.seed", "numpy.asarray", "numpy.finfo", "numpy.ones", "numpy.concatenate", "numpy.random.randn", "numpy.shape", "numpy.histogram2d", "numpy.triu", "numpy.random.uniform", "sklearn.cluster.AgglomerativeClustering", "numpy.vstack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
silverriver/AdaLabel
[ "baefd765d79d90869ed4d28f76418c1a39eb0ae8" ]
[ "onmt/utils/loss.py" ]
[ "\"\"\"\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport onmt\nfrom onmt.modules.sparse_losses import SparsemaxLoss\nfrom onmt.modules.sparse_activations import LogSparsemax\n\n\ndef build_loss_compute(model, tgt_field, opt, train=True):\n \"\"\"\n Returns a LossCompute subclass which wraps around an nn.Module subclass\n (such as nn.NLLLoss) which defines the loss criterion. The LossCompute\n object allows this loss to be computed in shards and passes the relevant\n data to a Statistics object which handles training/validation logging.\n Currently, the NMTLossCompute class handles all loss computation except\n for when using a copy mechanism.\n \"\"\"\n device = torch.device(\"cuda\" if onmt.utils.misc.use_gpu(opt) else \"cpu\")\n\n padding_idx = tgt_field.vocab.stoi[tgt_field.pad_token]\n unk_idx = tgt_field.vocab.stoi[tgt_field.unk_token]\n\n if opt.lambda_coverage != 0:\n assert opt.coverage_attn, \"--coverage_attn needs to be set in \" \\\n \"order to use --lambda_coverage != 0\"\n\n if opt.copy_attn:\n criterion = onmt.modules.CopyGeneratorLoss(\n len(tgt_field.vocab), opt.copy_attn_force,\n unk_index=unk_idx, ignore_index=padding_idx\n )\n elif opt.label_smoothing > 0 and train:\n criterion = LabelSmoothingLoss(\n opt.label_smoothing, len(tgt_field.vocab), ignore_index=padding_idx\n )\n elif opt.adalab and train:\n eos_idx = tgt_field.vocab.stoi[tgt_field.eos_token]\n criterion = AdaLabLoss(\n len(tgt_field.vocab),\n opt.batch_size, ignore_index=padding_idx, reduction='sum',\n temperature=opt.ada_temp, eos_index=eos_idx\n )\n elif isinstance(model.generator[-1], LogSparsemax):\n criterion = SparsemaxLoss(ignore_index=padding_idx, reduction='sum')\n else:\n criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')\n\n # if the loss function operates on vectors of raw logits instead of\n # probabilities, only the first part of the generator needs to be\n # passed to the NMTLossCompute. At the moment, the only supported\n # loss function of this kind is the sparsemax loss.\n use_raw_logits = isinstance(criterion, SparsemaxLoss)\n loss_gen = model.generator[0] if use_raw_logits else model.generator\n if opt.copy_attn:\n compute = onmt.modules.CopyGeneratorLossCompute(\n criterion, loss_gen, tgt_field.vocab, opt.copy_loss_by_seqlength,\n lambda_coverage=opt.lambda_coverage\n )\n else:\n bidecoder_loss_gen = model.bidecoder_generator\n compute = AdaLabLossCompute(\n criterion, loss_gen, bidecoder_loss_gen, lambda_coverage=opt.lambda_coverage)\n # compute = NMTLossCompute(\n # criterion, loss_gen, lambda_coverage=opt.lambda_coverage)\n compute.to(device)\n\n return compute\n\n\nclass LossComputeBase(nn.Module):\n \"\"\"\n Class for managing efficient loss computation. Handles\n sharding next step predictions and accumulating multiple\n loss computations\n\n Users can implement their own loss computation strategy by making\n subclass of this one. 
Users need to implement the _compute_loss()\n    and _make_shard_state() methods.\n\n    Args:\n        generator (:obj:`nn.Module`) :\n            module that maps the output of the decoder to a\n            distribution over the target vocabulary.\n        tgt_vocab (:obj:`Vocab`) :\n            torchtext vocab object representing the target output\n        normalization (str): normalize by \"sents\" or \"tokens\"\n    \"\"\"\n\n    def __init__(self, criterion, generator):\n        super(LossComputeBase, self).__init__()\n        self.criterion = criterion\n        self.generator = generator\n\n    @property\n    def padding_idx(self):\n        return self.criterion.ignore_index\n\n    def _make_shard_state(self, batch, output, range_, attns=None):\n        \"\"\"\n        Make shard state dictionary for shards() to return iterable\n        shards for efficient loss computation. Subclass must define\n        this method to match its own _compute_loss() interface.\n        Args:\n            batch: the current batch.\n            output: the predicted output from the model.\n            range_: the range of examples for computing, the whole\n                batch or a trunc of it?\n            attns: the attns dictionary returned from the model.\n        \"\"\"\n        raise NotImplementedError\n\n    def _compute_loss(self, batch, output, target, **kwargs):\n        \"\"\"\n        Compute the loss. Subclass must define this method.\n\n        Args:\n\n            batch: the current batch.\n            output: the predicted output from the model.\n            target: the validation target to compare output with.\n            **kwargs(optional): additional info for computing loss.\n        \"\"\"\n        raise NotImplementedError\n\n    def __call__(self,\n                 batch,\n                 output,\n                 attns,\n                 normalization=1.0,\n                 shard_size=0,\n                 trunc_start=0,\n                 trunc_size=None):\n        \"\"\"Compute the forward loss, possibly in shards in which case this\n        method also runs the backward pass and returns ``None`` as the loss\n        value.\n\n        Also supports truncated BPTT for long sequences by taking a\n        range in the decoder output sequence to back propagate in.\n        Range is from `(trunc_start, trunc_start + trunc_size)`.\n\n        Note sharding is an exact efficiency trick to relieve memory\n        required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n normalization: Optional normalization factor.\n shard_size (int) : maximum number of examples in a shard\n trunc_start (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n\n Returns:\n A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.\n \"\"\"\n if trunc_size is None:\n trunc_size = batch.tgt.size(0) - trunc_start\n trunc_range = (trunc_start, trunc_start + trunc_size)\n shard_state = self._make_shard_state(batch, output, trunc_range, attns)\n if shard_size == 0:\n loss, stats = self._compute_loss(batch, **shard_state)\n return loss / float(normalization), stats\n batch_stats = onmt.utils.Statistics()\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n return None, batch_stats\n\n def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target).masked_select(non_padding).sum().item()\n num_non_padding = non_padding.sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)\n\n def _bottle(self, _v):\n return _v.view(-1, _v.size(2))\n\n def _unbottle(self, _v, batch_size):\n return _v.view(-1, batch_size, _v.size(1))\n\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n With label smoothing,\n KL-divergence between q_{smoothed ground truth prob.}(w)\n and p_{prob. 
computed by model}(w) is minimized.\n \"\"\"\n\n def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):\n assert 0.0 < label_smoothing <= 1.0\n self.ignore_index = ignore_index\n super(LabelSmoothingLoss, self).__init__()\n\n smoothing_value = label_smoothing / (tgt_vocab_size - 2)\n one_hot = torch.full((tgt_vocab_size,), smoothing_value)\n one_hot[self.ignore_index] = 0\n self.register_buffer('one_hot', one_hot.unsqueeze(0))\n\n self.confidence = 1.0 - label_smoothing\n\n def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction='sum')\n\n\nclass NMTLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n\n def __init__(self, criterion, generator, normalization=\"sents\",\n lambda_coverage=0.0):\n super(NMTLossCompute, self).__init__(criterion, generator)\n self.lambda_coverage = lambda_coverage\n\n def _make_shard_state(self, batch, output, bidec_output, range_, attns=None):\n shard_state = {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1], :, 0],\n }\n if self.lambda_coverage != 0.0:\n coverage = attns.get(\"coverage\", None)\n std = attns.get(\"std\", None)\n assert attns is not None\n assert std is not None, \"lambda_coverage != 0.0 requires \" \\\n \"attention mechanism\"\n assert coverage is not None, \"lambda_coverage != 0.0 requires \" \\\n \"coverage attention\"\n\n shard_state.update({\n \"std_attn\": attns.get(\"std\"),\n \"coverage_attn\": coverage\n })\n return shard_state\n\n def _compute_loss(self, batch, output, target, std_attn=None,\n coverage_attn=None):\n\n bottled_output = self._bottle(output)\n\n scores = self.generator(bottled_output)\n gtruth = target.view(-1)\n\n loss = self.criterion(scores, gtruth)\n if self.lambda_coverage != 0.0:\n coverage_loss = self._compute_coverage_loss(\n std_attn=std_attn, coverage_attn=coverage_attn)\n loss += coverage_loss\n stats = self._stats(loss.clone(), scores, gtruth)\n\n return loss, stats\n\n def _compute_coverage_loss(self, std_attn, coverage_attn):\n covloss = torch.min(std_attn, coverage_attn).sum()\n covloss *= self.lambda_coverage\n return covloss\n\n\ndef filter_shard_state(state, shard_size=None):\n for k, v in state.items():\n if shard_size is None:\n yield k, v\n\n if v is not None:\n v_split = []\n if isinstance(v, torch.Tensor):\n for v_chunk in torch.split(v, shard_size):\n v_chunk = v_chunk.data.clone()\n v_chunk.requires_grad = v.requires_grad\n v_split.append(v_chunk)\n yield k, (v, v_split)\n\n\ndef shards(state, shard_size, eval_only=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). 
The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval_only:\n yield filter_shard_state(state)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state, shard_size))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, [v_chunk for v_chunk in v_split])\n for k, (_, v_split) in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = []\n for k, (v, v_split) in non_none.items():\n if isinstance(v, torch.Tensor) and state[k].requires_grad:\n variables.extend(zip(torch.split(state[k], shard_size),\n [v_chunk.grad for v_chunk in v_split]))\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads)\n\n\nclass AdaLabLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n\n def __init__(self, criterion, generator, bidecoder_generator, normalization=\"sents\",\n lambda_coverage=0.0):\n super(AdaLabLossCompute, self).__init__(criterion, generator)\n self.lambda_coverage = lambda_coverage\n self.bidecoder_generator = bidecoder_generator\n\n def __call__(self,\n batch,\n output,\n attns,\n bidec_outputs,\n normalization=1.0,\n shard_size=0,\n trunc_start=0,\n trunc_size=None):\n \"\"\"Compute the forward loss, possibly in shards in which case this\n method also runs the backward pass and returns ``None`` as the loss\n value.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(trunc_start, trunc_start + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n normalization: Optional normalization factor.\n shard_size (int) : maximum number of examples in a shard\n trunc_start (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n\n Returns:\n A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.\n \"\"\"\n if trunc_size is None:\n trunc_size = batch.tgt.size(0) - trunc_start\n trunc_range = (trunc_start, trunc_start + trunc_size)\n shard_state = self._make_shard_state(batch, output, bidec_outputs, trunc_range, attns)\n if shard_size == 0:\n loss, stats = self._compute_loss(batch, **shard_state)\n return loss / float(normalization), stats\n batch_stats = onmt.utils.Statistics()\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n return None, batch_stats\n\n def _make_shard_state(self, batch, output, bidec_output, range_, attns=None):\n shard_state = {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1], :, 0],\n \"bidec_output\": bidec_output,\n }\n if self.lambda_coverage != 0.0:\n coverage = attns.get(\"coverage\", None)\n std = attns.get(\"std\", None)\n assert attns is not None\n assert std is not None, \"lambda_coverage != 0.0 requires \" \\\n \"attention mechanism\"\n assert coverage is not None, \"lambda_coverage != 0.0 requires \" \\\n \"coverage attention\"\n\n shard_state.update({\n \"std_attn\": attns.get(\"std\"),\n \"coverage_attn\": coverage\n })\n return shard_state\n\n def _compute_loss(self, batch, output, bidec_output, target, std_attn=None,\n coverage_attn=None):\n\n bottled_output = self._bottle(output)\n\n scores = self.generator(bottled_output)\n gtruth = target.view(-1)\n\n if self.bidecoder_generator is not None and bidec_output is not None:\n bidec_bottled_output = self._bottle(bidec_output)\n bidec_scores = self.bidecoder_generator(bidec_bottled_output)\n bidec_loss = F.cross_entropy(bidec_scores, gtruth,\n ignore_index=self.criterion.ignore_index, reduction=\"sum\")\n else:\n bidec_scores = None\n bidec_loss = torch.tensor(0, device=gtruth.device)\n\n if isinstance(self.criterion, AdaLabLoss):\n loss = self.criterion(scores, gtruth, target, bidec_scores)\n nll_loss = F.nll_loss(scores, gtruth,\n ignore_index=self.criterion.ignore_index, reduction=\"sum\")\n else:\n loss = self.criterion(scores, gtruth)\n nll_loss = loss\n\n # loss = self.criterion(scores, gtruth)\n if self.lambda_coverage != 0.0:\n coverage_loss = self._compute_coverage_loss(\n std_attn=std_attn, coverage_attn=coverage_attn)\n loss += coverage_loss\n # stats = self._stats(loss.clone(), scores, gtruth)\n stats = self._stats(loss.clone(), scores, gtruth,\n bidec_loss.clone(), bidec_scores, nll_loss.clone())\n\n return loss + bidec_loss, stats\n\n def _compute_coverage_loss(self, std_attn, coverage_attn):\n covloss = torch.min(std_attn, coverage_attn).sum()\n covloss *= self.lambda_coverage\n return covloss\n\n def _stats(self, loss, scores, target, bidec_loss, bidec_scores, nll_loss):\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target).masked_select(non_padding).sum().item()\n 
num_non_padding = non_padding.sum().item()\n\n if bidec_scores is None:\n bidec_num_correct = 0\n else:\n bidec_pred = bidec_scores.max(1)[1]\n bidec_num_correct = bidec_pred.eq(target).masked_select(non_padding).sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct,\n bidec_loss.item(), bidec_num_correct, nll_loss.item())\n\n\nclass AdaLabLoss(nn.Module):\n \"\"\"\n With adaptive label smoothing,\n KL-divergence between q_{smoothed ground truth prob.}(w)\n and p_{prob. computed by model}(w) is minimized.\n \"\"\"\n\n def __init__(self, tgt_vocab_size, batch_size, ignore_index=-100, device=\"cuda\", reduction='sum',\n temperature=1, eos_index=3):\n self.ignore_index = ignore_index\n self.eos_index = eos_index\n self.tgt_vocab_size = tgt_vocab_size\n super(AdaLabLoss, self).__init__()\n self.device = device\n self.batch_size = batch_size\n self.reduction = reduction\n\n self.step = 0\n self.temperature = temperature\n self.top_head = 2\n self.top_tail = 500\n self.margin = 0.2\n self.alpha_param = 2\n self.topk = 5\n\n def forward(self, output, target, tgt_batch=None, label_scores=None):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n v = self._get_v(label_scores, target)\n epsilon = self._get_epsilon(output, target, v)\n\n confidence = 1 - epsilon\n smoothing_penalty = epsilon.unsqueeze(-1) * v\n\n model_prob = torch.zeros_like(output, device=output.device, dtype=torch.float)\n model_prob.scatter_(1, target.unsqueeze(1), confidence.unsqueeze(1))\n model_prob = model_prob + smoothing_penalty\n model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction=self.reduction)\n\n def _bottle(self, _v):\n return _v.view(-1, _v.size(2))\n\n def _unbottle(self, _v, batch_size):\n return _v.view(-1, batch_size, _v.size(1))\n\n def _get_epsilon(self, output, target, v):\n probs = output.detach().clone().exp()\n prob_max = probs.max(dim=1)[0]\n prob_gtruth = probs.gather(dim=1, index=target.unsqueeze(1)).squeeze()\n epsilon = 1 - prob_max\n maxv = v.max(dim=-1)[0]\n up_bond = 1 / (1 + maxv) - self.margin\n mask = epsilon.gt(up_bond)\n epsilon[mask] = up_bond[mask]\n alpha = (prob_gtruth / prob_max).pow(self.alpha_param)\n epsilon = alpha * epsilon\n return epsilon\n\n def _get_v(self, label_scores, target):\n v = label_scores.detach().clone()\n v = v / self.temperature\n v.scatter_(1, target.unsqueeze(1), -float('inf'))\n v[:, self.ignore_index] = -float('inf')\n\n # truncate tail\n upper_values, upper_indices = torch.topk(v, self.top_tail, dim=1)\n kth_upper = upper_values[:, -1].view([-1, 1])\n kth_upper = kth_upper.repeat([1, v.shape[1]]).float()\n upper_ignore = torch.lt(v, kth_upper)\n v = v.masked_fill(upper_ignore, -10000)\n\n # truncate head\n lower_values, lower_indices = torch.topk(v, self.top_head, dim=1)\n kth_lower = lower_values[:, -1].view([-1, 1])\n kth_lower = kth_lower.repeat([1, v.shape[1]]).float()\n lower_ignore = torch.gt(v, kth_lower)\n v = v.masked_fill(lower_ignore, -10000)\n\n v = v.softmax(dim=-1)\n return v\n\n def _compute_entropy(self, output):\n entropy = -torch.sum(output.exp() * output, -1)\n return entropy\n" ]
[ [ "torch.nn.functional.kl_div", "torch.nn.NLLLoss", "torch.full", "torch.autograd.backward", "torch.nn.functional.nll_loss", "torch.min", "torch.nn.functional.cross_entropy", "torch.zeros_like", "torch.lt", "torch.tensor", "torch.split", "torch.topk", "torch.gt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ruestefa/stormtrack
[ "e9378f013c406d387ea944c97e5adc68df864dee", "e9378f013c406d387ea944c97e5adc68df864dee", "e9378f013c406d387ea944c97e5adc68df864dee", "e9378f013c406d387ea944c97e5adc68df864dee" ]
[ "tests/test_stormtrack/test_core/test_features/data/circle_on_globe_clat-10_rad-800_delta-0.5_dyntools.py", "tests/test_stormtrack/test_core/test_features/data/circle_on_globe_clat-00_rad-800_delta-1.0_pyproj.py", "tests/test_stormtrack/test_core/test_features/data/circle_on_globe_clat-00_rad-800_delta-1.0_dyntools.py", "tests/test_stormtrack/test_core/test_features/data/circle_on_globe_clat-60_rad-800_delta-0.5_dyntools.py" ]
[ "import numpy as np\n\n# fmt: off\n\nclon, clat = 0.0, 10.0\nrad_km = 800.0\narea_km2 = np.pi*rad_km**2\n\nnlat, nlon = 31, 31\nlat1d = np.linspace(2.5, 17.5, nlat)\nlon1d = np.linspace(-7.5, 7.5, nlon)\nlat2d, lon2d = np.meshgrid(lat1d, lon1d)\n\n_, X = 0, 1\nmask = np.array([\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n], np.bool).T[:, ::-1]\n", "import numpy as np\n\n# fmt: off\n\nclon, clat = 0.0, 0.0\nrad_km = 800.0\narea_km2 = np.pi*rad_km**2\n\nnlat, nlon = 17, 17\nlat1d = np.linspace(-8.0, 8.0, nlat)\nlon1d = np.linspace(-8.0, 8.0, nlon)\nlat2d, lon2d = np.meshgrid(lat1d, lon1d)\n\n_, X = 0, 1\nmask = np.array([\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n], np.bool).T[:, ::-1]\n", "import numpy as np\n\n# fmt: off\n\nclon, clat = 0.0, 0.0\nrad_km = 800.0\narea_km2 = np.pi*rad_km**2\n\nnlat, nlon = 17, 17\nlat1d = np.linspace(-8.0, 8.0, nlat)\nlon1d = np.linspace(-8.0, 8.0, nlon)\nlat2d, lon2d = np.meshgrid(lat1d, 
lon1d)\n\n_, X = 0, 1\nmask = np.array([\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,X,X,X,_,_,_,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,_,_,_,X,X,X,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n], np.bool).T[:, ::-1]\n", "import numpy as np\n\n# fmt: off\n\nclon, clat = 0.0, 60.0\nrad_km = 800.0\narea_km2 = np.pi*rad_km**2\n\nnlat, nlon = 31, 61\nlat1d = np.linspace(52.5, 67.5, nlat)\nlon1d = np.linspace(-15.0, 15.0, nlon)\nlat2d, lon2d = np.meshgrid(lat1d, lon1d)\n\n_, X = 0, 1\nmask = np.array([\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],\n[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_],\n[_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_],\n[_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,
X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_],\n[_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_],\n[_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],\n], np.bool).T[:, ::-1]\n" ]
[ [ "numpy.array", "numpy.meshgrid", "numpy.linspace" ], [ "numpy.array", "numpy.meshgrid", "numpy.linspace" ], [ "numpy.array", "numpy.meshgrid", "numpy.linspace" ], [ "numpy.array", "numpy.meshgrid", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tau-lex/market-analysis-system
[ "19926ca92bcab31a51fdac8f1910c8775d3d38d1", "19926ca92bcab31a51fdac8f1910c8775d3d38d1" ]
[ "mas_mt/v0.4_simple_classifier/classifier_0_kfold.py", "mas_tools/classes.py" ]
[ "# -*- coding: utf-8 -*-\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mas_tools.os import get_parameters\nfrom mas_tools.ml import plot_history\nfrom mas_tools.data import create_timeseries_matrix\nfrom mas_tools.data import get_delta, get_diff, get_log_diff\nfrom mas_tools.data import get_sigmoid_to_zero, get_sigmoid_ration\nfrom mas_tools.models import save_model, load_model\nfrom mas_tools.classes import signal_to_class, class_to_signal\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Dense, Activation\nfrom keras.layers import LeakyReLU\nfrom keras.layers import Dropout, ActivityRegularization\nfrom keras import regularizers\nfrom keras.optimizers import RMSprop, SGD\nfrom keras.optimizers import Adam, Nadam, Adagrad, Adamax, Adadelta\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\nfrom keras.callbacks import CSVLogger, EarlyStopping\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import matthews_corrcoef\n\n\n#=============================================================================#\n# P R E P A R E V A R I A B L E S #\n#=============================================================================#\n# params[symb+period, arg1, arg2, ..]\nparams = ['EURUSD15', '-train', '100', '-graph']\n# params = ['EURUSD15', '-predict']\nlimit = 10000\nbatch_size = 128\nfit_epoch = 100\ntrain_test = 0.2\nts_lookback = 10\n\nnclasses = 3\nnormalize_class = True\n\nrun_type = 0\ngraph = False\n\nidx = 0\nfor item in params:\n if idx > 0:\n if item == '-train':\n run_type = 0\n elif item == '-predict':\n run_type = 1\n elif item == '-graph':\n graph = True\n elif item == '-limit':\n pass\n elif int(item) > 0:\n if params[idx-1] == '-train':\n fit_epoch = int(item)\n elif params[idx-1] == '-limit':\n limit = int(item)\n idx += 1\n\nnp.random.seed(13)\n\n\npath = 'C:/Users/Alexey/AppData/Roaming/MetaQuotes/Terminal/287469DEA9630EA94D0715D755974F1B/MQL4/Files/ML-Assistant/'\nworkfile = params[0]\nfile_x = path + workfile + '_x_20k.csv'\nfile_y = path + workfile + '_y_20k.csv'\nfile_xx = path + workfile + '_xx.csv'\nfile_yy = path + workfile + '_yy.csv'\nprefix = 'wgts/classifier_0_'\nmodel = None\ndata_x = np.array([])\ndata_y = np.array([])\ntrain_x = np.array([])\ntrain_y = np.array([])\ntest_x = np.array([])\ntest_y = np.array([])\nhistory = None\n\n# print('Backend:', backend())\nprint('\\nWork file:', workfile)\n\n\n#=============================================================================#\n# L O A D D A T A #\n#=============================================================================#\ndef prepare_data(data):\n # for time(0, 6), market(7, 10), ema(11, 14), macd(15, 16)\n # for atr(17), cci(18), rsi(19), usdx(20), eurx(21)\n #\n # delta = get_delta(data, 7, 10)\n # sigmoid = get_sigmoid_to_zero\n # sigmoid = get_sigmoid_ration\n # sigm1 = sigmoid(data[:, 8])\n # sigm2 = sigmoid(data[:, 9])\n # sigm3 = sigmoid(data[:, 10])\n diff1 = get_diff(data[:, 8])\n diff2 = get_diff(data[:, 9])\n diff3 = get_diff(data[:, 10])\n # logdiff1 = get_log_diff(data[:, 8])\n # logdiff2 = get_log_diff(data[:, 9])\n # logdiff3 = get_log_diff(data[:, 10])\n detrend1 = 
get_delta(data, 10, 11) # close - ema13\n detrend2 = get_delta(data, 10, 12) # close - ema26\n #\n # edelta1 = get_delta(data, 11, 12)\n # edelta2 = get_delta(data, 13, 14)\n # ediff1 = get_diff(data[:, 11])\n # ediff2 = get_diff(data[:, 12])\n # elogdiff1 = get_log_diff(data[:, 11])\n # elogdiff2 = get_log_diff(data[:, 12])\n #\n # xdelta = get_delta(data, 20, 21)\n xdiff1 = get_diff(data[:, 20])\n xdiff2 = get_diff(data[:, 21])\n # xlogdiff1 = get_log_diff(data[:, 20])\n # xlogdiff2 = get_log_diff(data[:, 21])\n return np.array(np.column_stack((\n # data[:, 5:6], # hours and minutes\n # data[:, 8:11], # prices (without open)\n # delta,\n # sigm1, sigm2, sigm3,\n diff1, diff2, diff3,\n # logdiff1, logdiff2, logdiff3,\n detrend1, detrend2,\n # data[:, 11:15], # ema's\n # edelta1, edelta2,\n # ediff1, ediff2,\n # elogdiff1, elogdiff2,\n # data[:, 15:17], # macd\n data[:, 17:20], # atr, cci, rsi\n # data[:, 20:22], # usd and eur indexes\n # xdelta,\n # xdiff1, xdiff2,\n # xlogdiff1, xlogdiff2,\n ))\n )\n\nprint('Loading Data...')\n\ntrain_data = np.genfromtxt(file_x, delimiter=';')\ntarget_data = np.genfromtxt(file_y, delimiter=';')\n\ntrain_data, target_data = train_data[-limit:,], target_data[-limit:]\n\ndata_x = prepare_data(train_data)\ndata_y = signal_to_class(target_data, n=nclasses, normalize=normalize_class)\ndata_x, data_y = create_timeseries_matrix(data_x, data_y, ts_lookback)\n\n# batch_input_shape=(batch_size, timesteps, units)\n# data_x = np.reshape(data_x, (data_x.shape[0], ts_lookback, train_data.shape[1]))\n\n# For training validation\ntrain_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=train_test)\n\nprint('Input data shape :', data_x.shape)\nprint('Train/Test :', len(train_y), '/', len(test_y))\n\n\n#=============================================================================#\n# P R E P A R E M O D E L #\n#=============================================================================#\nprint('\\nCreating Model...')\n\nbatch_size = 256\nfa = 'tanh'\ninit = 'lecun_normal' #'lecun_uniform' #'random_uniform'\ninit_b = 'random_uniform'\nreg = regularizers.l2\nrs = 0.01\n\ndef get_model():\n model = Sequential()\n model.add(BatchNormalization(batch_input_shape=(None, data_x.shape[1])))\n model.add(Dense(data_x.shape[1], \n # activation=fa,\n kernel_initializer=init,\n bias_initializer=init_b,\n # kernel_regularizer=reg(rs)\n )\n )\n model.add(LeakyReLU())\n model.add(Dense(50, \n # activation=fa,\n kernel_initializer=init,\n bias_initializer=init_b,\n # kernel_regularizer=reg(rs)\n )\n )\n model.add(LeakyReLU())\n model.add(ActivityRegularization(l1=0.01, l2=0.01))\n model.add(Dropout(0.3))\n model.add(Dense(25, \n # activation=fa,\n kernel_initializer=init,\n bias_initializer=init_b,\n # kernel_regularizer=reg(rs)\n )\n )\n model.add(LeakyReLU())\n model.add(ActivityRegularization(l1=0.01, l2=0.01))\n model.add(Dense(nclasses,\n activation='softmax',\n kernel_initializer='lecun_normal',\n bias_initializer=init_b,\n # kernel_regularizer=regularizers.l2(rs)\n )\n )\n # opt = SGD(lr=0.1, momentum=0.5, nesterov=True)\n # opt = Adadelta(lr=0.1) #Adamax, Adadelta\n opt = Nadam(lr=0.002)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc']) #, metrics=['acc']\n\n return model\n\nestimator = KerasClassifier(build_fn=get_model, epochs=100, batch_size=128, verbose=0)\n\n\n#=============================================================================#\n# T R A I N I N G 
#\n#=============================================================================#\nprint('Training...')\n\n# reduce_lr = ReduceLROnPlateau(factor=0.05, patience=5, min_lr=0.000001, verbose=1)\n# checkpointer = ModelCheckpoint(filepath=(prefix+workfile+\"_{epoch:02d}-{val_loss:.2f}\"+'.hdf5'), verbose=0, save_best_only=True)\n# es = EarlyStopping(patience=40, min_delta=0.0001)\n\n# history = model.fit(train_x, train_y,\n# batch_size=batch_size,\n# epochs=fit_epoch,\n# callbacks=[reduce_lr],\n# validation_data=(test_x, test_y)\n# )\n\nkfold = KFold(n_splits=10)\n\nresults = cross_val_score(estimator,\n # X=data_x, y=data_y,\n X=train_x, y=train_y,\n cv=kfold,\n verbose=1,\n # fit_params={}\n )\n\nprint(\"Baseline: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nhistory = estimator.fit(x=train_x, y=train_y, epochs=100)\n\n#=============================================================================#\n# P R E D I C T I N G #\n#=============================================================================#\nprint('\\nPredicting...')\n\npredict_data = prepare_data(np.genfromtxt(file_xx, delimiter=';'))\nprint(predict_data.shape)\ndata_xx, empty = create_timeseries_matrix(predict_data, look_back=ts_lookback)\nprint(data_xx.shape)\n# data_xx = np.reshape(data_xx, (data_xx.shape[0], ts_lookback, predict_data.shape[1]))\n# print(data_xx.shape)\n\n# Prediction model\ndata_yy = estimator.predict(data_xx, batch_size=batch_size)\npredicted = data_yy\n# data_yy = class_to_signal(data_yy.reshape(data_xx.shape[0], nclasses),\n# n=nclasses,\n# normalized=normalize_class)\n\nnp.savetxt(file_yy, data_yy, fmt='%.2f', delimiter=';')\nprint(\"Predict saved:\\n\", file_yy)\n\n\n#=============================================================================#\n# P L O T #\n#=============================================================================#\n# calculate root mean squared error\ntrain_y = class_to_signal(train_y,\n n=nclasses,\n normalized=normalize_class)\ntest_y = class_to_signal(test_y,\n n=nclasses,\n normalized=normalize_class)\ntrain_predict = class_to_signal(estimator.predict(train_x),\n n=nclasses,\n normalized=normalize_class)\ntest_predict = class_to_signal(estimator.predict(test_x),\n n=nclasses,\n normalized=normalize_class)\n\ntrain_score = math.sqrt(mean_squared_error(train_y, train_predict))\nprint('\\nTrain Score: %.6f RMSE' % (train_score))\ntest_score = math.sqrt(mean_squared_error(test_y, test_predict))\nprint('Test Score: %.6f RMSE' % (test_score))\n\nCM = confusion_matrix(test_y, test_predict)\nprint('\\nMATTHEWS CORRELATION')\nprint(matthews_corrcoef(test_y, test_predict))\nprint('\\nCONFUSION MATRIX')\nprint(CM / CM.astype(np.float).sum(axis=1))\nprint('\\nCLASSIFICATION REPORT')\nprint(classification_report(test_y, test_predict))\nprint('-' * 20)\n\n\nplt.plot(predicted)\nplt.title('Predict')\nplt.ylabel('class')\nplt.xlabel('bar')\nplt.legend(['buy', 'hold', 'sell'])\nplt.show()\n\nplt.plot(data_yy)\nplt.title('Saved predict')\nplt.ylabel('class')\nplt.xlabel('bar')\nplt.legend(['prediction'])\nplt.show()\n\nplot_history(history)\n\n", "# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef signal_to_class(data, n=2, normalize=True):\n \"\"\"\n Converts a list of signals to a n-dimensional list of classes [buy, .., sell].\n\n Arguments\n n (int): Number of classes.\n normalize (bool): It normalizes to unity. 
False - the signal changes only the sign.\n \n Returns\n Array of classes.\n \"\"\"\n\n result = np.array([])\n data = np.array(data)\n\n if len(data.shape) > 1:\n raise ValueError(\"The array must be one-dimensional.\")\n\n if n == 2:\n if normalize:\n for item in data:\n if item > 0: # buy\n result = np.append(result, [1.0, 0.0])\n if item <= 0: # sell\n result = np.append(result, [0.0, 1.0])\n else:\n for item in data:\n result = np.append(result, [0.5+item/2.0, 0.5-item/2.0])\n elif n == 3:\n if normalize:\n for item in data:\n if item > 0: # buy\n result = np.append(result, [1.0, 0.0, 0.0])\n if item < 0: # sell\n result = np.append(result, [0.0, 0.0, 1.0])\n if item == 0: # pass\n result = np.append(result, [0.0, 1.0, 0.0])\n else:\n for item in data:\n if item > 0: # buy\n result = np.append(result, [abs(item), (1.0-abs(item)), 0.0])\n if item < 0: # sell\n result = np.append(result, [0.0, (1.0-abs(item)), abs(item)])\n if item == 0: # pass\n result = np.append(result, [0.0, 1.0, 0.0])\n elif n == 6:\n for item in data:\n if item >= 0.8 and item <= 1.0:\n result = np.append(result, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n elif item >= 0.4 and item < 0.8:\n result = np.append(result, [0.0, 1.0, 0.0, 0.0, 0.0, 0.0])\n elif item >= 0.0 and item < 0.4:\n result = np.append(result, [0.0, 0.0, 1.0, 0.0, 0.0, 0.0])\n elif item > -0.4 and item < 0.0:\n result = np.append(result, [0.0, 0.0, 0.0, 1.0, 0.0, 0.0])\n elif item > -0.8 and item <= 0.4:\n result = np.append(result, [0.0, 0.0, 0.0, 0.0, 1.0, 0.0])\n elif item >= -1.0 and item <= 0.8:\n result = np.append(result, [0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n\n return result.reshape((data.shape[0], n))\n\n\ndef class_to_signal(data, n=2, normalized=True):\n \"\"\"\n Converts a n-dimensional list of classes to a list of signals.\n \"\"\"\n\n result = np.array([])\n\n if n == 2:\n if normalized:\n for item in data:\n result = np.append(result, 1 if item[0] > item[1] else -1)\n else:\n for item in data:\n result = np.append(result, item[0] * 2 - 1.0)\n elif n == 3:\n if normalized:\n for item in data:\n _class = np.argmax(item)\n if _class == 0:\n result = np.append(result, 1.0)\n elif _class == 1:\n result = np.append(result, 0.0)\n elif _class == 2:\n result = np.append(result, -1.0)\n else:\n for item in data:\n _class = np.argmax(item)\n if _class == 0:\n result = np.append(result, item[0])\n elif _class == 1:\n result = np.append(result, 0.0)\n elif _class == 2:\n result = np.append(result, -item[2])\n elif n == 6:\n for item in data:\n _class = np.argmax(item)\n if _class == 0:\n result = np.append(result, 1.0)\n elif _class == 1:\n result = np.append(result, 0.66)\n elif _class == 2:\n result = np.append(result, 0.33)\n elif _class == 3:\n result = np.append(result, -0.33)\n elif _class == 4:\n result = np.append(result, -0.66)\n elif _class == 5:\n result = np.append(result, -1.0)\n\n return result\n\n\ndef prepare_target(data, close_index=3, classes=6):\n \"\"\"\n Hello (=\n uniform classes\n \"\"\"\n # TODO\n # while const\n classes = 6\n \n data = np.array(data)\n new_target = data[1:, close_index] / data[:-1, close_index]\n new_target = np.insert(new_target, obj=0, values=[1.0])\n \n n, bins = np.histogram(new_target, bins=200, range=(0.99, 1.01))\n \n sixth = sum(n) / classes\n \n points = [0., 0., 1., 0., 0.]\n _sum = n[100]/2\n p_idx = 1\n for idx in range(99, -1):\n _sum += n[idx]\n if _sum >= sixth:\n points[p_idx] = (idx - 100) / 10**4 + 1\n p_idx -= 1\n if p_idx < 0:\n break\n _sum = n[100]/2\n p_idx = 3\n for idx in range(101, 201):\n 
_sum += n[idx]\n if _sum >= sixth:\n points[p_idx] = (idx - 100) / 10**4 + 1\n p_idx += 1\n if p_idx > 4:\n break\n # TODO\n def select(a):\n a > points[2]\n return 1\n new_target = [select(x) for x in new_target]\n\n return new_target\n" ]
[ [ "matplotlib.pyplot.legend", "sklearn.model_selection.cross_val_score", "numpy.random.seed", "matplotlib.pyplot.title", "sklearn.metrics.matthews_corrcoef", "sklearn.model_selection.train_test_split", "sklearn.model_selection.KFold", "numpy.genfromtxt", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "sklearn.metrics.mean_squared_error", "numpy.column_stack", "numpy.savetxt", "matplotlib.pyplot.xlabel", "numpy.array", "sklearn.metrics.classification_report", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.append", "numpy.argmax", "numpy.insert", "numpy.array", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
braycarlson/warbler.py
[ "6746dc6479c9360811634c2d627606d788538d7e", "6746dc6479c9360811634c2d627606d788538d7e" ]
[ "warbler.py/analyze/3.0-pdf.py", "warbler.py/spectrogram/plot.py" ]
[ "import fitz\nimport io\nimport matplotlib\nimport pandas as pd\n\nfrom multiprocessing import cpu_count, Pool\nfrom natsort import os_sorted\nfrom parameters import BASELINE\nfrom path import DATA, INDIVIDUALS, PRINTOUTS\nfrom spectrogram.axes import SpectrogramAxes\nfrom spectrogram.plot import (\n create_luscinia_spectrogram,\n # create_spectrogram\n)\nfrom tqdm import tqdm\n\n\ndef process(data):\n individual, filename, page = data\n path = individual.joinpath('wav', filename)\n\n if not path.is_file():\n raise Exception(path)\n\n template = {\n 'filename': filename,\n 'path': path,\n 'page': int(page) - 1,\n 'stream': io.BytesIO()\n }\n\n stream = template.get('stream')\n\n plt = create_luscinia_spectrogram(path, BASELINE)\n plt.savefig(stream, format='png')\n\n stream.seek(0)\n plt.close()\n\n return template\n\n\ndef main():\n # https://github.com/matplotlib/matplotlib/issues/21950\n matplotlib.use('Agg')\n\n spreadsheet = DATA.joinpath('2017.xlsx')\n\n dataframe = pd.read_excel(\n spreadsheet,\n engine='openpyxl'\n )\n\n width = 321.900390625\n height = 98.97236633300781\n\n # Set page size\n page_size = fitz.Rect(\n 0.0,\n 0.0,\n 375,\n 165\n )\n\n # Add text\n text_box = fitz.Rect(\n 0.0,\n 5.0,\n 375,\n 80\n )\n\n # Add image\n image_box = fitz.Rect(\n 9.5,\n 8.0,\n width - 2,\n height\n )\n\n processes = int(cpu_count() / 2)\n maxtasksperchild = 200\n\n for individual, printout in zip(INDIVIDUALS, PRINTOUTS):\n row = dataframe[dataframe.Individual == individual.stem]\n\n pdf = os_sorted([\n file for file in printout.glob('*.pdf')\n if 'Clean' in file.stem\n ])\n\n for p in pdf:\n handle = fitz.open(p)\n total = len(handle)\n\n with Pool(\n processes=processes,\n maxtasksperchild=maxtasksperchild\n ) as pool:\n data = [\n (individual, filename, page)\n for _, filename, page in zip(\n range(0, total),\n row.updatedFileName.values,\n row.pageNumber.values\n )\n ]\n\n total = len(data)\n\n results = tqdm(\n pool.imap(\n process,\n data\n ),\n total=total\n )\n\n for result in results:\n page = result.get('page')\n filename = result.get('filename')\n stream = result.get('stream')\n\n current = handle.load_page(page)\n current.set_mediabox(page_size)\n\n current.insert_textbox(\n text_box,\n filename,\n fontsize=8,\n fontname='Times-Roman',\n fontfile=None,\n align=1\n )\n\n current.insert_image(\n image_box,\n stream=stream\n )\n\n filename = individual.stem + '.pdf'\n handle.save(filename)\n\n pool.close()\n pool.join()\n\n\nif __name__ == '__main__':\n main()\n", "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom dataclass.signal import Signal\nfrom dataclass.spectrogram import Spectrogram\nfrom matplotlib import gridspec\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\n\n\ndef plot_spectrogram(spectrogram, **kwargs):\n ax = kwargs.pop('ax')\n signal = kwargs.pop('signal')\n\n x_minimum = 0\n x_maximum = signal.duration\n y_minimum = 0\n y_maximum = signal.rate / 2\n\n extent = [\n x_minimum,\n x_maximum,\n y_minimum,\n y_maximum\n ]\n\n image = ax.matshow(\n spectrogram,\n interpolation=None,\n aspect='auto',\n origin='lower',\n extent=extent,\n **kwargs\n )\n\n ax.initialize()\n ax._x_lim(x_maximum)\n ax._x_step(x_maximum)\n\n return image\n\n\ndef plot_segmentation(signal, dts):\n spectrogram = dts.get('spec')\n onsets = dts.get('onsets')\n offsets = dts.get('offsets')\n\n fig, ax = plt.subplots(\n constrained_layout=True,\n figsize=(20, 4),\n subplot_kw={'projection': 'luscinia'}\n )\n\n plt.xticks(\n 
fontfamily='Arial',\n fontsize=14,\n fontweight=600\n )\n\n plt.yticks(\n fontfamily='Arial',\n fontsize=14,\n fontweight=600\n )\n\n image = plot_spectrogram(\n spectrogram,\n ax=ax,\n signal=signal,\n cmap=plt.cm.Greys,\n )\n\n ylmin, ylmax = ax.get_ylim()\n ysize = (ylmax - ylmin) * 0.1\n ymin = ylmax - ysize\n\n patches = []\n\n for index, (onset, offset) in enumerate(zip(onsets, offsets), 0):\n ax.axvline(\n onset,\n color='dodgerblue',\n ls='dashed',\n lw=1,\n alpha=0.75\n )\n\n ax.axvline(\n offset,\n color='dodgerblue',\n ls='dashed',\n lw=1,\n alpha=0.75\n )\n\n rectangle = Rectangle(\n xy=(onset, ymin),\n width=offset - onset,\n height=1000,\n )\n\n rx, ry = rectangle.get_xy()\n cx = rx + rectangle.get_width() / 2.0\n cy = ry + rectangle.get_height() / 2.0\n\n ax.annotate(\n index,\n (cx, cy),\n color='white',\n weight=600,\n fontfamily='Arial',\n fontsize=8,\n ha='center',\n va='center'\n )\n\n patches.append(rectangle)\n\n collection = PatchCollection(\n patches,\n color='dodgerblue',\n alpha=0.75\n )\n\n ax.add_collection(collection)\n return image\n\n\ndef plot_segmentation_with_vocal_envelope(signal, dts):\n spectrogram = dts.get('spec')\n vocal_envelope = dts.get('vocal_envelope')\n onsets = dts.get('onsets')\n offsets = dts.get('offsets')\n\n plt.figure(\n figsize=(15, 5)\n )\n\n gs = gridspec.GridSpec(\n 2,\n 1,\n height_ratios=[1, 3]\n )\n\n gs.update(hspace=0.0)\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1], projection='luscinia')\n\n image = plot_spectrogram(\n spectrogram,\n ax=ax1,\n signal=signal,\n cmap=plt.cm.Greys,\n )\n\n ax0.plot(vocal_envelope, color='k')\n\n ax0.set_xlim(\n [0, len(vocal_envelope)]\n )\n\n ylmin, ylmax = ax1.get_ylim()\n ysize = (ylmax - ylmin) * 0.1\n ymin = ylmax - ysize\n\n patches = []\n\n for index, (onset, offset) in enumerate(zip(onsets, offsets), 0):\n ax1.axvline(\n onset,\n color='dodgerblue',\n ls='dashed',\n lw=1,\n alpha=0.75\n )\n\n ax1.axvline(\n offset,\n color='dodgerblue',\n ls='dashed',\n lw=1,\n alpha=0.75\n )\n\n rectangle = Rectangle(\n xy=(onset, ymin),\n width=offset - onset,\n height=1000,\n )\n\n rx, ry = rectangle.get_xy()\n cx = rx + rectangle.get_width() / 2.0\n cy = ry + rectangle.get_height() / 2.0\n\n ax1.annotate(\n index,\n (cx, cy),\n color='white',\n weight=600,\n fontfamily='Arial',\n fontsize=8,\n ha='center',\n va='center'\n )\n\n patches.append(rectangle)\n\n collection = PatchCollection(\n patches,\n color='dodgerblue',\n alpha=0.75\n )\n\n ax1.add_collection(collection)\n ax0.axis('off')\n return image\n\n\ndef create_luscinia_spectrogram(path, parameters):\n signal = Signal(path)\n\n signal.filter(\n parameters.butter_lowcut,\n parameters.butter_highcut\n )\n\n spectrogram = Spectrogram(signal, parameters)\n\n fig, ax = plt.subplots(\n constrained_layout=True,\n figsize=(20, 4),\n subplot_kw={'projection': 'luscinia'}\n )\n\n plt.xticks(\n fontfamily='Arial',\n fontsize=14,\n fontweight=600\n )\n\n plt.yticks(\n fontfamily='Arial',\n fontsize=14,\n fontweight=600\n )\n\n plot_spectrogram(\n spectrogram.data,\n ax=ax,\n signal=signal,\n cmap=plt.cm.Greys,\n )\n\n return fig\n\n\ndef create_spectrogram(path, parameters):\n signal = Signal(path)\n\n signal.filter(\n parameters.butter_lowcut,\n parameters.butter_highcut\n )\n\n spectrogram = Spectrogram(signal, parameters)\n\n fig, ax = plt.subplots(\n figsize=(20, 3),\n subplot_kw={'projection': 'spectrogram'}\n )\n\n plot_spectrogram(\n spectrogram.data,\n ax=ax,\n signal=signal,\n cmap=plt.cm.afmhot,\n )\n\n plt.tight_layout()\n\n return fig\n" 
]
[ [ "matplotlib.use", "pandas.read_excel" ], [ "matplotlib.pyplot.yticks", "matplotlib.collections.PatchCollection", "matplotlib.pyplot.tight_layout", "matplotlib.patches.Rectangle", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gokhanagma/nengo
[ "ee88870491af0dcec969f66613fdf4b2b68a92d4", "55b5c4f7da351dfefd8b40eb14d2e47c1006cff0" ]
[ "nengo/tests/test_dists.py", "nengo/networks/product.py" ]
[ "import time\n\nimport numpy as np\nimport pytest\n\nimport nengo.utils.numpy as npext\nfrom nengo.dists import (\n PDF,\n Choice,\n CosineSimilarity,\n DistOrArrayParam,\n Exponential,\n Gaussian,\n QuasirandomSequence,\n Samples,\n ScatteredHypersphere,\n SqrtBeta,\n Uniform,\n UniformHypersphere,\n _betaincinv22,\n)\nfrom nengo.exceptions import ValidationError\n\n\ndef test_pdf(rng, allclose):\n s = 0.25\n f = lambda x: (\n np.exp(-0.5 * (x + 0.5) ** 2 / s ** 2) + np.exp(-0.5 * (x - 0.5) ** 2 / s ** 2)\n )\n\n xref = np.linspace(-2, 2, 101)\n pref = f(xref)\n pref /= pref.sum()\n dist = PDF(xref, pref)\n\n n = 100000\n samples = dist.sample(n, rng=rng)\n h, xedges = np.histogram(samples, bins=101)\n x = 0.5 * (xedges[:-1] + xedges[1:])\n dx = np.diff(xedges)\n y = h / float(h.sum()) / dx\n z = f(x)\n z = z / z.sum() / dx\n assert allclose(y, z, atol=0.05)\n\n with pytest.raises(ValidationError, match=\"PDF must sum to one\"):\n dist = PDF([0, 1, 2], [0.1, 1.1, 0.1])\n\n with pytest.raises(ValidationError, match=\"`x` and `p` must be the same length\"):\n dist = PDF([0, 1], [0, 1, 0])\n\n\[email protected](\"low,high\", [(-2, -1), (-4, 1), (1, 2), (1, -1)])\ndef test_uniform(low, high, rng, allclose):\n n = 200\n dist = Uniform(low, high)\n samples = dist.sample(n, rng=rng)\n if low < high:\n assert np.all(samples >= low)\n assert np.all(samples < high)\n else:\n assert np.all(samples <= low)\n assert np.all(samples > high)\n histogram, _ = np.histogram(samples, bins=5)\n assert allclose(histogram, np.mean(histogram), atol=0.1 * n)\n\n # test `integer=true`\n dist = Uniform(low, high, integer=True)\n if low < high:\n samples = dist.sample(n, rng=rng)\n assert np.all(samples >= low)\n assert np.all(samples < high)\n assert np.all(samples % 1 == 0)\n else:\n with pytest.raises(ValueError):\n samples = dist.sample(n, rng=rng)\n\n\[email protected](\"mean,std\", [(0, 1), (0, 0), (10, 2)])\ndef test_gaussian(mean, std, rng):\n n = 500\n if std <= 0:\n with pytest.raises(ValueError):\n dist = Gaussian(mean, std)\n else:\n dist = Gaussian(mean, std)\n samples = dist.sample(n, rng=rng)\n assert abs(np.mean(samples) - mean) < 3 * std / np.sqrt(n)\n assert abs(np.std(samples) - std) < 0.25\n\n\[email protected](\n \"scale,shift,high\", [(1.0, 0.0, np.inf), (10.0, 0.0, 1.0), (0.1, 0.3, 1.0)]\n)\ndef test_exponential(scale, shift, high, rng):\n n = 100\n dist = Exponential(scale, shift=shift, high=high)\n samples = dist.sample(n, rng=rng)\n assert np.all(samples >= shift)\n assert np.all(samples <= high)\n # approximation of 95% confidence interval\n ci = scale * 1.96 / np.sqrt(n)\n if scale + ci < high:\n assert abs(np.mean(samples - shift) - scale) < ci\n\n\[email protected](\"cls\", [UniformHypersphere, ScatteredHypersphere])\[email protected](\n \"min_magnitude,d\", [(0, 1), (0, 2), (0, 5), (0.6, 1), (0.3, 2), (0.4, 5)]\n)\ndef test_hypersphere_volume(cls, min_magnitude, d, rng, allclose):\n n = 250 * d\n dist = cls(min_magnitude=min_magnitude)\n samples = dist.sample(n, d, rng=rng)\n assert samples.shape == (n, d)\n assert allclose(np.mean(samples, axis=0), 0, atol=0.1)\n\n norms = npext.norm(samples, axis=1)\n assert np.all(norms >= min_magnitude)\n assert np.all(norms <= 1)\n\n # probability of not finding a point in [min_magnitude, r_tol_min], [r_tol_max, 1]\n q = 1e-5\n r_min_d = min_magnitude ** d\n r_tol_min = (r_min_d + (1 - r_min_d) * (1 - q ** (1 / n))) ** (1 / d)\n assert norms.min() <= r_tol_min\n r_tol_max = (1 - (1 - r_min_d) * (1 - q ** (1 / n))) ** (1 / d)\n assert norms.max() 
>= r_tol_max\n\n\[email protected](\"cls\", [UniformHypersphere, ScatteredHypersphere])\[email protected](\"dimensions\", [1, 2, 5])\ndef test_hypersphere_surface(cls, dimensions, rng, allclose):\n n = 200 * dimensions\n dist = cls(surface=True)\n samples = dist.sample(n, dimensions, rng=rng)\n assert samples.shape == (n, dimensions)\n assert allclose(npext.norm(samples, axis=1), 1)\n assert allclose(np.mean(samples, axis=0), 0, atol=0.25 / dimensions)\n\n\[email protected](\"cls\", [UniformHypersphere, ScatteredHypersphere])\ndef test_hypersphere_errors(cls):\n with pytest.raises(ValidationError, match=\"Must be of type 'bool'\"):\n cls(surface=0)\n\n with pytest.raises(ValidationError, match=\"Dimensions must be a positive integer\"):\n cls().sample(1, d=-1)\n\n\[email protected](\"cls\", [UniformHypersphere, ScatteredHypersphere])\ndef test_hypersphere_warns(cls):\n with pytest.warns(UserWarning, match=\"min_magnitude ignored because surface\"):\n cls(surface=True, min_magnitude=0.1)\n\n\ndef test_quasirandom_sequence_phi():\n def phi(x, iters=100):\n y = 1\n for _ in range(iters):\n y = (1 + y) ** (1 / (x + 1))\n return y\n\n rd = QuasirandomSequence()\n for i in range(1, 20):\n assert np.allclose(rd._phi(i), phi(i)), str(i)\n\n with pytest.raises(RuntimeError, match=\"did not converge\"):\n print(rd._phi(np.nan))\n\n\[email protected](\"dims\", [2, 3, 7, 8])\[email protected](\"surface\", [True, False])\ndef test_scattered_hypersphere(dims, surface, seed, plt):\n scipy_special = pytest.importorskip(\"scipy.special\")\n\n n = 3000\n dists = [\n UniformHypersphere(surface=surface),\n ScatteredHypersphere(surface=surface, method=\"sct\"),\n ScatteredHypersphere(surface=surface, method=\"sct-approx\"),\n ScatteredHypersphere(surface=surface, method=\"tfww\"),\n ]\n assert isinstance(dists[0], UniformHypersphere)\n\n xx = [] # generated points, for each dist\n times = [] # time taken to generate the points, for each dist\n for dist in dists:\n rng = np.random.RandomState(seed)\n timer = time.time()\n x = dist.sample(n, d=dims, rng=rng)\n timer = time.time() - timer\n rng.shuffle(x) # shuffle so we can compute distances in blocks without bias\n xx.append(x)\n times.append(timer)\n\n dd = [] # distance to the nearest point for each point, for each dist\n rr = [] # radii (norms) of all the generated points, for each dist\n for x in xx:\n # compute distances in blocks for efficiency (this means we're not actually\n # getting the minimum distance, just a proxy)\n n_split = 1000\n d_min = []\n for i in range(0, n, n_split):\n xi = x[i : i + n_split]\n d2 = ((xi[:, :, None] - xi.T[None, :, :]) ** 2).sum(axis=1)\n np.fill_diagonal(d2, np.inf)\n d_min.append(np.sqrt(d2.min(axis=1)))\n d_min = np.concatenate(d_min)\n dd.append(d_min)\n rr.append(np.sqrt((x ** 2).sum(axis=1)))\n\n # compute the approximate distance between points if they were evenly spread\n volume = np.pi ** (0.5 * dims) / scipy_special.gamma(0.5 * dims + 1)\n if surface:\n volume *= dims\n even_distance = (volume / n) ** (1 / (dims - 1 if surface else dims))\n\n # --- plots\n colors = [\"b\", \"g\", \"r\", \"m\", \"c\"]\n\n plt.subplot(211)\n bins = np.linspace(np.min(dd), np.max(dd), 31)\n for i, d in enumerate(dd):\n histogram, _ = np.histogram(d, bins=bins)\n plt.plot(\n 0.5 * (bins[:-1] + bins[1:]),\n histogram,\n colors[i],\n )\n plt.plot([d.min()], [0], colors[i] + \"x\")\n plt.plot([even_distance], [0], \"kx\")\n plt.title(f\"surface={surface}, dims={dims}, n={n}\")\n\n plt.subplot(212)\n bins = np.linspace(0, 1.1, 31)\n for 
i, r in enumerate(rr):\n histogram, _ = np.histogram(r, bins=bins)\n plt.plot(\n 0.5 * (bins[:-1] + bins[1:]),\n histogram,\n colors[i],\n label=f\"{dists[i]}: t={times[i]:0.2e}\",\n )\n plt.legend()\n\n # --- checks\n uniform_min = dd[0].min()\n for i, dist in enumerate(dists):\n if i == 0:\n continue\n\n # check that we're significantly better than UniformHypersphere\n d_min = dd[i].min()\n assert d_min > 1.2 * uniform_min, str(dist)\n\n # check that all surface points are on the surface\n if surface:\n assert np.allclose(rr[i], 1.0, atol=1e-5), str(dist)\n\n\[email protected](\"weights\", [None, [5, 1, 2, 9], [3, 2, 1, 0]])\ndef test_choice(weights, rng, allclose):\n \"\"\"Tests the choice function with weights\"\"\"\n n = 1000\n choices = [[1, 1], [1, -1], [-1, 1], [-1, -1]]\n N = len(choices)\n\n dist = Choice(choices, weights=weights)\n # If d is passed, it has to match\n with pytest.raises(ValueError):\n dist.sample(n, d=4, rng=rng)\n sample = dist.sample(n, rng=rng)\n tsample = [tuple(point) for point in sample]\n tchoices = [tuple(choice) for choice in choices]\n\n # check that frequency of choices matches weights\n inds = [tchoices.index(s) for s in tsample]\n histogram, _ = np.histogram(inds, bins=np.linspace(-0.5, N - 0.5, N + 1))\n p_empirical = histogram / float(histogram.sum())\n p = np.ones(N) / N if dist.p is None else dist.p\n sterr = 1.0 / np.sqrt(n) # expected maximum standard error\n assert allclose(p, p_empirical, atol=2 * sterr)\n\n\ndef test_choice_errors():\n with pytest.raises(ValidationError, match=\"Number of weights.*must match.*options\"):\n Choice([2], [1, 2, 3])\n\n with pytest.raises(ValidationError, match=\"All weights must be non-negative\"):\n Choice([2], [-1])\n\n with pytest.raises(ValidationError, match=\"Sum of weights must be positive\"):\n Choice([1, 2], [0, 0])\n\n with pytest.raises(ValidationError, match=\"Options must be of dimensionality 1\"):\n Choice([0]).sample(n=2, d=1)\n\n\[email protected](\"shape\", [(12, 2), (7, 1), (7,), (1, 1)])\ndef test_samples(shape, rng, allclose):\n samples = rng.random_sample(size=shape)\n d = Samples(samples)\n dims = None if len(shape) == 1 else shape[1]\n assert allclose(d.sample(shape[0], dims), samples)\n\n\[email protected](\"samples\", [[1.0, 2.0, 3.0], [[1, 2], [3, 4]]])\ndef test_samples_list(samples, allclose):\n d = Samples(samples)\n shape = np.array(samples).shape\n dims = None if len(shape) == 1 else shape[1]\n assert allclose(d.sample(shape[0], dims), samples)\n\n\ndef test_samples_errors(rng):\n samples = rng.random_sample(size=(12, 2))\n with pytest.raises(ValidationError):\n Samples(samples).sample(11, 2)\n with pytest.raises(ValidationError):\n Samples(samples).sample(12, 1)\n with pytest.raises(ValidationError):\n Samples(samples).sample(12)\n\n samples = rng.random_sample(size=12)\n with pytest.raises(ValidationError):\n Samples(samples).sample(12, 2)\n\n\[email protected](\"n,m\", [(99, 1), (50, 50)])\ndef test_sqrt_beta(n, m, rng):\n num_samples = 1000\n num_bins = 5\n\n vectors = rng.randn(num_samples, n + m)\n vectors /= npext.norm(vectors, axis=1, keepdims=True)\n expectation, _ = np.histogram(npext.norm(vectors[:, :m], axis=1), bins=num_bins)\n\n dist = SqrtBeta(n, m)\n samples = dist.sample(num_samples, 1, rng=rng)\n histogram, _ = np.histogram(samples, bins=num_bins)\n\n assert np.all(np.abs(np.asfarray(histogram - expectation) / num_samples) < 0.16)\n\n\[email protected](\"n,m\", [(4, 1), (10, 5)])\ndef test_sqrt_beta_analytical(n, m, rng, allclose):\n \"\"\"Tests pdf, cdf, and ppf 
of SqrtBeta distribution.\"\"\"\n pytest.importorskip(\"scipy\") # beta and betainc\n\n dt = 0.001\n x = np.arange(dt, 1 + dt, dt)\n\n dist = SqrtBeta(n, m)\n\n pdf = dist.pdf(x)\n cdf = dist.cdf(x)\n ppf = dist.ppf(cdf)\n\n # The pdf should reflect the samples\n num_samples = 2500\n num_bins = 5\n\n samples = dist.sample(num_samples, rng=rng)\n act_hist, _ = np.histogram(samples, bins=num_bins)\n bin_points = np.linspace(0, 1, num_bins + 1)\n bin_cdf = dist.cdf(bin_points)\n exp_freq = bin_cdf[1:] - bin_cdf[:-1]\n assert np.all(np.abs(np.asfarray(act_hist) / num_samples - exp_freq) < 0.1)\n\n # The cdf should be the accumulated pdf\n assert allclose(cdf, np.cumsum(pdf) * dt, atol=0.01)\n\n # The ppf should give back x\n assert allclose(x, ppf, atol=0.01)\n\n\[email protected](\"d\", [2, 3, 10, 50])\ndef test_cosine_similarity(d, rng):\n \"\"\"Tests CosineSimilarity sampling.\"\"\"\n num_samples = 2500\n num_bins = 8\n\n # Check that it gives a single dimension from UniformHypersphere\n exp_dist = UniformHypersphere(surface=True)\n act_dist = CosineSimilarity(d)\n\n exp = exp_dist.sample(num_samples, d, rng=rng)[:, 0]\n act = act_dist.sample(num_samples, rng=rng)\n\n exp_hist, _ = np.histogram(exp, bins=num_bins)\n act_hist, _ = np.histogram(act, bins=num_bins)\n\n assert np.all(np.abs(np.asfarray(exp_hist - act_hist) / num_samples) < 0.15)\n\n\[email protected](\"d\", [2, 3, 10])\ndef test_cosine_analytical(d, allclose):\n pytest.importorskip(\"scipy\") # beta, betainc, betaincinv\n\n dt = 0.0001\n x = np.arange(-1 + dt, 1, dt)\n\n def p(x, d):\n # unnormalized CosineSimilarity distribution, derived by Eric H.\n return (1 - x * x) ** ((d - 3) / 2.0)\n\n dist = CosineSimilarity(d)\n\n pdf_exp = dist.pdf(x)\n pdf_act = p(x, d)\n\n cdf_exp = dist.cdf(x)\n cdf_act = np.cumsum(pdf_act) / np.sum(pdf_act)\n\n # Check that we get the expected pdf after normalization\n assert allclose(pdf_exp / np.sum(pdf_exp), pdf_act / np.sum(pdf_act), atol=0.01)\n\n # Check that this accumulates to the expected cdf\n assert allclose(cdf_exp, cdf_act, atol=0.01)\n\n # Check that the inverse cdf gives back x\n assert allclose(dist.ppf(cdf_exp), x, atol=0.01)\n\n\ndef test_cosine_sample_shape(seed, allclose):\n \"\"\"Tests that CosineSimilarity sample has correct shape.\"\"\"\n # sampling (n, d) should be the exact same as sampling (n*d,)\n n = 3\n d = 4\n dist = CosineSimilarity(2)\n a = dist.sample(n, d, rng=np.random.RandomState(seed))\n b = dist.sample(n * d, rng=np.random.RandomState(seed))\n assert allclose(a.flatten(), b)\n\n\[email protected](\"d,p\", [(3, 0), (5, 0.4), (10, 0.7), (50, 1.0)])\ndef test_cosine_intercept(d, p, rng, allclose):\n \"\"\"Tests CosineSimilarity inverse cdf for finding intercepts.\"\"\"\n pytest.importorskip(\"scipy\") # betaincinv\n\n num_samples = 500\n\n exp_dist = UniformHypersphere(surface=True)\n act_dist = CosineSimilarity(d)\n\n dots = exp_dist.sample(num_samples, d, rng=rng)[:, 0]\n\n # Find the desired intercept so that dots >= c with probability p\n c = act_dist.ppf(1 - p)\n assert allclose(np.sum(dots >= c) / float(num_samples), p, atol=0.05)\n\n\ndef test_distorarrayparam():\n \"\"\"DistOrArrayParams can be distributions or samples.\"\"\"\n\n class Test:\n dp = DistOrArrayParam(\"dp\", default=None, sample_shape=[\"*\", \"*\"])\n\n inst = Test()\n inst.dp = UniformHypersphere()\n assert isinstance(inst.dp, UniformHypersphere)\n inst.dp = np.array([[1], [2], [3]])\n assert np.all(inst.dp == np.array([[1], [2], [3]]))\n with pytest.raises(ValueError):\n inst.dp = 
\"a\"\n # Sample must have correct dims\n with pytest.raises(ValueError):\n inst.dp = np.array([1])\n\n\ndef test_distorarrayparam_sample_shape():\n \"\"\"sample_shape dictates the shape of the sample that can be set.\"\"\"\n\n class Test:\n dp = DistOrArrayParam(\"dp\", default=None, sample_shape=[\"d1\", 10])\n d1 = 4\n\n inst = Test()\n # Distributions are still cool\n inst.dp = UniformHypersphere()\n assert isinstance(inst.dp, UniformHypersphere)\n # Must be shape (4, 10)\n inst.dp = np.ones((4, 10))\n assert np.all(inst.dp == np.ones((4, 10)))\n with pytest.raises(ValidationError):\n inst.dp = np.ones((10, 4))\n assert np.all(inst.dp == np.ones((4, 10)))\n\n\ndef test_frozen():\n \"\"\"Test attributes inherited from FrozenObject\"\"\"\n a = Uniform(-0.3, 0.6)\n b = Uniform(-0.3, 0.6)\n c = Uniform(-0.2, 0.6)\n\n assert hash(a) == hash(a)\n assert hash(b) == hash(b)\n assert hash(c) == hash(c)\n\n assert a == b\n assert hash(a) == hash(b)\n assert a != c\n assert hash(a) != hash(c) # not guaranteed, but highly likely\n assert b != c\n assert hash(b) != hash(c) # not guaranteed, but highly likely\n\n\ndef _test_betaincinv22(plt, allclose):\n scipy_special = pytest.importorskip(\"scipy.special\")\n\n # call once to load table, so that doesn't effect timing\n _betaincinv22.lookup(5, [0.1])\n\n dims = np.concatenate(\n [np.arange(1, 50), np.round(np.logspace(np.log10(51), 3.1)).astype(np.int64)]\n )\n x = np.linspace(0, 1, 1000)\n\n results = []\n for dim in dims:\n ref_timer = time.time()\n yref = scipy_special.betaincinv(dim / 2, 0.5, x)\n ref_timer = time.time() - ref_timer\n\n timer = time.time()\n y = _betaincinv22.lookup(dim, x)\n timer = time.time() - timer\n\n results.append((yref, y, ref_timer, timer))\n\n n_show = 5\n resultsT = list(zip(*results))\n errors = np.abs(np.array(resultsT[0]) - np.array(resultsT[1])).max(axis=1)\n show_inds = np.argsort(errors)[-n_show:]\n\n subplots = plt.subplots(nrows=2, sharex=True)\n if isinstance(subplots, tuple):\n _, ax = subplots\n\n for i in show_inds:\n yref, y, ref_timer, timer = results[i]\n dim = dims[i]\n\n ax[0].plot(x, y, label=f\"dims={dim}\")\n ax[1].plot(x, y - yref)\n\n speedups = np.array(resultsT[2]) / np.array(resultsT[3])\n ax[0].set_title(f\"average speedup = {speedups.mean():0.1f} times\")\n ax[0].set_ylabel(\"value\")\n ax[1].set_xlabel(\"input\")\n ax[1].set_ylabel(\"error\")\n ax[0].legend()\n\n for i, (yref, y, ref_timer, timer) in enumerate(results):\n # allow error to increase for higher dimensions (to 5e-3 when dims=1000)\n atol = 1e-3 + (np.log10(dims[i]) / 3) * 4e-3\n assert allclose(y, yref, atol=atol), f\"dims={dims[i]}\"\n\n\ndef test_make_betaincinv22_table(monkeypatch, tmp_path, plt, allclose):\n pytest.importorskip(\"scipy.special\")\n test_path = str(tmp_path / \"betaincinv22_test_table.npz\")\n monkeypatch.setattr(_betaincinv22, \"path\", test_path)\n monkeypatch.setattr(_betaincinv22, \"table\", None)\n\n _betaincinv22.make_table(n_interp=200, n_dims=50)\n _test_betaincinv22(plt=plt, allclose=allclose)\n\n\ndef test_betaincinv22_lookup(monkeypatch, plt, allclose):\n pytest.importorskip(\"scipy.special\")\n monkeypatch.setattr(_betaincinv22, \"table\", None)\n _test_betaincinv22(plt=plt, allclose=allclose)\n\n\ndef test_betaincinv22_errors():\n x = np.linspace(0.1, 0.9)\n _betaincinv22.lookup(3, x)\n\n with pytest.raises(ValidationError, match=\"must be an integer >= 1\"):\n _betaincinv22.lookup(0, x)\n\n with pytest.raises(ValidationError, match=\"must be an integer >= 1\"):\n _betaincinv22.lookup(2.2, 
x)\n", "import warnings\n\nimport numpy as np\n\nfrom nengo.connection import Connection\nfrom nengo.exceptions import ObsoleteError\nfrom nengo.network import Network\nfrom nengo.networks.ensemblearray import EnsembleArray\nfrom nengo.node import Node\n\n\nclass Product(Network):\n \"\"\"Computes the element-wise product of two equally sized vectors.\n\n The network used to calculate the product is described in\n `Gosmann, 2015`_. A simpler version of this network can be found in the\n :doc:`Multiplication example <examples/basic/multiplication>`.\n\n Note that this network is optimized under the assumption that both input\n values (or both values for each input dimensions of the input vectors) are\n uniformly and independently distributed. Visualized in a joint 2D space,\n this would give a square of equal probabilities for pairs of input values.\n This assumption is violated with non-uniform input value distributions\n (for example, if the input values follow a Gaussian or cosine similarity\n distribution). In that case, no square of equal probabilities is obtained,\n but a probability landscape with circular equi-probability lines. To obtain\n the optimal network accuracy, scale the *input_magnitude* by a factor of\n ``1 / sqrt(2)``.\n\n .. _Gosmann, 2015:\n https://nbviewer.jupyter.org/github/ctn-archive/technical-reports/blob/\n master/Precise-multiplications-with-the-NEF.ipynb\n\n Parameters\n ----------\n n_neurons : int\n Number of neurons per dimension in the vector.\n\n .. note:: These neurons will be distributed evenly across two\n ensembles. If an odd number of neurons is specified, the\n extra neuron will not be used.\n dimensions : int\n Number of dimensions in each of the vectors to be multiplied.\n\n input_magnitude : float, optional\n The expected magnitude of the vectors to be multiplied.\n This value is used to determine the radius of the ensembles\n computing the element-wise product.\n **kwargs\n Keyword arguments passed through to ``nengo.Network``\n like 'label' and 'seed'.\n\n Attributes\n ----------\n input_a : Node\n The first vector to be multiplied.\n input_b : Node\n The second vector to be multiplied.\n output : Node\n The resulting product.\n sq1 : EnsembleArray\n Represents the first squared term. See `Gosmann, 2015`_ for details.\n sq2 : EnsembleArray\n Represents the second squared term. 
See `Gosmann, 2015`_ for details.\n \"\"\"\n\n def __init__(self, n_neurons, dimensions, input_magnitude=1.0, **kwargs):\n if \"net\" in kwargs:\n raise ObsoleteError(\"The 'net' argument is no longer supported.\")\n kwargs.setdefault(\"label\", \"Product\")\n super().__init__(**kwargs)\n\n with self:\n self.input_a = Node(size_in=dimensions, label=\"input_a\")\n self.input_b = Node(size_in=dimensions, label=\"input_b\")\n self.output = Node(size_in=dimensions, label=\"output\")\n\n self.sq1 = EnsembleArray(\n max(1, n_neurons // 2),\n n_ensembles=dimensions,\n ens_dimensions=1,\n radius=input_magnitude * np.sqrt(2),\n )\n self.sq2 = EnsembleArray(\n max(1, n_neurons // 2),\n n_ensembles=dimensions,\n ens_dimensions=1,\n radius=input_magnitude * np.sqrt(2),\n )\n\n tr = 1.0 / np.sqrt(2.0)\n Connection(self.input_a, self.sq1.input, transform=tr, synapse=None)\n Connection(self.input_b, self.sq1.input, transform=tr, synapse=None)\n Connection(self.input_a, self.sq2.input, transform=tr, synapse=None)\n Connection(self.input_b, self.sq2.input, transform=-tr, synapse=None)\n\n sq1_out = self.sq1.add_output(\"square\", np.square)\n Connection(sq1_out, self.output, transform=0.5, synapse=None)\n sq2_out = self.sq2.add_output(\"square\", np.square)\n Connection(sq2_out, self.output, transform=-0.5, synapse=None)\n\n @property\n def A(self): # pragma: no cover\n warnings.warn(DeprecationWarning(\"Use 'input_a' instead of 'A'.\"))\n return self.input_a\n\n @property\n def B(self): # pragma: no cover\n warnings.warn(DeprecationWarning(\"Use 'input_b' instead of 'B'.\"))\n return self.input_b\n\n\ndef dot_product_transform(dimensions, scale=1.0):\n \"\"\"Returns a transform for output to compute the scaled dot product.\"\"\"\n return scale * np.ones((1, dimensions))\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.cumsum", "numpy.all", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.fill_diagonal", "numpy.exp", "numpy.histogram", "numpy.allclose", "numpy.arange", "numpy.asfarray", "numpy.std", "numpy.diff", "numpy.min", "numpy.log10", "numpy.argsort", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.ones" ], [ "numpy.sqrt", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
canghaiyunfan/kaggle-dstl
[ "93d6509c1af86a3e9f7cdc6b98252960285f9b4e" ]
[ "make_submission_rssrai.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport csv\nimport json\nimport gzip\nfrom functools import partial\nfrom pathlib import Path\nfrom multiprocessing.pool import Pool\nimport traceback\nfrom typing import List, Tuple, Set\n\nimport cv2\nimport numpy as np\n\nimport utils_rssrai\nfrom train_rssrai import Model, HyperParams, Image\n\n\nlogger = utils_rssrai.get_logger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('logdir', type=Path, help='Path to log directory')\n #arg('output', type=str, help='Submission csv')\n arg('--only', help='Only predict these image ids (comma-separated)')\n arg('--threshold', type=float, default=0.5)\n arg('--epsilon', type=float, default=2.0, help='smoothing')\n arg('--min-area', type=float, default=50.0)\n arg('--min-small-area', type=float, default=10.0)\n arg('--masks-only', action='store_true', help='Do only mask prediction')\n arg('--model-path', type=Path,\n help='Path to a specific model (if the last is not desired)')\n arg('--processes', type=int, default=30)\n arg('--validation', action='store_true',\n help='only validation images, check jaccard, '\n 'save masks and polygons as png')\n arg('--valid-polygons', action='store_true', help='validation via polygons')\n arg('--force-predict', action='store_true')\n arg('--no-edges', action='store_true', help='disable prediction on edges')\n arg('--buffer', type=float, help='do .buffer(x) on pred polygons')\n args = parser.parse_args()\n hps = HyperParams(**json.loads(\n args.logdir.joinpath('hps.json').read_text()))\n\n only = set(args.only.split(',')) if args.only else set()\n\n store = args.logdir # type: Path\n\n test_ids = set(['GF2_PMS1__20150902_L1A0001015646-MSS1',\n 'GF2_PMS1__20150902_L1A0001015648-MSS1',\n 'GF2_PMS1__20150912_L1A0001037899-MSS1',\n 'GF2_PMS1__20150926_L1A0001064469-MSS1',\n 'GF2_PMS1__20160327_L1A0001491484-MSS1',\n 'GF2_PMS1__20160430_L1A0001553848-MSS1',\n 'GF2_PMS1__20160623_L1A0001660727-MSS1',\n 'GF2_PMS1__20160627_L1A0001668483-MSS1',\n 'GF2_PMS1__20160704_L1A0001680853-MSS1',\n 'GF2_PMS1__20160801_L1A0001734328-MSS1'])\n\n val_ids = set(['GF2_PMS1__20160421_L1A0001537716-MSS1',\n 'GF2_PMS2__20150217_L1A0000658637-MSS2'])\n\n if only:\n to_predict = only\n elif args.validation:\n to_predict = set(val_ids)\n else:\n to_predict = set(test_ids)\n\n if not args.force_predict:\n to_predict_masks = [\n im_id for im_id in to_predict if not mask_path(store, im_id).exists()]\n else:\n to_predict_masks = to_predict\n\n if to_predict_masks:\n predict_masks(args, hps, store, to_predict_masks, args.threshold,\n validation=args.validation, no_edges=args.no_edges)\n if args.masks_only:\n logger.info('Was building masks only, done.')\n return\n\n\ndef mask_path(store: Path, im_id: str) -> Path:\n return store.joinpath('{}.bin-mask.gz'.format(im_id))\n\n\ndef predict_masks(args, hps, store, to_predict: List[str], threshold: float,\n validation: str=None, no_edges: bool=False):\n logger.info('Predicting {} masks: {}'\n .format(len(to_predict), ', '.join(sorted(to_predict))))\n model = Model(hps=hps)\n if args.model_path:\n model.restore_snapshot(args.model_path)\n else:\n model.restore_last_snapshot(args.logdir)\n\n def load_im(im_id):\n data = model.preprocess_image(utils_rssrai.load_image(im_id))\n if hps.n_channels != data.shape[0]:\n data = data[:hps.n_channels]\n if validation == 'square':\n data = square(data, hps)\n return Image(id=im_id, data=data)\n\n def predict_mask(im):\n logger.info(im.id)\n return im, 
model.predict_image_mask(im.data, no_edges=no_edges)\n\n im_masks = map(predict_mask, utils_rssrai.imap_fixed_output_buffer(\n load_im, sorted(to_predict), threads=2))\n\n for im, mask in im_masks:\n assert mask.shape[1:] == im.data.shape[1:]\n with gzip.open(str(mask_path(store, im.id)), 'wb') as f:\n # TODO - maybe do (mask * 20).astype(np.uint8)\n np.save(f, mask)\n\n\n # for im, mask in utils_rssrai.imap_fixed_output_buffer(\n # lambda _: next(im_masks), to_predict, threads=1):\n # assert mask.shape[1:] == im.data.shape[1:]\n # with gzip.open(str(mask_path(store, im.id)), 'wb') as f:\n # # TODO - maybe do (mask * 20).astype(np.uint8)\n # np.save(f, mask)\n\n\ndef square(x, hps):\n if len(x.shape) == 2 or x.shape[2] <= 20:\n return x[:hps.validation_square, :hps.validation_square]\n else:\n assert x.shape[0] <= 20\n return x[:, :hps.validation_square, :hps.validation_square]\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JayaIngle12345/CNN
[ "da8f02027604da8d4618854d3b33c6b69d4b0205" ]
[ "gb_mod.py" ]
[ "\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nimport tensorflow as tf\nimport pandas as pd \nimport numpy as np \nimport os\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_nn_ops\n\nindices =[[0,0,7,22,189]]\nmx = 16\ntf.set_random_seed(1234)\n#Pickle for saving Data\n\n#Pickle Command for the purpose of saving object in file and reloading\ndef save_obj(obj, name ):\n with open('obj/'+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open('obj/' + name, 'rb') as f:\n return pickle.load(f)\n\n\n\nl1regScale = 0.5\nl2regScale = 0.5\n\ni = 9\nstep = 100\n\nbatch_size = 10\nlr = 0.0001\ninitMethod = 2\nvalSetSize = 10\nepochs = 1\nsaveDir = \"/home/ubuntu/pa3/saveDr/\"\n\n\nchkLocation = saveDir+\"model_e_\" + str(i)+\"_s_\"+str(step) +\".ckpt\"\n\nfileName = saveDir+\"submission.csv\"\n\nf = open(fileName,\"w\")\n\nf.write(\"pixelno,grad\\n\")\n\ndatasetLoc = \"/home/ubuntu/pa3/\"\n\n#Reading Data\ntrainD = pd.read_csv( datasetLoc + \"train.csv\", low_memory=False)\nvalD = pd.read_csv( datasetLoc + \"val.csv\", low_memory=False)\ntestD = pd.read_csv( datasetLoc + \"test.csv\", low_memory=False)\n\n#Breaking Labels and Features\n\n#Training\ntrainDnp = trainD.values\ntrainLabels = trainDnp[0:55001,785:786] #Label Classification\ntrainFeatures = trainDnp[0:55000,1:785] #Traning Input data\ntrainDnp = None\n\n#Validation\nvalDnp = valD.values\nvalLabels = valDnp[0:5001,785:786] #Label Classification\nvalFeatures = valDnp[0:5000,1:785] #Validation Input Data\n\nvalDnp = None\n\n#Testing\ntestDnp = testD.values\ntestFeatures = testDnp[:,1:785] #Testing we don't have Labels\n\ntestDnp = None\n## Normalizing Data\n\n(no_of_row,no_of_col) = (np.shape(trainFeatures)) #Training Operation\n(val_row,val_col) = (np.shape(valFeatures)) #Validation Operation\n(test_row,test_col) = (np.shape(testFeatures)) #Validation Operation\n\nmn = np.mean(trainFeatures,axis = 0) #Mean of training data\ntiled_mn = np.tile(mn, (no_of_row, 1)) #Training : tiling up mean for substraction\nval_tiled_mn = np.tile(mn,(val_row,1)) #Validation : tiling up mean for validation\ntest_tiled_mn = np.tile(mn,(test_row,1)) #Testing : tiling up mean for validation\n\nst_dev = np.std(trainFeatures,axis = 0) #Standard deviation of training data\ntiled_st_dev = np.tile(st_dev, (no_of_row, 1)) #Training : tiling of variance for division\nval_tiled_st_dev = np.tile(st_dev,(val_row,1)) #Validation : tiling up variance for division\ntest_tiled_st_dev = np.tile(st_dev,(test_row,1)) #Testing : tiling up variance for division\n\n#Normalizing Training Data\nmn_shifted_data = trainFeatures - tiled_mn \ntrainFeatures = mn_shifted_data/tiled_st_dev \n\n#Normalizing Validation Data\nmn_shifted_val_d = valFeatures - val_tiled_mn\nvalFeatures = mn_shifted_val_d/val_tiled_st_dev\n\n#Normalizing Testing Data\nmn_shifted_test_d = testFeatures - test_tiled_mn\ntestFeatures = mn_shifted_test_d/test_tiled_st_dev\n\n#Setting All the useless variables to None so that there memory can be freed\n\n#Cleaning Area###############################\n\nmn = None\ntiled_mn = None\nval_tiled_mn = None\ntest_tiled_mn = None\n\nst_dev = None\ntiled_st_dev = None \nval_tiled_st_dev = None\ntest_tiled_st_dev = None\n\nmn_shifted_data = None \nmn_shifted_val_d = None\nmn_shifted_test_d = None\n\ntrainD = None\nvalD = None\ntestD = None\n\n##############################################\n\n#Data set 
Variables are trainFeatures, valFeatures, testFeatures and truth valLabels, trainLabels\n\nfeatures = tf.placeholder(dtype=tf.float32)\nlabels = tf.placeholder(dtype=tf.int32)\n\n#Initialization Method Deciding Condition\nif initMethod == 1:\n initializerUsed = tf.contrib.layers.xavier_initializer()\nelse:\n initializerUsed = tf.keras.initializers.he_normal()\n\n\n# for input layer \ninput_layer =tf.reshape(features,[-1,28,28,1]) # here 3 corresponds to RGB channel\n\nreglr = tf.contrib.layers.l1_l2_regularizer(\n scale_l1=l1regScale,\n scale_l2=l2regScale,\n scope=None\n)\n\n#conv layer 1 \nconv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 64,\n kernel_size = [3,3],\n padding = \"same\",\n activation=tf.nn.relu,\n kernel_initializer = initializerUsed,\n name = 'conv1',\n kernel_regularizer= reglr,\n bias_regularizer = reglr\n )\n\n# #Pooling Layer 1\npool1 = tf.layers.max_pooling2d(inputs = conv1,padding=\"same\",pool_size = [2,2],strides = 1,name = 'pool1')\n\n#conv layer 2\nconv2 = tf.layers.conv2d(\n inputs = pool1,\n filters = 128,\n kernel_size = [3,3], \n padding = \"same\",\n activation = tf.nn.relu,\n kernel_initializer = initializerUsed,\n name='conv2',\n kernel_regularizer= reglr,\n bias_regularizer = reglr\n )\n\n#pooling layer 2\npool2 = tf.layers.max_pooling2d(inputs = conv2,padding=\"same\",pool_size = [2,2],strides = 1,name ='pool2')\n\n#convo layer 3\nconv3 = tf.layers.conv2d(\n inputs = pool2,\n filters = 256,\n kernel_size = [3,3], \n padding = \"same\",\n activation = tf.nn.relu,\n kernel_initializer = initializerUsed,name = 'conv3',\n kernel_regularizer= reglr,\n bias_regularizer = reglr )\n\n#convo layer 4\nconv4 = tf.layers.conv2d( inputs = conv3,\n filters = 256,\n kernel_size = [3,3], \n padding = \"same\",\n activation = tf.nn.relu,\n kernel_initializer = initializerUsed, name = 'conv4',\n kernel_regularizer= reglr,\n bias_regularizer = reglr )\n\n#pool layer 3\npool3 = tf.layers.max_pooling2d(inputs = conv4,padding=\"same\",pool_size = [2,2],strides = 1,name = 'pool3')\n\n#Making pool3 flat for the purpose of sending it to dense layer\npool3_flat = tf.reshape(pool3,[-1,28*28*256])\n\nfc1 = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu,kernel_initializer = initializerUsed,name = 'fc1',kernel_regularizer= reglr,\n bias_regularizer = reglr)\n\n#fully connected 2\nfc2 = tf.layers.dense(inputs=fc1,units = 1024,activation =tf.nn.relu,kernel_initializer = initializerUsed,name = 'fc2',kernel_regularizer= reglr,\n bias_regularizer = reglr)\n\n\n# ##############BATCH NORMALISATION IS LEFT HERE ####################################\nnormalizedBatch = tf.layers.batch_normalization(\n inputs=fc2,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n training=False,\n trainable=True,\n name=None,\n reuse=None,\n renorm=False,\n renorm_clipping=None,\n renorm_momentum=0.99,\n fused=None,\n virtual_batch_size=None,\n adjustment=None\n)\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n\nlogits = tf.layers.dense(inputs =normalizedBatch,units =10,kernel_initializer = initializerUsed,name = 'logits')\npredictedLabels = tf.argmax(input=logits, axis=1)\npll = tf.cast(predictedLabels,tf.int32)\nc = 
tf.equal(pll,labels[:,0])\n\ncorrectCount = tf.reduce_sum(tf.cast(c, tf.int32))\nonehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)\nloss = tf.losses.softmax_cross_entropy(onehot_labels,logits)\noptimizer = tf.train.AdamOptimizer(lr)\ntraining = optimizer.minimize(loss)\n\n#out4 = np.asarray(conv4)\n#only1 = np.zeros([1,1,28,28,256])\n#only1[0,0,:,0,0] =1.0\n#masked = np.multiply(out4,only1)\n#only2 = tf.convert_to_tensor(only1, np.float32)\n#masked = tf.multiply(conv4, tf.cast(only2,dtype = tf.float32))\nc = tf.constant(0.0,shape = [1,1,28,28,256])\n#indices =[[0,0,9,14,219]]\nvalues = [1.0]\nshape = [1,1,28,28,256]\ndelta = tf.SparseTensor(indices, values, shape)\t\nresult = c + tf.sparse_tensor_to_dense(delta)\nmasked = tf.multiply(conv4, result)\n\n\n\n\n#grads_wrt_input_tensor = tf.gradients(masked, features)[0]\ngrad = tf.gradients(masked, features)\n\n\nsaver = tf.train.Saver()\nsess = tf.Session()\n#sess : Session, chkLocation : Location of checkpoint\nsaver.restore(sess,chkLocation)\n\nbatchD = trainFeatures[1] # this is t-shirt\n\ngrade,cv4= sess.run([grad,conv4],feed_dict={features:batchD})\n#maxm = np.amax(cv4)\n#index = [np.concatenate(np.where(cv4 ==maxm)) ]\n#print(type(cv4))\n#print(np.shape(cv4))\n#print(cv4)\nprint(mn)\n\nsave_obj(grade,\"grad\"+str(mx))\n\n\n\n\n#grade = sess.run([grad],fee_dict ={features:batchD,indices:index})\n#print(cv4[0,27,27,1])\n#print(grade)\n#cv4= sess.run([conv4],feed_dict={features:batchD})\n# find index of max 25 elements \n#ind = np.argpartition(cv4, -25)[-25:]\n#max,indices] = tf.nn.top_k(cv4,k=25,sorted=True)\n#print(ind[0,1])\n#for i in range(25):\n # grade = sess.run([grad],feed_dict ={indices = max[i]})\n # save_obj(grade,\"gradients_mod\"+i)\n\n\n\n\n\n#save_obj(cv4,\"cv4\")\n\n\n\n" ]
[ [ "tensorflow.contrib.layers.l1_l2_regularizer", "tensorflow.cast", "tensorflow.equal", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.keras.initializers.he_normal", "pandas.read_csv", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.layers.dense", "tensorflow.losses.softmax_cross_entropy", "numpy.std", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.layers.conv2d", "tensorflow.zeros_initializer", "tensorflow.sparse_tensor_to_dense", "tensorflow.placeholder", "tensorflow.set_random_seed", "tensorflow.multiply", "tensorflow.constant", "tensorflow.reshape", "numpy.tile", "tensorflow.layers.max_pooling2d", "tensorflow.SparseTensor", "numpy.shape", "tensorflow.ones_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Michaelsqj/pytorch_connectomics
[ "abaf6ccb39fb2ec09f9fd0632a1294288678eb02" ]
[ "connectomics/data/dataset/dataset_tile.py" ]
[ "from __future__ import print_function, division\nfrom typing import Optional, List\nimport numpy as np\nimport json\nimport random\n\nimport torch\nimport torch.utils.data\n\nfrom . import VolumeDataset\nfrom ..augmentation import Compose\nfrom ..utils import crop_volume, relabel,seg_widen_border, tileToVolume \n\nTARGET_OPT_TYPE = List[str]\nWEIGHT_OPT_TYPE = List[List[str]]\nAUGMENTOR_TYPE = Optional[Compose]\n\nclass TileDataset(torch.utils.data.Dataset):\n \"\"\"Dataset class for large-scale tile-based datasets. Large-scale volumetric datasets are usually stored as \n individual tiles. Directly loading them as a single array for training and inference is infeasible. This \n class reads the paths of the tiles and construct smaller chunks for processing.\n\n Args:\n chunk_num (list): volume spliting parameters in :math:`(z, y, x)` order. Default: :math:`[2, 2, 2]` \n chunk_num_ind (list): predefined list of chunks. Default: None\n chunk_iter (int): number of iterations on each chunk. Default: -1\n chunk_stride (bool): allow overlap between chunks. Default: True\n volume_json (str): json file for input image. Default: ``'path/to/image'``\n label_json (str, optional): json file for label. Default: None\n valid_mask_json (str, optional): json file for valid mask. Default: None\n valid_ratio (float): volume ratio threshold for valid samples. Default: 0.5\n sample_volume_size (tuple, int): model input size.\n sample_label_size (tuple, int): model output size.\n sample_stride (tuple, int): stride size for sampling.\n augmentor (connectomics.data.augmentation.composition.Compose, optional): data augmentor for training. Default: None\n target_opt (list): list the model targets generated from segmentation labels.\n weight_opt (list): list of options for generating pixel-wise weight masks.\n mode (str): ``'train'``, ``'val'`` or ``'test'``. Default: ``'train'``\n do_2d (bool): load 2d samples from 3d volumes. Default: False\n label_erosion (int): label erosion parameter to widen border. Default: 0\n pad_size(list): padding parameters in :math:`(z, y, x)` order. Default: :math:`[0,0,0]`\n reject_size_thres (int): threshold to decide if a sampled volumes contains foreground objects. Default: 0\n reject_p (float): probability of rejecting non-foreground volumes. 
Default: 0.95\n \"\"\"\n\n def __init__(self, \n chunk_num: List[int] = [2, 2, 2], \n chunk_num_ind: Optional[list] = None,\n chunk_iter: int = -1, \n chunk_stride: bool = True,\n volume_json: str = 'path/to/image', \n label_json: Optional[str] = None,\n valid_mask_json: Optional[str] = None,\n valid_ratio: float = 0.5,\n sample_volume_size: tuple = (8, 64, 64),\n sample_label_size: Optional[tuple] = None,\n sample_stride: tuple = (1, 1, 1),\n augmentor: AUGMENTOR_TYPE = None,\n target_opt: TARGET_OPT_TYPE = ['0'],\n weight_opt: WEIGHT_OPT_TYPE = [['1']],\n mode: str = 'train', \n do_2d: bool = False,\n label_erosion: int = 0, \n pad_size: List[int] = [0,0,0],\n reject_size_thres: int = 0,\n reject_p: float = 0.95):\n \n self.sample_volume_size = sample_volume_size\n self.sample_label_size = sample_label_size\n self.sample_stride = sample_stride\n self.valid_ratio = valid_ratio\n self.augmentor = augmentor\n\n self.target_opt = target_opt\n self.weight_opt = weight_opt\n\n self.mode = mode\n self.do_2d = do_2d\n self.chunk_iter = chunk_iter\n self.label_erosion = label_erosion\n self.pad_size = pad_size\n\n self.chunk_step = 1\n if chunk_stride: # if do stride, 50% overlap\n self.chunk_step = 2\n\n self.chunk_num = chunk_num\n if chunk_num_ind is None:\n self.chunk_num_ind = range(np.prod(chunk_num))\n else:\n self.chunk_num_ind = chunk_num_ind\n self.chunk_id_done = []\n\n self.json_volume = json.load(open(volume_json))\n self.json_label = json.load(open(label_json)) if (label_json is not None) else None\n self.json_valid = json.load(open(valid_mask_json)) if (valid_mask_json is not None) else None\n self.json_size = [self.json_volume['depth'],\n self.json_volume['height'],\n self.json_volume['width']]\n\n self.coord_m = np.array([0, self.json_volume['depth'],\n 0, self.json_volume['height'],\n 0, self.json_volume['width']], int)\n self.coord = np.zeros(6, int)\n\n # rejection samping\n self.reject_size_thres = reject_size_thres\n self.reject_p = reject_p \n\n def get_coord_name(self):\n return '-'.join([str(x) for x in self.coord])\n\n def updatechunk(self, do_load=True):\n if len(self.chunk_id_done)==len(self.chunk_num_ind):\n self.chunk_id_done = []\n id_rest = list(set(self.chunk_num_ind)-set(self.chunk_id_done))\n if self.mode == 'train':\n id_sample = id_rest[int(np.floor(random.random()*len(id_rest)))]\n elif self.mode == 'test':\n id_sample = id_rest[0]\n self.chunk_id_done += [id_sample]\n\n zid = float(id_sample//(self.chunk_num[1]*self.chunk_num[2]))\n yid = float((id_sample//self.chunk_num[2])%(self.chunk_num[1]))\n xid = float(id_sample%self.chunk_num[2])\n \n x0,x1 = np.floor(np.array([xid,xid+self.chunk_step])/(self.chunk_num[2]+self.chunk_step-1)*self.json_size[2]).astype(int)\n y0,y1 = np.floor(np.array([yid,yid+self.chunk_step])/(self.chunk_num[1]+self.chunk_step-1)*self.json_size[1]).astype(int)\n z0,z1 = np.floor(np.array([zid,zid+self.chunk_step])/(self.chunk_num[0]+self.chunk_step-1)*self.json_size[0]).astype(int)\n\n self.coord = np.array([z0,z1,y0,y1,x0,x1],int)\n\n if do_load:\n self.loadchunk()\n\n def loadchunk(self):\n coord_p = self.coord+[-self.pad_size[0],self.pad_size[0],-self.pad_size[1],self.pad_size[1],-self.pad_size[2],self.pad_size[2]]\n print('load tile', self.coord)\n # keep it in uint8 to save memory\n volume = [tileToVolume(self.json_volume['image'], coord_p, self.coord_m, \\\n tile_sz=self.json_volume['tile_size'], tile_st=self.json_volume['tile_st'],\n tile_ratio=self.json_volume['tile_ratio'])]\n \n label = None\n if self.json_label is not None: 
\n dt={'uint8':np.uint8, 'uint16':np.uint16, 'uint32':np.uint32, 'uint64':np.uint64}\n # float32 may misrepresent large uint32/uint64 numbers -> relabel to decrease the label index\n label = [relabel(tileToVolume(self.json_label['image'], coord_p, self.coord_m, \\\n tile_sz=self.json_label['tile_size'],tile_st=self.json_label['tile_st'],\n tile_ratio=self.json_label['tile_ratio'], ndim=self.json_label['ndim'],\n dt=dt[self.json_label['dtype']], do_im=0), do_type=True)]\n if self.label_erosion != 0:\n label[0] = seg_widen_border(label[0], self.label_erosion)\n\n valid_mask = None\n if self.json_valid is not None:\n valid_mask = [tileToVolume(self.json_valid['image'], coord_p, self.coord_m, \\\n tile_sz=self.json_valid['tile_size'], tile_st=self.json_valid['tile_st'],\n tile_ratio=self.json_valid['tile_ratio'])]\n \n self.dataset = VolumeDataset(volume, label, valid_mask,\n valid_ratio = self.valid_ratio,\n sample_volume_size = self.sample_volume_size,\n sample_label_size = self.sample_label_size,\n sample_stride = self.sample_stride,\n augmentor = self.augmentor,\n target_opt = self.target_opt,\n weight_opt = self.weight_opt,\n mode = self.mode,\n do_2d = self.do_2d,\n iter_num = self.chunk_iter,\n reject_size_thres = self.reject_size_thres,\n reject_p = self.reject_p)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elan17/GamestonkTerminal
[ "f958f8275dc15ffaf30c1f0652f5b033725b7f10", "f958f8275dc15ffaf30c1f0652f5b033725b7f10", "f958f8275dc15ffaf30c1f0652f5b033725b7f10", "f958f8275dc15ffaf30c1f0652f5b033725b7f10", "f958f8275dc15ffaf30c1f0652f5b033725b7f10" ]
[ "gamestonk_terminal/stocks/options/pricing_controller.py", "gamestonk_terminal/common/prediction_techniques/arima_view.py", "discordbot/stocks/government/gtrades.py", "tests/gamestonk_terminal/stocks/prediction_techniques/test_pred_controller.py", "gamestonk_terminal/economy/alphavantage_view.py" ]
[ "\"\"\" Pricing Controller Module \"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\nimport pandas as pd\nfrom tabulate import tabulate\nfrom prompt_toolkit.completion import NestedCompleter\nfrom gamestonk_terminal.rich_config import console\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.parent_classes import BaseController\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n)\nfrom gamestonk_terminal.menu import session\nfrom gamestonk_terminal.stocks.options import yfinance_view\n\n\nclass PricingController(BaseController):\n \"\"\"Pricing Controller class\"\"\"\n\n CHOICES_COMMANDS = [\n \"add\",\n \"rmv\",\n \"show\",\n \"rnval\",\n ]\n\n def __init__(\n self,\n ticker: str,\n selected_date: str,\n prices: pd.DataFrame,\n queue: List[str] = None,\n ):\n \"\"\"Constructor\"\"\"\n super().__init__(\"/stocks/options/pricing/\", queue)\n\n self.ticker = ticker\n self.selected_date = selected_date\n self.prices = prices\n\n if session and gtff.USE_PROMPT_TOOLKIT:\n choices: dict = {c: {} for c in self.controller_choices}\n self.completer = NestedCompleter.from_nested_dict(choices)\n\n def print_help(self):\n \"\"\"Print help\"\"\"\n help_text = f\"\"\"\n[param]Ticker: [/param]{self.ticker or None}\n[param]Expiry: [/param]{self.selected_date or None}\n[cmds]\n add add an expected price to the list\n rmv remove an expected price from the list\n\n show show the listed of expected prices\n rnval risk neutral valuation for an option[/cmds]\n \"\"\"\n console.print(text=help_text, menu=\"Stocks - Options - Pricing\")\n\n def custom_reset(self):\n \"\"\"Class specific component of reset command\"\"\"\n if self.ticker:\n if self.selected_date:\n return [\n \"stocks\",\n f\"load {self.ticker}\",\n \"options\",\n f\"exp -d {self.selected_date}\",\n \"pricing\",\n ]\n return [\"stocks\", f\"load {self.ticker}\", \"options\", \"payoff\"]\n return []\n\n def call_add(self, other_args: List[str]):\n \"\"\"Process add command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"add\",\n description=\"Adds a price to the list\",\n )\n parser.add_argument(\n \"-p\",\n \"--price\",\n type=float,\n required=\"-h\" not in other_args,\n dest=\"price\",\n help=\"Projected price of the stock at the expiration date\",\n )\n parser.add_argument(\n \"-c\",\n \"--chance\",\n type=float,\n required=\"-h\" not in other_args,\n dest=\"chance\",\n help=\"Chance that the stock is at a given projected price\",\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-p\")\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n if ns_parser.price in self.prices[\"Price\"].to_list():\n df = self.prices[(self.prices[\"Price\"] != ns_parser.price)]\n else:\n df = self.prices\n\n new = {\"Price\": ns_parser.price, \"Chance\": ns_parser.chance}\n df = df.append(new, ignore_index=True)\n self.prices = df.sort_values(\"Price\")\n console.print(\"\")\n\n def call_rmv(self, other_args: List[str]):\n \"\"\"Process rmv command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"rmv\",\n description=\"Removes a price from the list\",\n )\n parser.add_argument(\n \"-p\",\n \"--price\",\n type=float,\n required=\"-h\" not in other_args and \"-a\" not in other_args,\n dest=\"price\",\n help=\"Price you want to remove from the list\",\n )\n parser.add_argument(\n 
\"-a\",\n \"--all\",\n action=\"store_true\",\n default=False,\n dest=\"all\",\n help=\"Remove all prices from the list\",\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-p\")\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n if ns_parser.all:\n self.prices = pd.DataFrame(columns=[\"Price\", \"Chance\"])\n else:\n self.prices = self.prices[(self.prices[\"Price\"] != ns_parser.price)]\n console.print(\"\")\n\n def call_show(self, other_args):\n \"\"\"Process show command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"show\",\n description=\"Display prices\",\n )\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n console.print(\n f\"Estimated price(s) of {self.ticker} at {self.selected_date}\"\n )\n if gtff.USE_TABULATE_DF:\n console.print(\n tabulate(\n self.prices,\n headers=self.prices.columns,\n floatfmt=\".2f\",\n showindex=False,\n tablefmt=\"fancy_grid\",\n ),\n \"\\n\",\n )\n else:\n console.print(self.prices.to_string, \"\\n\")\n\n def call_rnval(self, other_args: List[str]):\n \"\"\"Process rnval command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"rnval\",\n description=\"The risk neutral value of the options\",\n )\n parser.add_argument(\n \"-p\",\n \"--put\",\n action=\"store_true\",\n default=False,\n help=\"Show puts instead of calls\",\n )\n parser.add_argument(\n \"-m\",\n \"--min\",\n type=float,\n default=None,\n dest=\"mini\",\n help=\"Minimum strike price shown\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n type=float,\n default=None,\n dest=\"maxi\",\n help=\"Maximum strike price shown\",\n )\n parser.add_argument(\n \"-r\",\n \"--risk\",\n type=float,\n default=None,\n dest=\"risk\",\n help=\"The risk-free rate to use\",\n )\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n if self.ticker:\n if self.selected_date:\n if sum(self.prices[\"Chance\"]) == 1:\n yfinance_view.risk_neutral_vals(\n self.ticker,\n self.selected_date,\n ns_parser.put,\n self.prices,\n ns_parser.mini,\n ns_parser.maxi,\n ns_parser.risk,\n )\n else:\n console.print(\"Total chances must equal one\\n\")\n else:\n console.print(\"No expiry loaded. First use `exp {expiry date}`\\n\")\n else:\n console.print(\"No ticker loaded. 
First use `load <ticker>`\\n\")\n", "\"\"\" ARIMA Prediction View \"\"\"\n__docformat__ = \"numpy\"\n\n\nimport datetime\nimport os\nfrom typing import Union\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nfrom tabulate import tabulate\n\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.common.prediction_techniques import arima_model\nfrom gamestonk_terminal.common.prediction_techniques.pred_helper import (\n price_prediction_backtesting_color,\n print_prediction_kpis,\n print_pretty_prediction,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal.helper_funcs import (\n export_data,\n get_next_stock_market_days,\n patch_pandas_text_adjustment,\n plot_autoscale,\n)\nfrom gamestonk_terminal.rich_config import console\n\n\nregister_matplotlib_converters()\n\n# pylint:disable=too-many-arguments\n\n\ndef display_arima(\n dataset: str,\n values: Union[pd.DataFrame, pd.Series],\n arima_order: str,\n n_predict: int,\n seasonal: bool,\n ic: str,\n results: bool,\n s_end_date: str = \"\",\n export: str = \"\",\n time_res: str = \"\",\n):\n \"\"\"View fit ARIMA model\n\n Parameters\n ----------\n dataset : str\n String indicating dataset (for plot title)\n values : Union[pd.DataFrame, pd.Series]\n Data to fit\n arima_order : str\n String of ARIMA params in form \"p,q,d\"\n n_predict : int\n Days to predict\n seasonal : bool\n Flag to use seasonal model\n ic : str\n Information Criteria for model evaluation\n results : bool\n Flag to display model summary\n s_end_date : str, optional\n Specified end date for backtesting comparisons\n export : str, optional\n Format to export image\n time_res : str\n Resolution for data, allowing for predicting outside of standard market days\n \"\"\"\n\n if arima_order:\n t_order = tuple(int(ord) for ord in arima_order.split(\",\"))\n if s_end_date:\n if not time_res:\n future_index = get_next_stock_market_days(\n last_stock_day=s_end_date, n_next_days=n_predict\n )\n else:\n future_index = pd.date_range(\n s_end_date, periods=n_predict + 1, freq=time_res\n )[1:]\n\n if future_index[-1] > datetime.datetime.now():\n console.print(\n \"Backtesting not allowed, since End Date + Prediction days is in the future\\n\"\n )\n return\n\n df_future = values[future_index[0] : future_index[-1]]\n values = values[:s_end_date] # type: ignore\n\n l_predictions, model = arima_model.get_arima_model(\n values, arima_order, n_predict, seasonal, ic\n )\n\n # Prediction data\n if not time_res:\n l_pred_days = get_next_stock_market_days(\n last_stock_day=values.index[-1],\n n_next_days=n_predict,\n )\n else:\n l_pred_days = pd.date_range(\n values.index[-1], periods=n_predict + 1, freq=time_res\n )[1:]\n\n df_pred = pd.Series(l_predictions, index=l_pred_days, name=\"Price\")\n\n if results:\n console.print(model.summary())\n console.print(\"\")\n\n # Plotting\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n ax.plot(values.index, values, lw=2)\n\n # pylint:disable=no-member\n\n if arima_order:\n # BACKTESTING\n if s_end_date:\n ax.set_title(\n f\"BACKTESTING: ARIMA {str(t_order)} on {dataset} - {n_predict} step prediction\"\n )\n else:\n ax.set_title(\n f\"ARIMA {str(t_order)} on {dataset} - {n_predict} step prediction\"\n )\n else:\n # BACKTESTING\n if s_end_date:\n ax.set_title(\n f\"BACKTESTING: ARIMA {model.order} on {dataset} - {n_predict} step prediction\"\n )\n else:\n plt.title(f\"ARIMA {model.order} on {dataset} - {n_predict} step 
prediction\")\n ax.set_xlim(values.index[0], l_pred_days[-1])\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(\"Value\")\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax.minorticks_on()\n ax.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax.plot(\n [values.index[-1], df_pred.index[0]],\n [values.values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n ax.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n ax.axvspan(values.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2)\n _, _, ymin, ymax = plt.axis()\n ax.vlines(values.index[-1], ymin, ymax, linewidth=1, linestyle=\"--\", color=\"k\")\n\n # BACKTESTING\n if s_end_date:\n ax.plot(\n df_future.index,\n df_future.values,\n lw=2,\n c=\"tab:blue\",\n ls=\"--\",\n )\n plt.plot(\n [values.index[-1], df_future.index[0]],\n [\n values.values[-1],\n df_future.values[0],\n ],\n lw=1,\n c=\"tab:blue\",\n linestyle=\"--\",\n )\n\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n\n # BACKTESTING\n if s_end_date:\n fig, ax = plt.subplots(1, 2, figsize=plot_autoscale(), dpi=PLOT_DPI)\n ax0 = ax[0]\n ax0.plot(\n df_future.index,\n df_future.values,\n lw=2,\n c=\"tab:blue\",\n ls=\"--\",\n )\n ax0.plot(df_pred.index, df_pred, lw=2, c=\"green\")\n ax0.scatter(df_future.index, df_future, c=\"tab:blue\", lw=3)\n ax0.plot(\n [values.index[-1], df_future.index[0]],\n [\n values.values[-1],\n df_future.values[0],\n ],\n lw=2,\n c=\"tab:blue\",\n ls=\"--\",\n )\n ax0.scatter(df_pred.index, df_pred, c=\"green\", lw=3)\n ax0.plot(\n [values.index[-1], df_pred.index[0]],\n [values.values[-1], df_pred.values[0]],\n lw=2,\n c=\"green\",\n ls=\"--\",\n )\n ax0.set_title(\"BACKTESTING: Real data Prediction\")\n ax0.set_xlim(values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))\n ax0.set_xticks(\n [values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)]\n )\n ax0.set_ylabel(\"Value\")\n ax0.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax0.minorticks_on()\n ax0.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax0.legend([\"Real data\", \"Prediction data\"])\n ax0.set_xticks([])\n\n ax1 = ax[1]\n ax1.axhline(y=0, color=\"k\", linestyle=\"--\", linewidth=2)\n ax1.plot(\n df_future.index,\n 100 * (df_pred.values - df_future.values) / df_future.values,\n lw=2,\n c=\"red\",\n )\n ax1.scatter(\n df_future.index,\n 100 * (df_pred.values - df_future.values) / df_future.values,\n c=\"red\",\n lw=5,\n )\n ax1.set_title(\"BACKTESTING: Error between Real data and Prediction [%]\")\n ax1.plot(\n [values.index[-1], df_future.index[0]],\n [\n 0,\n 100 * (df_pred.values[0] - df_future.values[0]) / df_future.values[0],\n ],\n lw=2,\n ls=\"--\",\n c=\"red\",\n )\n ax1.set_xlim(values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))\n ax1.set_xticks(\n [values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)]\n )\n ax1.set_xlabel(\"Time\")\n ax1.set_ylabel(\"Prediction Error (%)\")\n ax1.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax1.minorticks_on()\n ax1.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax1.legend([\"Real data\", \"Prediction data\"])\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n\n # Refactor prediction dataframe for backtesting print\n df_pred.name = \"Prediction\"\n df_pred = df_pred.to_frame()\n df_pred[\"Real\"] = df_future.values\n\n if gtff.USE_COLOR:\n if gtff.USE_TABULATE_DF:\n 
df_pred[\"Real\"] = df_pred[\"Real\"].astype(float)\n df_pred[\"Prediction\"] = df_pred[\"Prediction\"].astype(float)\n df_pred[\"Dif\"] = (\n 100 * (df_pred.Prediction - df_pred.Real) / df_pred.Real\n )\n print(\n tabulate(\n df_pred,\n headers=[\"Date\", \"Predicted\", \"Actual\", \"% Difference\"],\n showindex=True,\n floatfmt=\".2f\",\n tablefmt=\"fancy_grid\",\n )\n )\n else:\n patch_pandas_text_adjustment()\n console.print(\"Time Real [$] x Prediction [$]\")\n console.print(\n df_pred.apply(\n price_prediction_backtesting_color, axis=1\n ).to_string()\n )\n else:\n if gtff.USE_TABULATE_DF:\n df_pred[\"Real\"] = df_pred[\"Real\"].astype(float)\n df_pred[\"Prediction\"] = df_pred[\"Predicted\"].astype(float)\n df_pred[\"Dif\"] = (\n 100 * (df_pred.Prediction - df_pred.Real) / df_pred.Real\n )\n print(\n tabulate(\n df_pred,\n headers=[\"Date\", \"Predicted\", \"Actual\", \"% Difference\"],\n showindex=True,\n floatfmt=\".2f\",\n tablefmt=\"fancy_grid\",\n )\n )\n else:\n console.print(df_pred[[\"Real\", \"Prediction\"]].round(2).to_string())\n\n console.print(\"\")\n print_prediction_kpis(df_pred[\"Real\"].values, df_pred[\"Prediction\"].values)\n\n else:\n # Print prediction data\n print_pretty_prediction(df_pred, values.values[-1])\n export_data(export, os.path.dirname(os.path.abspath(__file__)), \"arima\")\n console.print(\"\")\n", "import os\nfrom datetime import datetime, timedelta\n\nimport discord\nimport pandas as pd\nimport matplotlib.dates as mdates\nfrom matplotlib import pyplot as plt\n\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal.helper_funcs import plot_autoscale\nfrom gamestonk_terminal.stocks.government import quiverquant_model\n\nimport discordbot.config_discordbot as cfg\nfrom discordbot.run_discordbot import gst_imgur, logger\n\n\nasync def gtrades_command(\n ctx, ticker=\"\", gov_type=\"\", past_transactions_months=\"\", raw=\"\"\n):\n \"\"\"Displays government trades [quiverquant.com]\"\"\"\n try:\n # Debug user input\n if cfg.DEBUG:\n logger.debug(\n \"!stocks.gov.gtrades %s %s %s %s\",\n ticker,\n gov_type,\n past_transactions_months,\n raw,\n )\n\n if past_transactions_months == \"\":\n past_transactions_months = 10\n else:\n if not past_transactions_months.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n past_transactions_months = float(past_transactions_months)\n\n if raw in [\"false\", \"False\", \"FALSE\", \"\"]:\n raw = False\n\n if raw in [\"true\", \"True\", \"TRUE\"]:\n raw = True\n\n if raw not in [True, False]:\n raise Exception(\"raw argument has to be true or false\")\n\n if ticker == \"\":\n raise Exception(\"A ticker is required\")\n\n possible_args = [\"congress\", \"senate\", \"house\"]\n if gov_type == \"\":\n gov_type = \"congress\"\n elif gov_type not in possible_args:\n raise Exception(\n \"Enter a valid government argument, options are: congress, senate and house\"\n )\n\n # Retrieve Data\n df_gov = quiverquant_model.get_government_trading(gov_type, ticker)\n\n if df_gov.empty:\n raise Exception(f\"No {gov_type} trading data found\")\n\n # Output Data\n df_gov = df_gov.sort_values(\"TransactionDate\", ascending=False)\n\n start_date = datetime.now() - timedelta(days=past_transactions_months * 30)\n\n df_gov[\"TransactionDate\"] = pd.to_datetime(df_gov[\"TransactionDate\"])\n\n df_gov = df_gov[df_gov[\"TransactionDate\"] > start_date]\n\n if df_gov.empty:\n logger.debug(\"No recent %s trading data found\", gov_type)\n return\n\n df_gov[\"min\"] = df_gov[\"Range\"].apply(\n lambda 
x: x.split(\"-\")[0].strip(\"$\").replace(\",\", \"\").strip()\n )\n df_gov[\"max\"] = df_gov[\"Range\"].apply(\n lambda x: x.split(\"-\")[1].replace(\",\", \"\").strip().strip(\"$\")\n if \"-\" in x\n else x.strip(\"$\").replace(\",\", \"\").split(\"\\n\")[0]\n )\n\n df_gov[\"lower\"] = df_gov[[\"min\", \"max\", \"Transaction\"]].apply(\n lambda x: int(float(x[\"min\"]))\n if x[\"Transaction\"] == \"Purchase\"\n else -int(float(x[\"max\"])),\n axis=1,\n )\n df_gov[\"upper\"] = df_gov[[\"min\", \"max\", \"Transaction\"]].apply(\n lambda x: int(float(x[\"max\"]))\n if x[\"Transaction\"] == \"Purchase\"\n else -1 * int(float(x[\"min\"])),\n axis=1,\n )\n\n df_gov = df_gov.sort_values(\"TransactionDate\", ascending=True)\n\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n ax.fill_between(\n df_gov[\"TransactionDate\"].unique(),\n df_gov.groupby(\"TransactionDate\")[\"lower\"].sum().values / 1000,\n df_gov.groupby(\"TransactionDate\")[\"upper\"].sum().values / 1000,\n )\n\n ax.set_xlim(\n [\n df_gov[\"TransactionDate\"].values[0],\n df_gov[\"TransactionDate\"].values[-1],\n ]\n )\n ax.grid()\n ax.set_title(f\"{gov_type.capitalize()} trading on {ticker}\")\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Amount ($1k)\")\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\"%Y/%m/%d\"))\n plt.gcf().autofmt_xdate()\n fig.tight_layout()\n\n plt.savefig(\"gov_gtrades.png\")\n uploaded_image = gst_imgur.upload_image(\"gov_gtrades.png\", title=\"something\")\n image_link = uploaded_image.link\n if cfg.DEBUG:\n logger.debug(\"Image URL: %s\", image_link)\n title = \"Stocks: [quiverquant.com] Government Trades\"\n if raw:\n description = df_gov.to_string()\n embed = discord.Embed(\n title=title, description=description, colour=cfg.COLOR\n )\n else:\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embed.set_image(url=image_link)\n os.remove(\"gov_gtrades.png\")\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=\"ERROR Stocks: [quiverquant.com] Government Trades\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "# IMPORTATION STANDARD\nimport os\nfrom datetime import datetime\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\npred_controller = pytest.importorskip(\n modname=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller\",\n reason=\"Requires prediction dependencies, like tensorflow\",\n)\n\n# pylint: disable=E1101\n# pylint: disable=W0603\n\n# pytest.skip(allow_module_level=True)\n\n\nDF_STOCK = pd.DataFrame.from_dict(\n data={\n pd.Timestamp(\"2020-11-30 00:00:00\"): {\n \"Open\": 75.69999694824219,\n \"High\": 76.08999633789062,\n \"Low\": 75.41999816894531,\n \"Close\": 75.75,\n \"Adj Close\": 71.90919494628906,\n \"Volume\": 5539100,\n \"date_id\": 1,\n \"OC_High\": 75.75,\n \"OC_Low\": 75.69999694824219,\n },\n pd.Timestamp(\"2020-12-01 00:00:00\"): {\n \"Open\": 76.0199966430664,\n \"High\": 77.12999725341797,\n \"Low\": 75.69000244140625,\n \"Close\": 77.02999877929688,\n \"Adj Close\": 73.1242904663086,\n \"Volume\": 6791700,\n \"date_id\": 2,\n \"OC_High\": 77.02999877929688,\n \"OC_Low\": 76.0199966430664,\n },\n },\n orient=\"index\",\n)\nEMPTY_DF = pd.DataFrame()\n\nPRED_CONTROLLER = pred_controller.PredictionTechniquesController(\n ticker=\"MOCK_TICKER\",\n 
start=datetime.strptime(\"2020-12-15\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n)\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"queue, expected\",\n [\n ([\"pick\", \"help\"], []),\n ([\"quit\", \"help\"], [\"help\"]),\n ],\n)\ndef test_menu_with_queue(expected, mocker, queue):\n mocker.patch(\n target=(\n \"gamestonk_terminal.stocks.prediction_techniques.pred_controller.\"\n \"PredictionTechniquesController.switch\"\n ),\n return_value=[\"quit\"],\n )\n result_menu = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=queue,\n ).menu()\n\n assert result_menu == expected\n\n\[email protected](record_mode=\"none\")\ndef test_menu_without_queue_completion(mocker):\n # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU\n mocker.patch(\n target=\"gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"gamestonk_terminal.helper_funcs.session\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.helper_funcs.session.prompt\",\n return_value=\"quit\",\n )\n\n # DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER\n mocker.patch.object(\n target=pred_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller.session\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller.session.prompt\",\n return_value=\"quit\",\n )\n\n result_menu = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=None,\n ).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"mock_input\",\n [\"picked\", \"load help\", \"mock\"],\n)\ndef test_menu_without_queue_sys_exit(mock_input, mocker):\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=pred_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH\n class SystemExitSideEffect:\n def __init__(self):\n self.first_call = True\n\n def __call__(self, *args, **kwargs):\n if self.first_call:\n self.first_call = False\n raise SystemExit()\n return [\"quit\"]\n\n mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())\n mocker.patch(\n target=(\n \"gamestonk_terminal.stocks.prediction_techniques.pred_controller.\"\n \"PredictionTechniquesController.switch\"\n ),\n new=mock_switch,\n )\n\n result_menu = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=None,\n ).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected]_stdout\[email protected](\n \"start\",\n [\n None,\n datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n ],\n)\ndef test_print_help(start):\n controller = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=start,\n interval=\"1440min\",\n stock=DF_STOCK,\n )\n controller.print_help()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", 
\"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\"r\", [\"quit\", \"quit\", \"reset\", \"stocks\", \"load TSLA\", \"pred\"]),\n ],\n)\ndef test_switch(an_input, expected_queue):\n controller = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=None,\n )\n queue = controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\[email protected](record_mode=\"none\")\ndef test_call_cls(mocker):\n mocker.patch(\"os.system\")\n controller = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n )\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\n \"quit\",\n \"quit\",\n \"quit\",\n ],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\", \"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\"quit\", \"quit\", \"reset\", \"stocks\", \"load TSLA\", \"pred\"],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\"quit\", \"quit\", \"reset\", \"stocks\", \"load TSLA\", \"pred\", \"help\"],\n ),\n ],\n)\ndef test_call_func_expect_queue(expected_queue, queue, func):\n controller = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=queue,\n )\n result = getattr(controller, func)([])\n\n assert result is None\n assert controller.queue == expected_queue\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"tested_func, mocked_func, other_args, called_with\",\n [\n (\n \"call_ets\",\n \"ets_view.display_exponential_smoothing\",\n [\n \"--day=1\",\n \"--trend=A\",\n \"--seasonal=M\",\n \"--period=2\",\n \"--end=2020-12-15\",\n \"--export=png\",\n ],\n dict(\n ticker=\"MOCK_TICKER\",\n values=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_predict=1,\n trend=\"A\",\n seasonal=\"M\",\n seasonal_periods=2,\n s_end_date=datetime.strptime(\"2020-12-15\", \"%Y-%m-%d\"),\n export=\"png\",\n ),\n ),\n (\n \"call_knn\",\n \"knn_view.display_k_nearest_neighbors\",\n [\n \"--input=1\",\n \"--days=2\",\n \"--jumps=3\",\n \"--neighbors=4\",\n \"--end=2020-12-01\",\n \"--test_size=0.1\",\n \"--no_shuffle\",\n ],\n dict(\n ticker=\"MOCK_TICKER\",\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_neighbors=4,\n n_input_days=1,\n n_predict_days=2,\n test_size=0.1,\n end_date=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n no_shuffle=False,\n ),\n ),\n (\n \"call_regression\",\n \"regression_view.display_regression\",\n [\n \"4\",\n \"--input=1\",\n \"--days=2\",\n \"--jumps=3\",\n \"--end=2020-12-15\",\n \"--export=png\",\n ],\n dict(\n dataset=\"MOCK_TICKER\",\n values=PRED_CONTROLLER.stock[\"AdjClose\"],\n poly_order=4,\n n_input=1,\n n_predict=2,\n n_jumps=3,\n s_end_date=datetime.strptime(\"2020-12-15\", \"%Y-%m-%d\"),\n export=\"png\",\n ),\n ),\n (\n \"call_arima\",\n \"arima_view.display_arima\",\n [\n \"--days=1\",\n \"--ic=bic\",\n \"--seasonal\",\n \"--order=p,q,d\",\n \"--results\",\n \"--end=2020-12-15\",\n \"--export=png\",\n ],\n dict(\n 
dataset=\"MOCK_TICKER\",\n values=PRED_CONTROLLER.stock[\"AdjClose\"],\n arima_order=\"p,q,d\",\n n_predict=1,\n seasonal=True,\n ic=\"bic\",\n results=True,\n s_end_date=datetime.strptime(\"2020-12-15\", \"%Y-%m-%d\"),\n export=\"png\",\n ),\n ),\n (\n \"call_mlp\",\n \"neural_networks_view.display_mlp\",\n [\n \"--days=1\",\n \"--input=2\",\n \"--epochs=3\",\n \"--end=2020-12-15\",\n \"--batch_size=4\",\n \"--loops=5\",\n # \"--valid\", \"0.1\",\n \"--lr=0.01\",\n \"--no_shuffle\",\n ],\n dict(\n dataset=\"MOCK_TICKER\",\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_input_days=2,\n n_predict_days=1,\n learning_rate=0.01,\n epochs=3,\n batch_size=4,\n test_size=0.1,\n n_loops=5,\n no_shuffle=False,\n ),\n ),\n (\n \"call_rnn\",\n \"neural_networks_view.display_rnn\",\n [\n \"--days=1\",\n \"--input=2\",\n \"--epochs=3\",\n \"--end=2020-12-15\",\n \"--batch_size=4\",\n \"--loops=5\",\n # \"--valid\", \"0.1\",\n \"--lr=0.01\",\n \"--no_shuffle\",\n ],\n dict(\n dataset=\"MOCK_TICKER\",\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_input_days=2,\n n_predict_days=1,\n learning_rate=0.01,\n epochs=3,\n batch_size=4,\n test_size=0.1,\n n_loops=5,\n no_shuffle=False,\n ),\n ),\n (\n \"call_lstm\",\n \"neural_networks_view.display_lstm\",\n [\n \"--days=1\",\n \"--input=2\",\n \"--epochs=3\",\n \"--end=2020-12-15\",\n \"--batch_size=4\",\n \"--loops=5\",\n # \"--valid\", \"0.1\",\n \"--lr=0.01\",\n \"--no_shuffle\",\n ],\n dict(\n dataset=\"MOCK_TICKER\",\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_input_days=2,\n n_predict_days=1,\n learning_rate=0.01,\n epochs=3,\n batch_size=4,\n test_size=0.1,\n n_loops=5,\n no_shuffle=False,\n ),\n ),\n (\n \"call_conv1d\",\n \"neural_networks_view.display_conv1d\",\n [\n \"--days=1\",\n \"--input=2\",\n \"--epochs=3\",\n \"--end=2020-12-15\",\n \"--batch_size=4\",\n \"--loops=5\",\n # \"--valid\", \"0.1\",\n \"--lr=0.01\",\n \"--no_shuffle\",\n ],\n dict(\n dataset=\"MOCK_TICKER\",\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_input_days=2,\n n_predict_days=1,\n learning_rate=0.01,\n epochs=3,\n batch_size=4,\n test_size=0.1,\n n_loops=5,\n no_shuffle=False,\n ),\n ),\n (\n \"call_mc\",\n \"mc_view.display_mc_forecast\",\n [\n \"--days=1\",\n # \"--num=2\",\n \"--dist=normal\",\n \"--export=png\",\n ],\n dict(\n data=PRED_CONTROLLER.stock[\"AdjClose\"],\n n_future=1,\n n_sims=100,\n use_log=False,\n export=\"png\",\n ),\n ),\n ],\n)\ndef test_call_func(tested_func, mocked_func, other_args, called_with, mocker):\n mock = mocker.Mock()\n mocker.patch(\n \"gamestonk_terminal.stocks.prediction_techniques.pred_controller.\"\n + mocked_func,\n new=mock,\n )\n getattr(PRED_CONTROLLER, tested_func)(other_args=other_args)\n\n if isinstance(called_with, dict):\n mock.assert_called_once_with(**called_with)\n elif isinstance(called_with, list):\n mock.assert_called_once_with(*called_with)\n else:\n mock.assert_called_once()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"func\",\n [\n \"call_load\",\n \"call_pick\",\n \"call_ets\",\n \"call_knn\",\n \"call_regression\",\n \"call_arima\",\n \"call_mlp\",\n \"call_rnn\",\n \"call_lstm\",\n \"call_conv1d\",\n \"call_mc\",\n ],\n)\ndef test_call_func_no_parser(func, mocker):\n mock = mocker.Mock(return_value=None)\n mocker.patch.object(\n target=pred_controller,\n attribute=\"parse_known_args_and_warn\",\n new=mock,\n )\n mocker.patch.object(\n target=pred_controller.pred_helper,\n attribute=\"parse_known_args_and_warn\",\n new=mock,\n )\n controller = 
pred_controller.PredictionTechniquesController(\n ticker=\"MOCK_TICKER\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK.copy(),\n )\n\n func_result = getattr(controller, func)(other_args=[\"PM\"])\n assert func_result is None\n assert controller.queue == []\n getattr(pred_controller, \"parse_known_args_and_warn\").assert_called_once()\n\n\[email protected](record_mode=\"none\")\ndef test_call_load(mocker):\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=pred_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller.session\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.stocks.prediction_techniques.pred_controller.session.prompt\",\n return_value=\"quit\",\n )\n\n result_menu = pred_controller.PredictionTechniquesController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2020-12-01\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK,\n queue=None,\n ).menu()\n\n assert result_menu == []\n", "\"\"\" Alpha Vantage View \"\"\"\n__docformat__ = \"numpy\"\n\nimport os\n\nimport matplotlib.pyplot as plt\nfrom tabulate import tabulate\n\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.economy import alphavantage_model\nfrom gamestonk_terminal.helper_funcs import export_data, plot_autoscale\nfrom gamestonk_terminal import config_plot as cfp\nfrom gamestonk_terminal.rich_config import console\n\n\ndef realtime_performance_sector(raw: bool, export: str):\n \"\"\"Display Real-Time Performance sector. [Source: AlphaVantage]\n\n Parameters\n ----------\n raw : bool\n Output only raw data\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n df_sectors = alphavantage_model.get_sector_data()\n\n # pylint: disable=invalid-sequence-index\n df_rtp = df_sectors[\"Rank A: Real-Time Performance\"]\n\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n df_rtp.to_frame(),\n showindex=True,\n headers=[\"Sector\", \"Real-Time Performance\"],\n floatfmt=\".5f\",\n tablefmt=\"fancy_grid\",\n )\n )\n else:\n console.print(df_rtp.to_string())\n\n else:\n df_rtp.plot(kind=\"bar\")\n plt.title(\"Real Time Performance (%) per Sector\")\n plt.tight_layout()\n plt.grid()\n\n console.print(\"\")\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"rtps\",\n df_sectors,\n )\n\n if not raw:\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n\ndef display_real_gdp(\n interval: str, start_year: int = 2010, raw: bool = False, export: str = \"\"\n):\n \"\"\"Display US GDP from AlphaVantage\n\n Parameters\n ----------\n interval : str\n Interval for GDP. Either \"a\" or \"q\"\n start_year : int, optional\n Start year for plot, by default 2010\n raw : bool, optional\n Flag to show raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n gdp_full = alphavantage_model.get_real_gdp(interval)\n if gdp_full.empty:\n console.print(\"Error getting data. 
Check API Key\")\n return\n gdp = gdp_full[gdp_full.date >= f\"{start_year}-01-01\"]\n int_string = \"Annual\" if interval == \"a\" else \"Quarterly\"\n year_str = str(start_year) if interval == \"a\" else str(list(gdp.date)[-1].year)\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(gdp.date, gdp.GDP, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"{int_string} US GDP ($B) from {year_str}\")\n ax.set_ylabel(\"US GDP ($B) \")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"gdp\",\n gdp_full,\n )\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n gdp.head(20),\n headers=[\"Date\", \"GDP\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(gdp.head(20).to_string())\n console.print(\"\")\n\n\ndef display_gdp_capita(start_year: int = 2010, raw: bool = False, export: str = \"\"):\n \"\"\"Display US GDP per Capita from AlphaVantage\n\n Parameters\n ----------\n start_year : int, optional\n Start year for plot, by default 2010\n raw : bool, optional\n Flag to show raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n gdp_capita = alphavantage_model.get_gdp_capita()\n if gdp_capita.empty:\n console.print(\"Error getting data. Check API Key\")\n return\n gdp = gdp_capita[gdp_capita.date >= f\"{start_year}-01-01\"]\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(gdp.date, gdp.GDP, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"US GDP per Capita (Chained 2012 USD) from {start_year}\")\n ax.set_ylabel(\"US GDP (Chained 2012 USD) \")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"gdpc\",\n gdp_capita,\n )\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n gdp.head(20),\n headers=[\"Date\", \"GDP\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(gdp.head(20).to_string())\n console.print(\"\")\n\n\ndef display_inflation(start_year: int = 2010, raw: bool = False, export: str = \"\"):\n \"\"\"Display US Inflation from AlphaVantage\n\n Parameters\n ----------\n start_year : int, optional\n Start year for plot, by default 2010\n raw : bool, optional\n Flag to show raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n inflation = alphavantage_model.get_inflation()\n if inflation.empty:\n console.print(\"Error getting data. 
Check API Key\")\n return\n inf = inflation[inflation.date >= f\"{start_year}-01-01\"]\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(inf.date, inf.Inflation, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"US Inflation from {list(inf.date)[-1].year}\")\n ax.set_ylabel(\"Inflation (%)\")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"inf\",\n inflation,\n )\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n inf.head(20),\n headers=[\"Date\", \"Inflation\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(inf.head(20).to_string())\n console.print(\"\")\n\n\ndef display_cpi(\n interval: str, start_year: int = 2010, raw: bool = False, export: str = \"\"\n):\n \"\"\"Display US consumer price index (CPI) from AlphaVantage\n\n Parameters\n ----------\n interval : str\n Interval for GDP. Either \"m\" or \"s\"\n start_year : int, optional\n Start year for plot, by default 2010\n raw : bool, optional\n Flag to show raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n cpi_full = alphavantage_model.get_cpi(interval)\n if cpi_full.empty:\n console.print(\"Error getting data. Check API Key\")\n return\n cpi = cpi_full[cpi_full.date >= f\"{start_year}-01-01\"]\n int_string = \"Semi-Annual\" if interval == \"s\" else \"Monthly\"\n year_str = str(list(cpi.date)[-1].year)\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(cpi.date, cpi.CPI, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"{int_string} Consumer Price Index from {year_str}\")\n ax.set_ylabel(\"CPI \")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"cpi\",\n cpi_full,\n )\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n cpi.head(20),\n headers=[\"Date\", \"CPI\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(cpi.head(20).to_string())\n console.print(\"\")\n\n\ndef display_treasury_yield(\n interval: str, maturity: str, start_date: str, raw: bool = False, export: str = \"\"\n):\n \"\"\"Display historical treasury yield for given maturity\n\n Parameters\n ----------\n interval : str\n Interval for data. Can be \"d\",\"w\",\"m\" for daily, weekly or monthly\n maturity : str\n Maturity timeline. Can be \"3mo\",\"5y\",\"10y\" or \"30y\"\n start_date: str\n Start date for data. Should be in YYYY-MM-DD format\n raw : bool, optional\n Flag to display raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n d_maturity = {\"3m\": \"3month\", \"5y\": \"5year\", \"10y\": \"10year\", \"30y\": \"30year\"}\n yields = alphavantage_model.get_treasury_yield(interval, maturity)\n if yields.empty:\n console.print(\"Error getting data. 
Check API Key\")\n return\n yld = yields[yields.date >= start_date]\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(yld.date, yld.Yield, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"{d_maturity[maturity]} Treasury Yield\")\n ax.set_ylabel(\"Yield\")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"tyld\",\n yields,\n )\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n yld.head(20),\n headers=[\"Date\", \"Yield\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(yld.head(20).to_string())\n console.print(\"\")\n\n\ndef display_unemployment(start_year: int = 2015, raw: bool = False, export: str = \"\"):\n \"\"\"Display US unemployment AlphaVantage\n\n Parameters\n ----------\n start_year : int, optional\n Start year for plot, by default 2010\n raw : bool, optional\n Flag to show raw data, by default False\n export : str, optional\n Format to export data, by default \"\"\n \"\"\"\n\n unemp = alphavantage_model.get_unemployment()\n\n if unemp.empty:\n console.print(\"Error getting data. Check API Key\")\n return\n\n un = unemp[unemp.date >= f\"{start_year}-01-01\"]\n\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)\n ax.plot(un.date, un.unemp, marker=\"o\", c=\"dodgerblue\")\n ax.set_xlabel(\"Date\")\n ax.set_title(f\"US Unemployment from {start_year}\")\n ax.set_ylabel(\"US Unemployment \")\n ax.grid(\"on\")\n fig.tight_layout()\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"unemp\",\n unemp,\n )\n\n if raw:\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n un.head(20),\n headers=[\"Date\", \"GDP\"],\n tablefmt=\"fancy_grid\",\n showindex=False,\n )\n )\n else:\n console.print(un.head(20).to_string())\n\n console.print(\"\")\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.Series", "pandas.date_range", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "pandas.plotting.register_matplotlib_converters", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.ion" ], [ "matplotlib.pyplot.gca", "matplotlib.dates.DateFormatter", "pandas.to_datetime", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf" ], [ "pandas.Timestamp", "pandas.DataFrame" ], [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.ion" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martinrohbeck/kartothek
[ "9314d66b2b35a64282945c6b6ae24d8bb5a51ed0" ]
[ "tests/io/dask/dataframe/test_read.py" ]
[ "import pickle\nfrom functools import partial\n\nimport pandas as pd\nimport pytest\nfrom pandas import testing as pdt\n\nfrom kartothek.io.dask.dataframe import read_dataset_as_ddf\nfrom kartothek.io.eager import store_dataframes_as_dataset\nfrom kartothek.io.testing.read import * # noqa\nfrom kartothek.io_components.metapartition import SINGLE_TABLE\n\n\[email protected]()\ndef output_type():\n return \"table\"\n\n\ndef _read_as_ddf(\n dataset_uuid,\n store,\n factory=None,\n categoricals=None,\n tables=None,\n dataset_has_index=False,\n **kwargs,\n):\n table = tables or SINGLE_TABLE\n if categoricals:\n categoricals = categoricals[table]\n ddf = read_dataset_as_ddf(\n dataset_uuid=dataset_uuid,\n store=store,\n factory=factory,\n categoricals=categoricals,\n table=table,\n **kwargs,\n )\n if categoricals:\n assert ddf._meta.dtypes[\"P\"] == pd.api.types.CategoricalDtype(\n categories=[\"__UNKNOWN_CATEGORIES__\"], ordered=False\n )\n if dataset_has_index:\n assert ddf._meta.dtypes[\"L\"] == pd.api.types.CategoricalDtype(\n categories=[1, 2], ordered=False\n )\n else:\n assert ddf._meta.dtypes[\"L\"] == pd.api.types.CategoricalDtype(\n categories=[\"__UNKNOWN_CATEGORIES__\"], ordered=False\n )\n\n s = pickle.dumps(ddf, pickle.HIGHEST_PROTOCOL)\n ddf = pickle.loads(s)\n\n ddf = ddf.compute().reset_index(drop=True)\n\n def extract_dataframe(ix):\n df = ddf.iloc[[ix]].copy()\n for col in df.columns:\n if pd.api.types.is_categorical(df[col]):\n df[col] = df[col].cat.remove_unused_categories()\n return df.reset_index(drop=True)\n\n return [extract_dataframe(ix) for ix in ddf.index]\n\n\[email protected]()\ndef bound_load_dataframes():\n return _read_as_ddf\n\n\ndef test_load_dataframe_categoricals_with_index(dataset_with_index_factory):\n func = partial(_read_as_ddf, dataset_has_index=True)\n test_read_dataset_as_dataframes( # noqa: F405\n dataset_factory=dataset_with_index_factory,\n dataset=dataset_with_index_factory,\n store_session_factory=dataset_with_index_factory.store_factory,\n use_dataset_factory=True,\n bound_load_dataframes=func,\n use_categoricals=True,\n output_type=\"table\",\n label_filter=None,\n dates_as_object=False,\n )\n\n\ndef test_read_ddf_from_categorical_partition(store_factory):\n df = pd.DataFrame({\"x\": [\"a\"]}).astype({\"x\": \"category\"})\n store_dataframes_as_dataset(\n dfs=[df], dataset_uuid=\"dataset_uuid\", store=store_factory\n )\n ddf = read_dataset_as_ddf(\n dataset_uuid=\"dataset_uuid\", store=store_factory, table=\"table\"\n )\n df_expected = pd.DataFrame({\"x\": [\"a\"]})\n df_actual = ddf.compute(scheduler=\"sync\")\n pdt.assert_frame_equal(df_expected, df_actual)\n\n ddf = read_dataset_as_ddf(\n dataset_uuid=\"dataset_uuid\",\n store=store_factory,\n categoricals=[\"x\"],\n table=\"table\",\n )\n df_actual = ddf.compute(scheduler=\"sync\")\n pdt.assert_frame_equal(df, df_actual)\n" ]
[ [ "pandas.api.types.is_categorical", "pandas.testing.assert_frame_equal", "pandas.api.types.CategoricalDtype", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
m-triassi/ai-projects-472
[ "feb82550c59e785ec5427b457e0c5d38e980a1d4" ]
[ "Project 2/line-em-upScoreboard.py" ]
[ "# based on code from https://stackabuse.com/minimax-and-alpha-beta-pruning-in-python\n\nimport time\nimport numpy as np\nfrom contextlib import redirect_stdout\nfrom random import randint\n\nclass Game:\n\tMINIMAX = 0\n\tALPHABETA = 1\n\tHUMAN = 2\n\tAI = 3\n\n\tdef __init__(self, recommend=True):\n\t\tn, s, t = self.ask_conditions()\n\t\tself.n = n\n\t\tself.s = s\n\t\tself.t = t\n\t\t# set some defaults that will be overwritten\n\t\tself.max_depth_x = 5\n\t\tself.max_depth_o = 5\n\t\tself.a = self.MINIMAX\n\t\tself.e_x = \"e12\"\n\t\tself.e_o = \"e12\"\n\t\tself.initialize_game(n)\n\t\tself.blocks = []\n\t\tself.block_symbol = \"🔲\"\n\t\tself.block_count = self.add_blocks()\n\t\tself.recommend = recommend\n\t\t# statistics\n\t\tself.num_moves = 0\n\t\tself.current_num_states_evald = 0\n\t\tself.total_num_states_evald = 0\n\t\tself.total_eval_time = 0\n\t\tself.total_depth_states_evald = {}\n\t\tself.current_depth_states_evald = {}\n\n\tdef initialize_game(self, n, firs='X', blocks=None):\n\t\tself.last_move = (-1, -1)\n\t\tself.current_state = np.full((n, n), \".\").tolist()\n\t\t# We're replaying a game so add back the blocks\n\t\tif blocks is not None:\n\t\t\tfor block in blocks:\n\t\t\t\tself.current_state[block[0]][block[1]] = self.block_symbol\n\n\t\t# Player X always sometimes plays first\n\t\tself.player_turn = firs\n\n\tdef add_blocks(self):\n\t\tblock_count = int(input(\"How many blocks would you like to add?[Default: 0]: \") or 0)\n\t\tif block_count != 0:\n\t\t\trandom_block = input(\"Would you like randomly placed blocks? [y/N]: \" or False) == \"y\"\n\t\telse:\n\t\t\trandom_block = False\n\n\t\tif block_count > 2*self.n:\n\t\t\tblock_count = 2*self.n\n\t\t\tprint(F\"You've attempted to add to many blocks, setting blocks to {block_count} (maximum for this board size)\")\n\n\t\tif random_block:\n\t\t\tfor block in range(0, block_count):\n\t\t\t\tx = randint(0, self.n - 1)\n\t\t\t\ty = randint(0, self.n - 1)\n\t\t\t\t# if the blocks are already present, keep generating a new pair until we find some that aren't\n\t\t\t\twhile (x ,y) in self.blocks:\n\t\t\t\t\tx = randint(0, self.n - 1)\n\t\t\t\t\ty = randint(0, self.n - 1)\n\t\t\t\tself.current_state[x][y] = self.block_symbol\n\t\t\t\tself.blocks.append((x, y))\n\t\telse:\n\t\t\tfor block in range(0, block_count):\n\t\t\t\tx = int(input(F\"X coordinate for block {block}: \"))\n\t\t\t\ty = int(input(F\"Y coordinate for block {block}: \"))\n\t\t\t\tself.current_state[x][y] = self.block_symbol\n\t\t\t\tself.blocks.append((x, y))\n\t\treturn block_count\n\n\tdef decide_depth(self, algorithm, depth, e_x,):\n\n\t\t# set algorithm\n\t\tself.a = algorithm in [self.ALPHABETA]\n\t\t# set depth\n\t\tself.max_depth_x = depth[0]\n\t\t# set e1\n\t\tself.max_depth_o = depth[1]\n\t\tif e_x == \"e1\":\n\t\t\tself.e_x = \"e1\"\n\t\t\tself.e_o = \"e2\"\n\t\telse:\n\t\t\tself.e_x = \"e2\"\n\t\t\tself.e_o = \"e1\"\n\n\tdef show_game_conditions(self, player_x, player_o):\n\t\tx_type = F\"AI d={self.max_depth_x} a={bool(self.a)} {self.e_x}\" if player_x == self.AI else \"Player\"\n\t\to_type = F\"AI d={self.max_depth_o} a={bool(self.a)} {self.e_o}\" if player_o == self.AI else \"Player\"\n\t\tprint(F\"n={self.n} b={self.block_count} s={self.s} t={self.t}\")\n\t\tprint(F\"blocks={self.blocks}\")\n\t\tprint()\n\t\tprint(F\"Player 1: {x_type}\")\n\t\tprint(F\"Player 2: {o_type}\")\n\n\tdef ask_conditions(self):\n\t\tn = int(input('How large should the board be? 
(n x n)[Default: 3]: ') or 3)\n\t\ts = int(input(\"How long should a winning line be?[Default: 3]: \") or 3)\n\t\tt = int(input(\"How many seconds should the AI have to evaluate the best move?[Default: 5]: \") or 5)\n\n\t\tif n > 10:\n\t\t\tn = 10\n\t\t\tprint(F\"You've chosen too large of a game board, setting to {n}\")\n\t\telif n < 3:\n\t\t\tn = 3\n\t\t\tprint(F\"You've chosen too small of a game board, setting to {n}\")\n\n\t\tif s > 10:\n\t\t\ts = 10\n\t\t\tprint(F\"You've chosen too large of a win line, setting to {s}\")\n\t\telif s < 3:\n\t\t\ts = 3\n\t\t\tprint(F\"You've chosen too small of a win line, setting to {s}\")\n\n\t\treturn (n, s, t)\n\n\tdef draw_board(self):\n\t\tlabels = range(0, self.n)\n\t\tprint()\n\t\tprint(\" \" + \" \".join(map(str, labels)))\n\t\tfor y in range(0, self.n):\n\t\t\tprint(str(labels[y]) + \" \", end='')\n\t\t\tfor x in range(0, self.n):\n\t\t\t\tprint(F'{self.current_state[x][y]} ', end=\"\")\n\t\t\tprint()\n\t\tprint()\n\n\tdef is_valid(self, px, py):\n\t\tif px < 0 or px > self.n - 1 or py < 0 or py > self.n - 1:\n\t\t\treturn False\n\t\telif self.current_state[px][py] != '.':\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef is_end(self):\n\t\thorizontal = self.check_e(0, self.last_move) + self.check_w(0, self.last_move) + 1\n\t\tvertical = self.check_n(0, self.last_move) + self.check_s(0, self.last_move) + 1\n\t\tmain_diagonal = self.check_sw(0, self.last_move) + self.check_ne(0, self.last_move) + 1\n\t\tsecond_diagonal = self.check_se(0, self.last_move) + self.check_nw(0, self.last_move) + 1\n\n\t\tif horizontal >= self.s or vertical >= self.s or main_diagonal >= self.s or second_diagonal >= self.s:\n\t\t\treturn self.current_state[self.last_move[0]][self.last_move[1]]\n\n\t\tif not any(\".\" in x for x in self.current_state):\n\t\t\treturn \".\"\n\n\t\treturn None\n\n\n\tdef check_n(self, count, move):\n\t\tpx, py = move\n\t\tif py - 1 < 0:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px][py - 1] == self.block_symbol or self.current_state[px][py - 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px][py - 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_n(count + 1, (px, py - 1))\n\n\tdef check_s(self, count, move):\n\t\tpx, py = move\n\t\tif py + 1 >= self.n:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px][py + 1] == self.block_symbol or self.current_state[px][py + 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px][py + 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_s(count + 1, (px, py + 1))\n\n\tdef check_w(self, count, move):\n\t\tpx, py = move\n\t\tif px - 1 < 0:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px - 1][py] == self.block_symbol or self.current_state[px - 1][py] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px - 1][py]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_w(count + 1, (px - 1, py))\n\n\tdef 
check_e(self, count, move):\n\t\tpx, py = move\n\t\tif px + 1 >= self.n:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px + 1][py] == self.block_symbol or self.current_state[px + 1][py] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px + 1][py]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_e(count + 1, (px + 1, py))\n\n\tdef check_ne(self, count, move):\n\t\tpx, py = move\n\t\tif px - 1 < 0 or py + 1 >= self.n:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px - 1][py + 1] == self.block_symbol or self.current_state[px - 1][py + 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px - 1][py + 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_ne(count + 1, (px - 1, py + 1))\n\n\tdef check_nw(self, count, move):\n\t\tpx, py = move\n\t\tif px - 1 < 0 or py - 1 < 0:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px - 1][py - 1] == self.block_symbol or self.current_state[px - 1][py - 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px - 1][py - 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_nw(count + 1, (px - 1, py - 1))\n\n\tdef check_se(self, count, move):\n\t\tpx, py = move\n\t\tif px + 1 >= self.n or py + 1 >= self.n:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px + 1][py + 1] == self.block_symbol or self.current_state[px + 1][py + 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px + 1][py + 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_se(count + 1, (px + 1, py + 1))\n\n\tdef check_sw(self, count, move):\n\t\tpx, py = move\n\t\tif px + 1 >= self.n or py - 1 < 0:\n\t\t\treturn count\n\n\t\t## Check if it's a block / empty spot\n\t\tif self.current_state[px + 1][py - 1] == self.block_symbol or self.current_state[px + 1][py - 1] == \".\":\n\t\t\treturn count\n\n\t\t## check if it's not the same, then return count\n\t\t## else it must be the same, so recurse with count + 1\n\t\tif self.current_state[px][py] != self.current_state[px + 1][py - 1]:\n\t\t\treturn count\n\t\telse:\n\t\t\tif count >= self.s:\n\t\t\t\treturn count\n\t\t\treturn self.check_sw(count + 1, (px + 1, py - 1))\n\n\tdef check_end(self):\n\t\tself.result = self.is_end()\n\t\t# Store the result: the winning symbol, '.' for a tie, or None if the game is not over\n\n\t\treturn self.result\n\n\tdef input_move(self):\n\t\twhile True:\n\t\t\tprint(F'Player {self.player_turn}, enter your move:')\n\t\t\tpx = int(input('enter the x coordinate: '))\n\t\t\tpy = int(input('enter the y coordinate: '))\n\t\t\tif self.is_valid(px, py):\n\t\t\t\treturn (px, py)\n\t\t\telse:\n\t\t\t\tprint('The move is not valid! 
Try again.')\n\n\tdef switch_player(self):\n\t\tif self.player_turn == 'X':\n\t\t\tself.player_turn = 'O'\n\t\telif self.player_turn == 'O':\n\t\t\tself.player_turn = 'X'\n\t\treturn self.player_turn\n\n\tdef evaluate_state(self):\n\t\tif self.player_turn == \"X\":\n\t\t\theuristic = getattr(self, self.e_x)\n\t\telse:\n\t\t\theuristic = getattr(self, self.e_o)\n\n\t\tx, y = self.last_move\n\t\treturn heuristic(x, y)\n\n\tdef e1(self, x, y):\n\t\t# Heuristic 1: how long is the longest line this move would make?\n\t\t# (normalized by the win length s, so the value is at most 1)\n\t\tlines = []\n\t\tlines.append(self.check_e(0, self.last_move) + self.check_w(0, self.last_move) + 1)\n\t\tlines.append(self.check_n(0, self.last_move) + self.check_s(0, self.last_move) + 1)\n\t\tlines.append(self.check_sw(0, self.last_move) + self.check_ne(0, self.last_move) + 1)\n\t\tlines.append(self.check_se(0, self.last_move) + self.check_nw(0, self.last_move) + 1)\n\t\treturn max(lines)/self.s\n\n\tdef e2(self, x, y):\n\t\t# Heuristic 2: how many occupied spaces are around the current move?\n\t\t# The rationale is to count non-empty spaces so as to promote blocking\n\t\t# very verbose, but pretty fast...\n\t\tb = x + 1 < self.n and self.current_state[x + 1][y] != \".\"\n\t\tb += x - 1 >= 0 and self.current_state[x - 1][y] != \".\"\n\t\tb += y + 1 < self.n and self.current_state[x][y + 1] != \".\"\n\t\tb += y - 1 >= 0 and self.current_state[x][y - 1] != \".\"\n\t\tb += y + 1 < self.n and x + 1 < self.n and self.current_state[x + 1][y + 1] != \".\"\n\t\tb += x - 1 >= 0 and y - 1 >= 0 and self.current_state[x - 1][y - 1] != \".\"\n\t\tb += x - 1 >= 0 and y + 1 < self.n and self.current_state[x - 1][y + 1] != \".\"\n\t\tb += y - 1 >= 0 and x + 1 < self.n and self.current_state[x + 1][y - 1] != \".\"\n\n\t\treturn b\n\n\tdef e12(self, x, y):\n\t\tx, y = self.last_move\n\t\t# Combined heuristic evaluation\n\t\ta = self.e1(x, y)\n\t\tb = self.e2(x, y)\n\t\t# Clamp both heuristics to between 0 and 1:\n\t\t# e1 is already normalized by the win length s,\n\t\t# and there are only 8 spaces (maximum) around the current spot\n\t\tb = b / 8\n\n\t\t# weight heuristic a higher than b to promote blocking\n\t\treturn (0.75 * a + 0.25 * b) / 2\n\n\tdef record_eval(self, depth):\n\t\tself.current_num_states_evald += 1\n\t\tself.total_num_states_evald += 1\n\n\t\tcurrent_depth = 0\n\t\tif self.player_turn == 'X':\n\t\t\tcurrent_depth = self.max_depth_x - depth\n\t\telse:\n\t\t\tcurrent_depth = self.max_depth_o - depth\n\n\t\tif current_depth not in self.current_depth_states_evald:\n\t\t\tself.current_depth_states_evald[current_depth] = 1\n\t\telse:\n\t\t\tself.current_depth_states_evald[current_depth] += 1\n\n\t\tif current_depth not in self.total_depth_states_evald:\n\t\t\tself.total_depth_states_evald[current_depth] = 1\n\t\telse:\n\t\t\tself.total_depth_states_evald[current_depth] += 1\n\n\n\tdef minimax(self, max=False, depth=10, start_time=None):\n\t\t# a default of time.time() would be evaluated once at definition time,\n\t\t# so resolve the start time here instead\n\t\tif start_time is None:\n\t\t\tstart_time = time.time()\n\t\t# Minimizing for 'X' and maximizing for 'O'\n\t\t# Possible values are:\n\t\t# -1 - win for 'X'\n\t\t# 0 - a tie\n\t\t# 1 - loss for 'X'\n\t\t# We're initially setting it to 2 or -2 as worse than the worst case:\n\t\tvalue = 2\n\t\tflip = -1\n\t\tif max:\n\t\t\tvalue = -2\n\t\t\tflip = 1\n\t\tx = None\n\t\ty = None\n\t\tresult = self.is_end()\n\t\tif result == 'X':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (-1, x, y)\n\t\telif result == 'O':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (1, x, y)\n\t\telif result == '.':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (0, x, y)\n\t\t# remove some time from 
the original limit so the AI exits early / in time\n\t\t# if depth <= 0 or time.time() - start_time >= self.t - 0.01:\n\t\tif depth <= 0:\n\t\t\t# Heuristic eval, constrained to [-1, 1]\n\t\t\t# depending on whether we're min or max, flip the value to be negative/positive\n\t\t\tvalue = self.evaluate_state() * flip\n\t\t\tself.record_eval(depth)\n\t\t\treturn (value, x, y)\n\n\t\tdepth -= 1\n\t\tfor i in range(0, self.n):\n\t\t\tfor j in range(0, self.n):\n\t\t\t\tif self.current_state[i][j] == '.':\n\t\t\t\t\tif max:\n\t\t\t\t\t\tself.current_state[i][j] = 'O'\n\t\t\t\t\t\t(v, _, _) = self.minimax(max=False, depth=depth, start_time=start_time)\n\t\t\t\t\t\tif v > value:\n\t\t\t\t\t\t\tvalue = v\n\t\t\t\t\t\t\tx = i\n\t\t\t\t\t\t\ty = j\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.current_state[i][j] = 'X'\n\t\t\t\t\t\t(v, _, _) = self.minimax(max=True, depth=depth, start_time=start_time)\n\t\t\t\t\t\tif v < value:\n\t\t\t\t\t\t\tvalue = v\n\t\t\t\t\t\t\tx = i\n\t\t\t\t\t\t\ty = j\n\t\t\t\t\tself.current_state[i][j] = '.'\n\t\treturn (value, x, y)\n\n\tdef alphabeta(self, alpha=-2, beta=2, max=False, depth=10, start_time=None):\n\t\t# a default of time.time() would be evaluated once at definition time,\n\t\t# so resolve the start time here instead\n\t\tif start_time is None:\n\t\t\tstart_time = time.time()\n\t\t# Minimizing for 'X' and maximizing for 'O'\n\t\t# Possible values are:\n\t\t# -1 - win for 'X'\n\t\t# 0 - a tie\n\t\t# 1 - loss for 'X'\n\t\t# We're initially setting it to 2 or -2 as worse than the worst case:\n\t\tvalue = 2\n\t\tflip = -1\n\t\tif max:\n\t\t\tflip = 1\n\t\t\tvalue = -2\n\t\tx = None\n\t\ty = None\n\t\tresult = self.is_end()\n\t\tif result == 'X':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (-1, x, y)\n\t\telif result == 'O':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (1, x, y)\n\t\telif result == '.':\n\t\t\tself.record_eval(depth)\n\t\t\treturn (0, x, y)\n\t\t# remove some time from the original limit so the AI exits early / in time\n\t\tif depth <= 0 or time.time() - start_time >= self.t - 0.01:\n\t\t\t# Heuristic eval, constrained to [-1, 1]\n\t\t\t# depending on whether we're min or max, flip the value to be negative/positive\n\t\t\tvalue = self.evaluate_state() * flip\n\t\t\tself.record_eval(depth)\n\t\t\treturn (value, x, y)\n\n\t\tdepth -= 1\n\t\tfor i in range(0, self.n):\n\t\t\tfor j in range(0, self.n):\n\t\t\t\tif self.current_state[i][j] == '.':\n\t\t\t\t\tif max:\n\t\t\t\t\t\tself.current_state[i][j] = 'O'\n\t\t\t\t\t\t(v, _, _) = self.alphabeta(alpha, beta, max=False, depth=depth, start_time=start_time)\n\t\t\t\t\t\tif v > value:\n\t\t\t\t\t\t\tvalue = v\n\t\t\t\t\t\t\tx = i\n\t\t\t\t\t\t\ty = j\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.current_state[i][j] = 'X'\n\t\t\t\t\t\t(v, _, _) = self.alphabeta(alpha, beta, max=True, depth=depth, start_time=start_time)\n\t\t\t\t\t\tif v < value:\n\t\t\t\t\t\t\tvalue = v\n\t\t\t\t\t\t\tx = i\n\t\t\t\t\t\t\ty = j\n\t\t\t\t\tself.current_state[i][j] = '.'\n\t\t\t\t\tif max:\n\t\t\t\t\t\tif value >= beta:\n\t\t\t\t\t\t\treturn (value, x, y)\n\t\t\t\t\t\tif value > alpha:\n\t\t\t\t\t\t\talpha = value\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value <= alpha:\n\t\t\t\t\t\t\treturn (value, x, y)\n\t\t\t\t\t\tif value < beta:\n\t\t\t\t\t\t\tbeta = value\n\t\treturn (value, x, y)\n\n\tdef play(self, algo=None):\n\n\t\tif algo is None:\n\t\t\talgo = self.ALPHABETA\n\t\twhile True:\n\t\t\t# self.draw_board()\n\t\t\tend = self.check_end()\n\t\t\tif end:\n\t\t\t\treturn end\n\n\t\t\tstart = time.time()\n\t\t\tself.current_num_states_evald = 0\n\t\t\tself.current_depth_states_evald = {}\n\t\t\tself.num_moves += 1\n\n\t\t\tif algo == self.MINIMAX:\n\t\t\t\tif self.player_turn == 'X':\n\t\t\t\t\t(_, x, y) = self.minimax(max=False, 
start_time=start, depth=self.max_depth_x)\n\t\t\t\telse:\n\t\t\t\t\t(_, x, y) = self.minimax(max=True, start_time=start, depth=self.max_depth_o)\n\t\t\telse: # algo == self.ALPHABETA\n\t\t\t\tif self.player_turn == 'X':\n\t\t\t\t\t(m, x, y) = self.alphabeta(max=False, start_time=start, depth=self.max_depth_x)\n\t\t\t\telse:\n\t\t\t\t\t(m, x, y) = self.alphabeta(max=True, start_time=start, depth=self.max_depth_o)\n\t\t\tend = time.time()\n\n\t\t\tround_time = round(end - start, 7)\n\t\t\tif round_time > self.t:\n\t\t\t\tprint(\"AI took too long to evaluate the next move and has lost.\")\n\t\t\t\treturn\n\t\t\t# print(F'Player {self.player_turn} under AI control plays: x = {x}, y = {y}')\n\t\t\tself.print_move_stats(round_time)\n\t\t\t# print(F'===========================================================')\n\n\t\t\tself.last_move = (x, y)\n\t\t\tself.current_state[x][y] = self.player_turn\n\t\t\tself.switch_player()\n\n\tdef print_move_stats(self, round_time):\n\t\tself.total_eval_time += round_time\n\t\t# print(F'Evaluation time: {round_time}s')\n\t\t# print(F'States Evaluated: {self.current_num_states_evald}')\n\t\t# print(F'States by Depth: {self.current_depth_states_evald}')\n\n\t\ttotal_sum = 0\n\t\tfor k in self.current_depth_states_evald.keys():\n\t\t\ttotal_sum += self.current_depth_states_evald[k]*k\n\n\t\t# print(F'Average Depth: {total_sum/self.current_num_states_evald}s')\n\t\t# print(F'Average Recursive Depth: TODO')\n\n\tdef print_game_stats(self):\n\t\tavg_eval_time = self.total_eval_time/self.total_num_states_evald\n\t\tprint(F'Avg Evaluation time: {avg_eval_time}s')\n\t\tprint(F'Total States Evaluated: {self.total_num_states_evald}')\n\n\t\tsum_depth = 0\n\t\tfor k in self.total_depth_states_evald.keys():\n\t\t\tsum_depth += self.total_depth_states_evald[k]*k\n\n\t\tavg_depth = sum_depth/self.total_num_states_evald\n\t\tprint(F'Total Average depth: {avg_depth}')\n\n\t\tprint(F'Total States by Depth: {self.total_depth_states_evald}')\n\t\tprint(F'Average Recursive Depth: TODO')\n\n\t\tprint(F'Total moves: {self.num_moves}')\n\t\treturn (avg_eval_time, self.total_num_states_evald, avg_depth, self.total_depth_states_evald, self.num_moves)\n\n\n\tdef print_scoreboard(self, games_played, e1_win, e2_win, ties):\n\n\t\tprint(F'Num games {games_played}')\n\t\tprint(F'E1 vs E2 vs ties: {e1_win} : {e2_win} : {ties}')\n\n\tdef print_scoreboard_stats(self):\n\n\t\tprint(F'Avg Evaluation time: {self.total_eval_time/self.total_num_states_evald}s')\n\t\tprint(F'Total States Evaluated: {self.total_num_states_evald}')\n\n\t\tsum_depth = 0\n\t\tfor k in self.total_depth_states_evald.keys():\n\t\t\tsum_depth += self.total_depth_states_evald[k]*k\n\t\tprint(F'Total Average depth: {sum_depth/self.total_num_states_evald}')\n\n\t\tprint(F'Total States by Depth: {self.total_depth_states_evald}')\n\t\tprint(F'Average Recursive Depth: TODO')\n\n\t\tprint(F'Total moves: {self.num_moves}')\n\n\ndef main():\n\tg = Game(recommend=True)\n\tgames_played = 5 #int(int(input(\"How many games: \")) / 2)\n\td1 = int(input(\"d1: \"))\n\td2 = int(input(\"d2: \"))\n\talgo = int(input(\"Algorithm [minimax: 0 or Alphabeta: 1]: \") or 1)\n\twith open(F\"traces/scoreboard_{g.n}{g.block_count}{g.s}{g.t}.txt\", 'w') as f:\n\t\twith redirect_stdout(f):\n\n\t\t\tplayer_x, player_o = g.AI, g.AI\n\t\t\tg.decide_depth(algo, [d1, d2], \"e1\")\n\t\t\tg.show_game_conditions(player_x, player_o)\n\t\t\te1_win = 0\n\t\t\te2_win = 0\n\t\t\ttied = 0\n\n\t\t\tprint(\"Start: e1 goes first\")\n\t\t\tfor i in 
range(games_played):\n\t\t\t\t# pass blocks by keyword so it is not consumed as the first-player argument\n\t\t\t\tg.initialize_game(g.n, blocks=g.blocks)\n\t\t\t\tresult = g.play(algo=g.a)\n\t\t\t\tif result == \"X\":\n\t\t\t\t\te1_win += 1\n\t\t\t\telif result == \"O\":\n\t\t\t\t\te2_win += 1\n\t\t\t\telse:\n\t\t\t\t\ttied += 1\n\t\t\t\t# print(F'Game {i}: {result} wins')\n\n\t\t\tprint(\"Switch it & flip it: e2 goes first\")\n\n\t\t\tfor i in range(games_played):\n\t\t\t\tg.initialize_game(g.n, 'O', g.blocks)\n\t\t\t\tresult = g.play(algo=g.a)\n\t\t\t\tif result == \"X\":\n\t\t\t\t\te1_win += 1\n\t\t\t\telif result == \"O\":\n\t\t\t\t\te2_win += 1\n\t\t\t\telse:\n\t\t\t\t\ttied += 1\n\n\t\t\t\t# print(F'Game {i+games_played}: {result} wins')\n\n\t\t\tg.print_scoreboard(games_played*2, e1_win, e2_win, tied)\n\t\t\tg.print_game_stats()\n\n\t# player_x, player_o = g.decide_player()\n\t# g.decide_depth(player_x, player_o)\n\t# g.show_game_conditions(player_x, player_o)\n\t# g.play(algo=g.a, player_x=player_x, player_o=player_o)\n\n\nif __name__ == \"__main__\":\n\tmain()\n" ]
[ [ "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wilson1yan/routing-transformer
[ "cc2700f54f1ade9b0d896f4d9cb8cbe896ae8e77" ]
[ "routing_transformer/routing_transformer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom inspect import isfunction\nfrom operator import mul\nfrom functools import partial, reduce, wraps\n\nfrom local_attention import LocalAttention\nfrom axial_positional_embedding import AxialPositionalEmbedding\nfrom product_key_memory import PKM\nfrom mixture_of_experts import MoE\nfrom routing_transformer.reversible import ReversibleSequence, SequentialSequence\n\nimport horovod.torch as hvd\n\n# constants\n\nTOKEN_SELF_ATTN_VALUE = -5e4\nKMEAN_INIT_ITERS = 10\n\n# helper functions\n\ndef identity(x, *args, **kwargs):\n return x\n\ndef default(x, d):\n if x is None:\n return d if not isfunction(d) else d()\n return x\n\ndef cast_tuple(x):\n return x if isinstance(x, tuple) else (x,)\n\ndef cache_fn(f):\n cache = None\n @wraps(f)\n def cached_fn(*args, **kwargs):\n nonlocal cache\n if cache is not None:\n return cache\n cache = f(*args, **kwargs)\n return cache\n return cached_fn\n\ndef to(t):\n return {'device': t.device, 'dtype': t.dtype}\n\ndef find_modules(nn_module, type):\n return [module for module in nn_module.modules() if isinstance(module, type)]\n\ndef is_empty(t):\n return t.nelement() == 0\n\ndef max_neg_value(tensor):\n return -torch.finfo(tensor.dtype).max\n\ndef batched_index_select(values, indices):\n last_dim = values.shape[-1]\n return values.gather(2, expand_dim(indices, -1, last_dim))\n\ndef merge_dims(ind_from, ind_to, tensor):\n shape = list(tensor.shape)\n arr_slice = slice(ind_from, ind_to + 1)\n shape[arr_slice] = [reduce(mul, shape[arr_slice])]\n return tensor.reshape(*shape)\n\ndef expand_dim(t, dim, k):\n t = t.unsqueeze(dim)\n expand_shape = [-1] * len(t.shape)\n expand_shape[dim] = k\n return t.expand(*expand_shape)\n\ndef scatter_mean(src, t, index, dim, eps = 1e-5):\n numer = src.scatter_add(dim, index, t)\n denom = src.scatter_add(dim, index, torch.ones_like(t))\n return numer / (denom + eps)\n\ndef split_at_index(dim, index, t):\n pre_slices = (slice(None),) * dim\n l = (*pre_slices, slice(None, index))\n r = (*pre_slices, slice(index, None))\n return t[l], t[r]\n\ndef reshape_dim(t, dim, split_dims):\n shape = list(t.shape)\n num_dims = len(shape)\n dim = (dim + num_dims) % num_dims\n shape[dim:dim+1] = split_dims\n return t.reshape(shape)\n\ndef ema(old, new, decay):\n if old is None:\n return new\n return old * decay + new * (1 - decay)\n\ndef ema_inplace(moving_avg, new, decay):\n if is_empty(moving_avg):\n moving_avg.data.copy_(new)\n return\n moving_avg.data.mul_(decay).add_(new, alpha= (1 - decay))\n\n# helper classes\n\nclass Chunk(nn.Module):\n def __init__(self, chunks, fn, along_dim = -1):\n super().__init__()\n self.dim = along_dim\n self.chunks = chunks\n self.fn = fn\n\n def forward(self, x, **kwargs):\n if self.chunks <= 1:\n return self.fn(x, **kwargs)\n chunks = x.chunk(self.chunks, dim = self.dim)\n return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)\n\nclass PreNorm(nn.ModuleList):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n x = self.norm(x)\n return self.fn(x, **kwargs)\n\nclass ProjectInOut(nn.Module):\n def __init__(self, fn, dim_in, dim_out, project_out = True):\n super().__init__()\n self.fn = fn\n self.project_in = nn.Linear(dim_in, dim_out)\n self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity\n\n def forward(self, x, **kwargs):\n x = self.project_in(x)\n x, loss = self.fn(x, **kwargs)\n x = 
self.project_out(x)\n return x, loss\n\n# kmeans related function and class\n\ndef update_kmeans_on_backwards(module):\n module.kmean_modules = find_modules(module, Kmeans)\n def hook(_, grad_in, grad_out):\n for m in module.kmean_modules:\n m.update()\n\n return module.register_backward_hook(hook)\n\ndef similarity(x, means):\n return torch.einsum('bhld,hcd->bhlc', x, means)\n\ndef dists_and_buckets(x, means):\n dists = similarity(x, means)\n _, buckets = torch.max(dists, dim=-1)\n return dists, buckets\n\ndef batched_bincount(index, num_classes, dim=-1):\n shape = list(index.shape)\n shape[dim] = num_classes\n out = index.new_zeros(shape)\n out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype))\n return out\n\ndef kmeans_iter(x, means, buckets = None):\n b, h, l, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1]\n\n if buckets is None:\n _, buckets = dists_and_buckets(x, means)\n\n bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True)\n zero_mask = bins.long() == 0\n\n means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype)\n means_.scatter_add_(-2, expand_dim(buckets, -1, d), x)\n means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype)\n\n means = torch.where(zero_mask.unsqueeze(-1), means, means_)\n means = means.squeeze(0)\n return means\n\ndef distribution(dists, window_size):\n _, topk_indices = dists.topk(k=window_size, dim=-2)\n indices = topk_indices.transpose(-2, -1)\n return indices.reshape(*indices.size()[:2], -1)\n\n\nclass Kmeans(nn.Module):\n def __init__(self, num_heads, head_dim, num_clusters, ema_decay = 0.999, commitment = 1e-4):\n super().__init__()\n self.commitment = commitment\n self.ema_decay = ema_decay\n\n self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim))\n self.initted = False\n self.num_new_means = 0\n self.new_means = None\n\n @torch.no_grad()\n def init(self, x):\n if self.initted:\n return\n _, h, _, d, device, dtype = *x.shape, x.device, x.dtype\n\n num_clusters = self.means.shape[1]\n\n means = x.transpose(0, 1).contiguous().view(h, -1, d)\n num_samples = means.shape[1]\n\n if num_samples >= num_clusters:\n indices = torch.randperm(num_samples, device=device)[:num_clusters]\n else:\n indices = torch.randint(0, num_samples, (num_clusters,), device=device)\n\n means = means[:, indices]\n\n for _ in range(KMEAN_INIT_ITERS):\n means = kmeans_iter(x, means)\n\n self.num_new_means = 0\n self.means.data.copy_(hvd.broadcast(means, 0))\n\n self.initted = True\n\n @torch.no_grad()\n def update(self, new_means = None):\n new_means = default(new_means, self.new_means)\n assert new_means is not None, 'new kmeans has not been supplied'\n ema_inplace(self.means, new_means, self.ema_decay)\n\n del self.new_means\n self.new_means = None\n self.num_new_means = 0\n\n def forward(self, x, update_means = False):\n self.init(x)\n\n b, dtype = x.shape[0], x.dtype\n means = self.means.type(dtype)\n x = F.normalize(x, 2, dim=-1).type(dtype)\n\n with torch.no_grad():\n dists, buckets = dists_and_buckets(x, means)\n\n routed_means = batched_index_select(expand_dim(means, 0, b), buckets)\n loss = F.mse_loss(x, routed_means) * self.commitment\n\n if update_means:\n with torch.no_grad():\n means = kmeans_iter(x, means, buckets)\n self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1))\n self.num_new_means += 1\n\n return dists, loss\n\n# kmeans attention class\n\nclass KmeansAttention(nn.Module):\n def __init__(self, num_clusters, window_size, num_heads, head_dim, 
causal = False, dropout = 0., ema_decay = 0.999, commitment = 1e-4, context_window_size = None, receives_context = False, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n self.num_heads = num_heads\n self.num_clusters = num_clusters\n self.head_dim = head_dim\n\n self.window_size = window_size\n self.context_window_size = default(context_window_size, window_size)\n self.causal = causal\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment)\n self.dropout = nn.Dropout(dropout)\n\n self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0)\n self.mem_key = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n self.mem_value = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n\n def forward(self, q, k, v, query_mask = None, key_mask = None, **kwargs):\n b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype\n is_reverse = kwargs.pop('_reverse', False)\n\n out = torch.zeros_like(q, dtype=dtype)\n\n update_kmeans = self.training and not is_reverse\n\n key_mask = default(key_mask, query_mask) if not self.receives_context else key_mask\n kv_wsz = wsz if not self.receives_context else c_wsz\n\n wsz = min(wsz, t)\n kv_wsz = min(kv_wsz, kv_t)\n\n if not self.shared_qk or self.receives_context:\n dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans)\n q_dists, k_dists = split_at_index(2, t, dists)\n indices = distribution(q_dists, wsz)\n kv_indices = distribution(k_dists, kv_wsz)\n else:\n dists, aux_loss = self.kmeans(q, update_kmeans)\n k = F.normalize(k, dim=-1).to(q)\n indices = distribution(dists, wsz)\n kv_indices = indices\n\n q = batched_index_select(q, indices)\n k = batched_index_select(k, kv_indices)\n v = batched_index_select(v, kv_indices)\n\n reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d)\n q, k, v = map(reshape_with_window, (q, k, v))\n\n m_k, m_v = map(lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value))\n k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v)))\n\n dots = torch.einsum('bhnid,bhnjd->bhnij', q, k) * (d ** -0.5)\n\n mask_value = max_neg_value(dots)\n\n if query_mask is not None or key_mask is not None:\n query_mask = default(query_mask, lambda: torch.ones((b, t), device=device).bool())\n key_mask = default(key_mask, lambda: torch.ones((b, kv_t), device=device).bool())\n\n q_mask = expand_dim(query_mask, 1, h).gather(2, indices)\n kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices)\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask))\n mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n if self.causal:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n if self.shared_qk:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=False)\n dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)\n del mask\n\n dots = dots.softmax(dim=-1)\n dots = self.dropout(dots)\n\n bo = 
torch.einsum('bhcij,bhcjd->bhcid', dots, v)\n so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype)\n out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2)\n return out, aux_loss\n\n# feedforward\n\nclass GELU_(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\nGELU = nn.GELU if hasattr(nn, 'GELU') else GELU_\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):\n super().__init__()\n activation = default(activation, GELU)\n\n self.glu = glu\n self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))\n self.act = activation()\n self.dropout = nn.Dropout(dropout)\n self.w2 = nn.Linear(dim * mult, dim)\n\n def forward(self, x, **kwargs):\n if not self.glu:\n x = self.w1(x)\n x = self.act(x)\n else:\n x, v = self.w1(x).chunk(2, dim=-1)\n x = self.act(x) * v\n\n x = self.dropout(x)\n x = self.w2(x)\n return x\n\n# self attention\n\nclass SelfAttention(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads, local_attn_heads, window_size, dim_head = None, local_attn_window_size = None, causal = False, attn_dropout = 0., dropout = 0., kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n assert dim_head or (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'\n assert (max_seq_len % window_size) == 0, 'maximum sequence length must be divisible by the target window size'\n assert local_attn_heads <= heads, 'number of local attention heads must be less than total heads'\n assert not (receives_context and local_attn_heads > 0), 'local attention cannot be used for self attention with context'\n assert not (receives_context and causal), 'contextual attention layer cannot be causal'\n\n local_attn_window_size = default(local_attn_window_size, window_size // 2)\n context_window_size = default(context_window_size, window_size)\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.heads = heads\n self.local_attn_heads = local_attn_heads\n self.global_attn_heads = heads - local_attn_heads\n\n self.window_size = window_size\n\n dim_head = default(dim_head, dim // heads)\n dim_heads = dim_head * heads\n self.dim_head = dim_head\n\n num_clusters = max_seq_len // window_size\n\n # local\n\n local_dim_heads = dim_head * self.local_attn_heads\n\n if self.local_attn_heads > 0:\n rel_pos_emb_config = (dim_head, local_attn_heads) if rel_pos_emb else None\n self.local_attn = LocalAttention(local_attn_window_size, causal = True, dropout = attn_dropout, rel_pos_emb_config = rel_pos_emb_config)\n self.local_to_qkv = nn.Linear(dim, 3 * local_dim_heads)\n\n # global\n\n global_dim_heads = dim_head * self.global_attn_heads\n\n if self.global_attn_heads > 0:\n self.global_attn = KmeansAttention(num_clusters, window_size, self.global_attn_heads, dim_head, causal = causal, dropout = attn_dropout, ema_decay = kmeans_ema_decay, commitment = commitment_factor, receives_context = receives_context, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n\n self.to_q = nn.Linear(dim, global_dim_heads, bias = False)\n self.to_v = nn.Linear(dim, global_dim_heads, bias = False)\n\n if not self.shared_qk:\n self.to_k = nn.Linear(dim, global_dim_heads, bias = False)\n\n # out\n\n self.to_out = nn.Linear(dim_heads, dim, bias = False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, 
context = None, input_mask = None, context_mask = None, **kwargs):\n assert not (self.receives_context and context is None), 'context must be passed if self attention is set to receive context'\n b, t, e, h, dh = *x.shape, self.heads, self.dim_head\n has_local, has_global = map(lambda x: x > 0, (self.local_attn_heads, self.global_attn_heads))\n\n split_heads = lambda v: reshape_dim(v, -1, (-1, dh)).transpose(1, 2).contiguous()\n\n if has_local:\n local_qkv = self.local_to_qkv(x).chunk(3, dim=-1)\n lq, lk, lv = map(split_heads, local_qkv)\n\n if has_global:\n kv_input = x if not self.receives_context else context\n\n q, v = self.to_q(x), self.to_v(kv_input)\n\n if not self.shared_qk:\n k = self.to_k(kv_input)\n else:\n k = self.to_q(kv_input) if self.receives_context else q\n\n q, k, v = map(split_heads, (q, k, v))\n\n out = []\n total_loss = torch.tensor(0., requires_grad=True, **to(x))\n\n if has_local:\n local_out = self.local_attn(lq, lk, lv, input_mask = input_mask)\n out.append(local_out)\n\n if has_global:\n global_out, loss = self.global_attn(q, k, v, query_mask = input_mask, key_mask = context_mask)\n total_loss = total_loss + loss\n\n out.append(global_out)\n\n out = torch.cat(out, dim=1)\n out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1)\n out = self.to_out(out)\n return self.dropout(out), total_loss\n\nclass RoutingTransformer(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads = 8, dim_head = None, window_size = 64, local_attn_window_size = None, causal = False, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., n_local_attn_heads = 0, ff_glu = False, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, _register_kmeans_update = False, rel_pos_emb = True, pkm_layers = tuple(), pkm_num_keys = 128, moe_layers = tuple(), moe_num_experts = 4, moe_loss_coef = 1e-2, num_mem_kv = 0, shared_qk = None, context_shared_qk = False):\n super().__init__()\n shared_qk = default(shared_qk, causal) # default to shared qk when causal, due to experimental results\n\n local_attn_window_size = default(local_attn_window_size, window_size // 2)\n if type(n_local_attn_heads) is not tuple:\n n_local_attn_heads = tuple([n_local_attn_heads] * depth)\n\n assert len(n_local_attn_heads) == depth, 'local attention heads tuple must have the same length as the depth'\n assert all([(local_heads <= heads) for local_heads in n_local_attn_heads]), 'number of local attn heads must be less than the maximum number of heads'\n\n layers = nn.ModuleList([])\n fn_wrapper = partial(PreNorm, dim)\n\n get_attn = lambda local_heads: SelfAttention(dim, depth, max_seq_len, heads, local_heads, window_size, causal = causal, dim_head = dim_head, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, rel_pos_emb = rel_pos_emb, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_context_attn = lambda: SelfAttention(dim, depth, max_seq_len, heads, 0, window_size, dim_head = dim_head, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, receives_context = True, context_window_size = context_window_size, 
num_mem_kv = num_mem_kv, shared_qk = context_shared_qk)\n get_context_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)\n get_moe = lambda: MoE(dim, num_experts = moe_num_experts, loss_coef = moe_loss_coef)\n\n if weight_tie:\n assert len(set(n_local_attn_heads)) == 1, 'you can only weight tie if number of local attention heads for all layers is the same'\n get_attn, get_ff, get_context_attn, get_context_ff, get_pkm, get_moe = map(cache_fn, (get_attn, get_ff, get_context_attn, get_context_ff, get_pkm, get_moe))\n\n for ind, local_heads in zip(range(depth), n_local_attn_heads):\n layer = ind + 1\n use_pkm = layer in cast_tuple(pkm_layers)\n use_moe = layer in cast_tuple(moe_layers)\n\n get_parallel_fn = get_pkm if use_pkm else get_ff\n get_parallel_fn = get_moe if use_moe else get_parallel_fn\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_attn(local_heads)),\n fn_wrapper(get_parallel_fn())\n ]))\n\n if not receives_context:\n continue\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_context_attn()),\n fn_wrapper(get_context_ff())\n ]))\n\n execute_type = ReversibleSequence if reversible else SequentialSequence\n\n attn_context_layer = ((True, False),) if receives_context else tuple()\n route_attn = ((True, False), *attn_context_layer) * depth\n route_context = ((False, False), *attn_context_layer) * depth\n\n context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {}\n attn_route_map = {'input_mask': route_attn}\n self.layers = execute_type(layers, args_route = {**attn_route_map, **context_route_map}, layer_dropout = layer_dropout)\n\n if _register_kmeans_update:\n update_kmeans_on_backwards(self)\n\n has_local_attn = any([num > 0 for num in n_local_attn_heads])\n self.pad_to_multiple = local_attn_window_size if has_local_attn else 0\n\n def forward(self, x, **kwargs):\n x, loss = self.layers(x, **kwargs)\n return x, loss\n\nclass RoutingTransformerLM(nn.Module):\n def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, dim_head = None, window_size = 64, local_attn_window_size = None, causal = False, emb_dim = None, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, return_embeddings = False, n_local_attn_heads = 0, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, _register_kmeans_update = True, pkm_layers = tuple(), pkm_num_keys = 128, moe_layers = tuple(), moe_num_experts = 4, moe_loss_coef = 1e-2, num_mem_kv = 0, shared_qk = None, context_shared_qk = False):\n super().__init__()\n assert (max_seq_len % window_size) == 0, 'max sequence length must be divisible by the window size, to calculate number of kmeans cluster'\n emb_dim = default(emb_dim, dim)\n self.max_seq_len = max_seq_len\n\n self.token_emb = nn.Embedding(num_tokens, emb_dim)\n self.axial_pos_emb = AxialPositionalEmbedding(emb_dim, axial_shape=(max_seq_len // window_size, window_size))\n self.routing_transformer = RoutingTransformer(dim, depth, max_seq_len, heads = heads, dim_head = dim_head, window_size = window_size, local_attn_window_size = local_attn_window_size, causal = causal, weight_tie = weight_tie, ff_dropout = ff_dropout, attn_dropout = attn_dropout, attn_layer_dropout = attn_layer_dropout, layer_dropout = layer_dropout, n_local_attn_heads = 
n_local_attn_heads, ff_glu = ff_glu, reversible = reversible, ff_chunks = ff_chunks, kmeans_ema_decay = kmeans_ema_decay, receives_context = receives_context, context_window_size = context_window_size, rel_pos_emb = rel_pos_emb, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys, moe_layers = moe_layers, moe_num_experts = moe_num_experts, moe_loss_coef = moe_loss_coef, num_mem_kv = num_mem_kv, shared_qk = shared_qk, context_shared_qk = context_shared_qk, _register_kmeans_update = _register_kmeans_update)\n\n if emb_dim != dim:\n self.routing_transformer = ProjectInOut(self.routing_transformer, emb_dim, dim, project_out = not return_embeddings)\n\n self.out = nn.Linear(emb_dim, num_tokens) if not return_embeddings else identity\n\n def forward(self, x, **kwargs):\n x = self.token_emb(x)\n x = x + self.axial_pos_emb(x)\n x, loss = self.routing_transformer(x, **kwargs)\n return self.out(x), loss\n" ]
[ [ "torch.randint", "torch.max", "torch.cat", "torch.randperm", "torch.nn.Embedding", "torch.no_grad", "torch.finfo", "torch.pow", "torch.nn.Dropout", "torch.ones", "torch.einsum", "torch.randn", "torch.reshape", "torch.ones_like", "torch.nn.functional.pad", "torch.nn.ModuleList", "torch.zeros_like", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.nn.functional.normalize", "torch.nn.LayerNorm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kruus/pymde
[ "0bfa9c308660bda2fa5161ffce00ce22ef6e773b" ]
[ "pymde/util.py" ]
[ "\"\"\"Internal utilities.\"\"\"\nimport functools\nimport numbers\n\nimport numpy as np\nimport scipy.sparse\nimport torch\n\nfrom pymde.average_distortion import _project_gradient\n\n\n_DEVICE = torch.device(\"cpu\")\n\n\nclass SolverError(Exception):\n pass\n\n\ndef get_default_device():\n return str(_DEVICE)\n\n\ndef _canonical_device(device):\n if isinstance(device, str):\n device = torch.device(device)\n elif not isinstance(device, torch.device):\n raise ValueError(\"device must be a str or a torch.device object.\")\n\n if device.type == \"cuda\" and device.index is None:\n device = torch.device(\"cuda\", torch.cuda.current_device())\n return device\n\n\ndef set_default_device(device):\n global _DEVICE\n _DEVICE = _canonical_device(device)\n\n\ndef _module_device(module):\n data = list(module.buffers())\n if not data:\n return None\n device = str(data[0].device)\n if any(str(datum.device) != device for datum in data):\n return None\n return device\n\n\ndef _is_numeric(arg):\n return (\n isinstance(arg, numbers.Number)\n or isinstance(arg, np.ndarray)\n or isinstance(arg, np.matrix)\n or isinstance(arg, torch.Tensor)\n )\n\n\ndef to_tensor(args, device=None):\n \"\"\"Convert an arg or sequence of args to torch Tensors\n \"\"\"\n singleton = not isinstance(args, (list, tuple))\n if singleton:\n args = [args]\n\n tensor_args = []\n for arg in args:\n if isinstance(arg, torch.Tensor):\n tensor_args.append(arg)\n elif _is_numeric(arg):\n if isinstance(arg, np.ndarray) and arg.dtype == np.float64:\n tensor_args.append(\n torch.tensor(arg, dtype=torch.float32, device=device)\n )\n else:\n tensor_args.append(torch.tensor(arg, device=device))\n else:\n raise ValueError(\"Received non-numeric argument \", arg)\n return tensor_args[0] if singleton else tensor_args\n\n\ndef tensor_arguments(func):\n \"\"\"Cast numeric args and kwargs of func to Tensors.\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n tensor_args = to_tensor(args)\n tensor_kwargs = {}\n for key, arg in kwargs.items():\n if isinstance(arg, torch.Tensor):\n tensor_kwargs[key] = arg\n elif _is_numeric(arg):\n tensor_kwargs[key] = torch.tensor(arg, device=_DEVICE)\n else:\n raise ValueError(\n \"Received non-numeric argument (name %s, value %s)\"\n % (key, arg)\n )\n return func(*tensor_args, **tensor_kwargs)\n\n return wrapper\n\n\ndef all_edges(n):\n \"\"\"Return a tensor of all (n choose 2) edges\n\n Constructs all possible edges among n items. For example, if ``n`` is 4,\n the return value will be equal to\n\n .. 
code:: python3\n\n torch.Tensor([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]])\n \"\"\"\n return torch.triu_indices(n, n, 1).T\n\n\n@tensor_arguments\ndef natural_length(n, m):\n return (2.0 * n * m / (n - 1)).sqrt()\n\n\ndef in_stdemb(X):\n cov = (1.0 / X.shape[0]) * X.T @ X\n eye = torch.eye(2, dtype=X.dtype, device=X.device)\n mean = X.mean(axis=0)\n zero = torch.tensor(0.0, dtype=X.dtype, device=X.device)\n return torch.isclose(cov, eye).all() and torch.isclose(mean, zero).all()\n\n\ndef proj_standardized(X, demean=False, inplace=False):\n if demean:\n if inplace:\n X.sub_(X.mean(axis=0))\n else:\n X -= X.mean(axis=0)\n\n # pytorch 1.8.0 has a bug in which torch.svd fails when requires_grad\n # is true on the input (even if called under torch.no_grad)\n requires_grad = X.requires_grad\n X.requires_grad_(False)\n\n n = torch.tensor(X.shape[0], dtype=X.dtype, device=X.device)\n m = X.shape[1]\n # TODO: Gracefully handle the rare svd failure\n # TODO: debug alternative eigenvec approach ...\n # (evals, V = torch.eig(X.T @ X, eigenvectors=True)\n # proj = X @ V @ torch.diag(evals[:, 0].sqrt().pow(-1)) ...\n # proj *= torch.sqrt(n)\n if inplace:\n s = torch.zeros(m, device=X.device, dtype=X.dtype)\n V = torch.zeros((m, m), device=X.device, dtype=X.dtype)\n try:\n U, _, V = torch.svd(X, out=(X, s, V))\n except RuntimeError as e:\n X.requires_grad_(requires_grad)\n raise SolverError(str(e))\n torch.matmul(U[:, :m], V.T[:, :m], out=X)\n X.mul_(torch.sqrt(n))\n X.requires_grad_(requires_grad)\n return X\n else:\n try:\n U, _, V = torch.svd(X)\n except RuntimeError as e:\n X.requires_grad_(requires_grad)\n raise SolverError(str(e))\n proj = torch.sqrt(n) * U[:, :m] @ V.T[:, :m]\n X.requires_grad_(requires_grad)\n return proj\n\n\ndef adjacency_matrix(n, m, edges, weights):\n if isinstance(weights, torch.Tensor):\n weights = weights.detach().cpu().numpy()\n if isinstance(edges, torch.Tensor):\n edges = edges.detach().cpu().numpy()\n A = scipy.sparse.coo_matrix(\n (weights, (edges[:, 0], edges[:, 1])), shape=(n, n), dtype=np.float32\n )\n A = A + A.T\n return A.tocoo()\n\n\n@tensor_arguments\ndef procrustes(X_source, X_target):\n \"\"\"min |X_source Q - X_target|_F s.t. Q^TQ = I\"\"\"\n U, _, V = torch.svd(X_target.T @ X_source)\n return V @ U.T\n\n\n@tensor_arguments\ndef _rotate_2d(X, degrees):\n theta = torch.deg2rad(degrees)\n rot = torch.tensor(\n [\n [torch.cos(theta), -torch.sin(theta)],\n [torch.sin(theta), torch.cos(theta)],\n ],\n device=X.device,\n )\n return X @ rot\n\n\n@tensor_arguments\ndef _rotate_3d(X, alpha, beta, gamma):\n alpha = torch.deg2rad(alpha.float())\n beta = torch.deg2rad(beta.float())\n gamma = torch.deg2rad(gamma.float())\n rot_x = torch.tensor(\n [\n [1, 0, 0],\n [0, torch.cos(alpha), torch.sin(alpha)],\n [0, -torch.sin(alpha), torch.cos(alpha)],\n ],\n device=X.device,\n )\n rot_y = torch.tensor(\n [\n [torch.cos(beta), 0.0, -torch.sin(beta)],\n [0, 1, 0],\n [torch.sin(beta), 0.0, torch.cos(beta)],\n ],\n device=X.device,\n )\n rot_z = torch.tensor(\n [\n [torch.cos(gamma), torch.sin(gamma), 0.0],\n [-torch.sin(gamma), torch.cos(gamma), 0.0],\n [0, 0, 1],\n ],\n device=X.device,\n )\n rot_3d = rot_x @ rot_y @ rot_z\n return X @ rot_3d\n\n\n@tensor_arguments\ndef rotate(X, degrees):\n \"\"\"Rotate a 2 or 3D embedding\n\n Rotates a 2/3D embedding by ``degrees``. 
If ``X`` is a 2D embedding,\n ``degrees`` should be a scalar; if it is 3D, ``degrees`` should be\n a length-3 ``torch.Tensor``, with one angle for each axis (the embedding\n will be rotated along the x-axis first, then the y-axis, then the z-axis).\n\n Arguments\n ---------\n X : torch.Tensor\n The embedding to rotate.\n degrees: torch.Tensor\n The angles of rotation.\n\n Returns\n -------\n torch.Tensor\n The rotated embedding\n \"\"\"\n if X.shape[1] not in [2, 3]:\n raise ValueError(\n \"Only 2 or 3 dimensional embeddings can be \"\n \"rotated using this method.\"\n )\n\n if X.shape[1] == 2:\n if degrees.numel() != 1:\n raise ValueError(\"`degrees` must be a scalar.\")\n return _rotate_2d(X, degrees)\n else:\n if degrees.numel() != 3:\n raise ValueError(\"`degrees` must be a length-3 tensor.\")\n return _rotate_3d(X, degrees[0], degrees[1], degrees[2])\n\n\n@tensor_arguments\ndef center(X):\n \"\"\"Center an embedding\n\n Returns a new embedding, equal to the given embedding minus the mean\n of its rows.\n \"\"\"\n return X - X.mean(dim=0)[None, :]\n\n\n@tensor_arguments\ndef align(source, target):\n \"\"\"Align an source embedding to a target embedding\n\n Align the source embedding to another target embedding, via\n rotation. The alignment is done by solving an\n orthogonal Procrustes problem.\n\n Arguments\n ---------\n source: torch.Tensor\n The embedding to be aligned.\n target: torch.Tensor\n The target embedding, of the same shape as source.\n\n Returns\n -------\n torch.Tensor\n The rotation of source best aligned to the target.\n \"\"\"\n source_mean = source.mean(dim=0)\n source = source - source_mean[None, :]\n source_col_rms = source.norm(dim=0)\n source = source / source_col_rms[None, :]\n\n target = center(target)\n target = target / target.norm(dim=0)\n\n Q = procrustes(source, target)\n rotated = source @ Q\n return (rotated * source_col_rms[None, :]) + source_mean\n\n\n@tensor_arguments\ndef scale_delta(delta, d_nat):\n # scale delta so RMS(delta) == d_nat\n N = delta.nelement()\n rms = torch.sqrt(1 / N * torch.sum(delta ** 2))\n return delta * d_nat / rms\n\n\nclass LinearOperator(object):\n def __init__(self, matvec, device):\n self._matvec = matvec\n self.device = device\n\n def matvec(self, vecs):\n return self._matvec(vecs)\n\n\ndef make_hvp(f, edges, X, constraint):\n X_shape = X.shape\n\n def avg_dist_flat(X_flat):\n X_reshaped = X_flat.view(X_shape)\n if constraint is not None:\n # a noop in the forward pass, but projects the gradient onto\n # the tangent space of the constraint in the backward pass\n X_reshaped = _project_gradient(X_reshaped, constraint)\n # using custom average distortion yields a zero for hvp, since\n # gradient graph is disconnected\n differences = X_reshaped[edges[:, 0]] - X_reshaped[edges[:, 1]]\n norms = differences.pow(2).sum(dim=1).sqrt()\n return f(norms).mean()\n\n X_flat = X.view(-1).detach()\n\n def hvp(vecs):\n vecs = torch.split(vecs, 1, dim=1)\n products = []\n for v in vecs:\n _, product = torch.autograd.functional.vhp(\n avg_dist_flat, X_flat, v.squeeze()\n )\n products.append(product)\n return torch.stack(products, dim=1)\n\n return hvp\n\n\ndef hutchpp(linear_operator, dimension, n_queries):\n A = linear_operator\n d = dimension\n m = n_queries\n S = torch.randn(d, m // 3, device=A.device)\n G = torch.randn(d, m // 3, device=A.device)\n Q, _ = torch.qr(A.matvec(S))\n proj = G - Q @ (Q.T @ G)\n return torch.trace(Q.T @ A.matvec(Q)) + (3.0 / m) * torch.trace(\n proj.T @ A.matvec(proj)\n )\n\n\ndef random_edges(n, p, seed=0):\n 
randomstate = np.random.default_rng(seed)\n edge_idx = randomstate.choice(\n int(n * (n - 1) / 2), p, replace=False, shuffle=False\n )\n u = (\n n\n - 2\n - np.floor(np.sqrt(-8 * edge_idx + 4 * n * (n - 1) - 7) / 2.0 - 0.5)\n )\n v = edge_idx + u + 1 - n * (n - 1) / 2 + (n - u) * ((n - u) - 1) / 2\n return torch.tensor(np.stack([u, v], axis=1).astype(np.int64))\n\n\nclass Distortion(torch.autograd.Function):\n \"\"\"Manual implementation of the average distortion gradient, for testing\"\"\"\n\n @staticmethod\n def forward(ctx, X, f, A, lhs, rhs):\n distances = A.T @ X\n norms = distances.norm(dim=1)\n\n with torch.enable_grad():\n X.requires_grad_(False)\n norms.requires_grad_(True)\n norms.grad = None\n distortion = f(norms).mean()\n distortion.backward()\n g = norms.grad / norms\n X.requires_grad_(True)\n D = g.diag()\n\n grad_E = A @ (D @ (A.T @ X))\n ctx.grad_E = grad_E\n return distortion\n\n def backward(ctx, grad_output):\n return ctx.grad_E * grad_output, None, None, None, None\n\n\n_distortion = Distortion.apply\n" ]
[ [ "torch.svd", "numpy.sqrt", "torch.zeros", "torch.sin", "torch.sum", "torch.split", "torch.device", "torch.isclose", "numpy.random.default_rng", "torch.sqrt", "torch.randn", "torch.eye", "numpy.stack", "torch.tensor", "torch.triu_indices", "torch.cos", "torch.enable_grad", "torch.cuda.current_device", "torch.deg2rad", "torch.stack", "torch.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zjzh/Paddle
[ "fdab43b56692c93a5a732108cca66638796ed66f" ]
[ "python/paddle/distributed/auto_parallel/planner.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport time\nimport random\nimport logging\nfrom functools import reduce\nfrom itertools import chain, product\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport paddle\nimport paddle.distributed.auto_parallel as auto\nfrom .cost_model import estimate_cost\nfrom .dist_op import DistributedOperator\nfrom .process_group import _g_process_group_map\nfrom .process_group import ProcessGroup, get_process_group\nfrom .operators.common import is_elementwise_op\nfrom .operators.common import get_distributed_operator_impl_container\nfrom .utils import update_op_dims_mapping_by_default_dist_impl\nfrom .utils import update_op_dims_mapping_by_elementwise_like_dist_impl\nfrom .utils import get_all_distributed_main_program\nfrom .dist_context import DistributedContext, DistributedOperatorContext\nfrom .dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute\n\npaddle.enable_static()\npaddle.seed(123)\nrandom.seed(123)\nnp.random.seed(123)\n\n\nclass PlanFilter:\n @staticmethod\n def check_dims_mapping_for_tensor(process_mesh_topology, tensor_shape,\n dims_mapping):\n valid = True\n assert len(tensor_shape) == len(dims_mapping)\n\n for idx, dim_mapping in enumerate(dims_mapping):\n if dim_mapping != -1:\n if tensor_shape[idx] % process_mesh_topology[\n dim_mapping] != 0 or dims_mapping.count(\n dim_mapping) > 1:\n valid = False\n if dim_mapping != -1 and process_mesh_topology[0] == 1:\n valid = False\n\n return valid\n\n @staticmethod\n def check_dims_mapping_for_op(op, op_dist_attr, vars):\n process_mesh = op_dist_attr.process_mesh\n assert process_mesh is not None, \"The process mesh should not be None.\"\n for var_name in op.input_arg_names:\n dims_mapping = op_dist_attr.get_input_dims_mapping(var_name)\n if not PlanFilter.check_dims_mapping_for_tensor(\n process_mesh.topology, vars[var_name].shape, dims_mapping):\n return False\n if vars[var_name].is_data and len(dims_mapping) > 1:\n for dim in dims_mapping[1:]:\n if dim != -1:\n return False\n\n for var_name in op.output_arg_names:\n dims_mapping = op_dist_attr.get_output_dims_mapping(var_name)\n if not PlanFilter.check_dims_mapping_for_tensor(\n process_mesh.topology, vars[var_name].shape, dims_mapping):\n return False\n\n return True\n\n @staticmethod\n def check_dims_mapping_for_special_op(op, op_dist_attr, vars):\n if op.type == \"layer_norm\":\n bias_dims_mapping = op_dist_attr.get_input_dims_mapping(\n op.input(\"Bias\")[0])\n scale_dims_mapping = op_dist_attr.get_input_dims_mapping(\n op.input(\"Scale\")[0])\n x_dims_mapping = op_dist_attr.get_input_dims_mapping(\n op.input(\"X\")[0])\n mean_dims_mapping = op_dist_attr.get_output_dims_mapping(\n op.output(\"Mean\")[0])\n variance_dims_mapping = op_dist_attr.get_output_dims_mapping(\n op.output(\"Variance\")[0])\n y_dims_mapping = op_dist_attr.get_output_dims_mapping(\n op.output(\"Y\")[0])\n if x_dims_mapping != 
y_dims_mapping:\n return False\n\n if scale_dims_mapping[0] != x_dims_mapping[-1]:\n return False\n\n if bias_dims_mapping[0] != y_dims_mapping[-1]:\n return False\n\n if mean_dims_mapping[0] != x_dims_mapping[0]:\n return False\n\n if variance_dims_mapping[0] != x_dims_mapping[0]:\n return False\n\n return True\n\n\nclass PlanSpace:\n not_enum_ops = [\"create_py_reader\", \"create_double_buffer_reader\", \"read\"]\n special_vars = [\n \"lod_tensor_blocking_queue_0\", \"create_py_reader_0\", \"double_buffer_0\"\n ]\n\n @staticmethod\n def _enum_dims_mapping(process_mesh_topology, visited, path, depth, res,\n tensor_shape):\n \"\"\"Enumerate dims mapping of tensor by the given process_mesh_topology\"\"\"\n nums = list(range(-1, len(process_mesh_topology)))\n if depth == len(tensor_shape):\n valid = True\n for idx, item in enumerate(path):\n if item != -1:\n if tensor_shape[idx] % process_mesh_topology[\n item] != 0 or path.count(item) > 1:\n valid = False\n if valid:\n res.append(copy.deepcopy(path))\n return\n\n for i in range(len(nums)):\n if not visited[i]:\n if i != 0:\n visited[i] = True\n path.append(nums[i])\n PlanSpace._enum_dims_mapping(process_mesh_topology, visited,\n path, depth + 1, res, tensor_shape)\n visited[i] = False\n path.pop()\n\n @staticmethod\n def enum_process_mesh_topology(processes):\n \"\"\"Enumerate all process meshes with the given processes.\"\"\"\n assert processes >= 1, \"The processes must be number and greater than 0.\"\n # compute divisors\n divisors = []\n for i in range(1, processes + 1):\n if processes % i == 0:\n divisors.append(i)\n\n # compute valid process mesh\n results = []\n for i in range(len(divisors) - 1, 0, -1):\n result = []\n result.append(divisors[i])\n if i == len(divisors) - 1:\n results.append(copy.deepcopy(result))\n continue\n\n j = 1\n while j < len(divisors):\n if len(result) == 1:\n result.append(divisors[j])\n elif len(result) == 2:\n if processes % (result[0] * result[1]) == 0:\n if processes // (result[0] * result[1]) == 1:\n results.append(copy.deepcopy(result))\n break\n else:\n result.append(processes // (result[0] * result[1]))\n results.append(copy.deepcopy(result))\n result.pop(-1)\n result.pop(-1)\n j += 1\n else:\n if result[0] * result[1] < processes:\n result.pop(-1)\n j += 1\n else:\n break\n return results\n\n @staticmethod\n def _enum_valid_dist_attr_for_op(program, op, process_mesh):\n \"\"\"Enumerate the valid distributed attribute for op based on the given process mesh.\"\"\"\n vars = program.global_block().vars\n dims_mapping_dict = OrderedDict()\n op_valid_dist_attrs = []\n dist_op_impl_container = get_distributed_operator_impl_container(\n op.type)\n\n # enumerate all valid dims mapping of tensor when process mesh given\n for var_name in chain(op.input_arg_names, op.output_arg_names):\n visited = [\n False\n for _ in range(\n len(list(range(-1, len(process_mesh.topology)))))\n ]\n depth = 0\n path = []\n dims_mapping_list = []\n PlanSpace._enum_dims_mapping(process_mesh.topology, visited, path,\n depth, dims_mapping_list,\n vars[var_name].shape)\n dims_mapping_dict[var_name] = copy.deepcopy(dims_mapping_list)\n\n # compose dims mapping\n composed_dims_mapping_list = list(\n product(\n *[dims_mapping_dict[key] for key in dims_mapping_dict.keys()]))\n for composed_dims_mapping in composed_dims_mapping_list:\n op_dist_attr = OperatorDistributedAttribute()\n op_dist_attr.process_mesh = process_mesh\n var_names = list(dims_mapping_dict.keys())\n\n for idx, dims_mapping in enumerate(composed_dims_mapping):\n if 
var_names[idx] in op.input_arg_names:\n op_dist_attr.set_input_dims_mapping(var_names[idx],\n dims_mapping)\n elif var_names[idx] in op.output_arg_names:\n op_dist_attr.set_output_dims_mapping(var_names[idx],\n dims_mapping)\n else:\n raise ValueError(\n \"The {varname} is not input or output of op {op}.\".\n format(\n varname='var_names[idx]', op='op'))\n\n dist_op = DistributedOperator(op, op_dist_attr)\n if dist_op_impl_container is None:\n if is_elementwise_op(op.type):\n changed = True\n valid = True\n try:\n changed = update_op_dims_mapping_by_elementwise_like_dist_impl(\n dist_op)\n except Exception as e:\n valid = False\n if valid and not changed:\n if PlanFilter.check_dims_mapping_for_op(\n op, dist_op.dist_attr, vars\n ) and PlanFilter.check_dims_mapping_for_special_op(\n op, dist_op.dist_attr, vars):\n dist_op.dist_attr.impl_type = \"elementwise\"\n dist_op.dist_attr.impl_idx = 0\n op_valid_dist_attrs.append(dist_op.dist_attr)\n continue\n else:\n changed = True\n valid = True\n try:\n changed = update_op_dims_mapping_by_default_dist_impl(\n dist_op)\n except Exception as e:\n valid = False\n if valid and not changed:\n if PlanFilter.check_dims_mapping_for_op(\n op, dist_op.dist_attr, vars\n ) and PlanFilter.check_dims_mapping_for_special_op(\n op, dist_op.dist_attr, vars):\n dist_op.dist_attr.impl_type = \"default\"\n dist_op.dist_attr.impl_idx = 0\n op_valid_dist_attrs.append(dist_op.dist_attr)\n continue\n\n # if op has distributed implements, find all valid dist attr of this op\n impls = dist_op_impl_container.impls\n for idx, impl in enumerate(impls):\n if impl.is_auto_compatible(dist_op):\n if PlanFilter.check_dims_mapping_for_op(\n op, dist_op.dist_attr, vars):\n dist_op.dist_attr.impl_type = dist_op.serial_op.type\n dist_op.dist_attr.impl_idx = idx\n op_valid_dist_attrs.append(dist_op.dist_attr)\n\n # set default dist attr for some special ops whose distributed attributes can not be enumerated\n if not op_valid_dist_attrs:\n op_dist_attr = OperatorDistributedAttribute()\n op_dist_attr.process_mesh = process_mesh\n dist_op = DistributedOperator(op, op_dist_attr)\n for var_name in op.input_arg_names:\n op_dist_attr.set_input_dims_mapping(\n vars[var_name], [-1 for i in vars[var_name].shape])\n for var_name in op.output_arg_names:\n op_dist_attr.set_output_dims_mapping(\n vars[var_name], [-1 for i in vars[var_name].shape])\n dist_op.dist_attr.impl_type = \"default\"\n dist_op.dist_attr.impl_idx = 0\n op_valid_dist_attrs.append(dist_op.dist_attr)\n\n return op_valid_dist_attrs\n\n @staticmethod\n def enum_valid_dist_attr_for_program(program,\n process_mesh_topology,\n is_pipeline=False):\n \"\"\"Enumerate valid distributed attributes for all ops in program.\"\"\"\n valid_dist_attr_dict = OrderedDict()\n ops = program.global_block().ops\n vars = program.global_block().vars\n\n processes = reduce(lambda x, y: x * y, process_mesh_topology)\n global_group = [i for i in range(processes)]\n global_process_mesh = None\n pipeline_process_meshes = None\n\n # in the pipeline mode, there are some process meshes\n if is_pipeline:\n pipeline_stages = process_mesh_topology[-1]\n op_count_per_stage = len(ops) // pipeline_stages\n if len(process_mesh_topology) > 1:\n process_mesh_shape = process_mesh_topology[:-1]\n per_process_mesh_group = processes // pipeline_stages\n pipeline_process_meshes = [auto.ProcessMesh(mesh=np.array(global_group[i*per_process_mesh_group: \\\n (i+1)*per_process_mesh_group]).reshape(process_mesh_shape).tolist()) for i in range(pipeline_stages)]\n elif 
len(process_mesh_topology) == 1:\n pipeline_process_meshes = [\n auto.ProcessMesh(mesh=[i]) for i in range(pipeline_stages)\n ]\n else:\n if len(process_mesh_topology) > 1:\n global_process_mesh = auto.ProcessMesh(mesh=np.array(\n global_group).reshape(process_mesh_topology).tolist())\n else:\n global_process_mesh = auto.ProcessMesh(mesh=global_group)\n\n # enumerate valid distributed attribute for each op in the program\n for idx, op in enumerate(ops):\n op_valid_dist_attrs = None\n op_process_mesh = global_process_mesh\n pipeline_stage = -1\n if pipeline_process_meshes is not None:\n pipeline_stage = idx // op_count_per_stage if idx // op_count_per_stage < len(\n pipeline_process_meshes) else idx // op_count_per_stage - 1\n if pipeline_stage >= len(pipeline_process_meshes):\n pipeline_stage = len(pipeline_process_meshes) - 1\n op_process_mesh = pipeline_process_meshes[pipeline_stage]\n\n if op.type in PlanSpace.not_enum_ops:\n op_dist_attr = OperatorDistributedAttribute()\n op_dist_attr.process_mesh = op_process_mesh\n for var_name in op.input_arg_names:\n if var_name in PlanSpace.special_vars:\n op_dist_attr.set_input_dims_mapping(var_name, [])\n else:\n dims_mapping = [-1 for i in vars[var_name].shape]\n op_dist_attr.set_input_dims_mapping(var_name,\n dims_mapping)\n\n for var_name in op.output_arg_names:\n if var_name in PlanSpace.special_vars:\n op_dist_attr.set_output_dims_mapping(var_name, [])\n else:\n dims_mapping = [-1 for i in vars[var_name].shape]\n op_dist_attr.set_output_dims_mapping(var_name,\n dims_mapping)\n op_valid_dist_attrs = [op_dist_attr]\n pipeline_stage = 0 if pipeline_stage != -1 else pipeline_stage\n else:\n op_valid_dist_attrs = PlanSpace._enum_valid_dist_attr_for_op(\n program, op, op_process_mesh)\n\n assert op_valid_dist_attrs is not None, \"Enumerate {} valid distributed attribute failed.\".format(\n op)\n valid_dist_attr_dict[op.desc.id(\n )] = [op_valid_dist_attrs, pipeline_stage]\n\n return valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh\n\n\nclass SearchAlgorithm:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n self.name = name\n\n def search(self):\n raise NotImplementedError(\"Please Implement this method in subclass.\")\n\n\nclass MCMC(SearchAlgorithm):\n def __init__(self, serial_program_info, parallelizer, max_search_times=5):\n super(MCMC, self).__init__(\"mcmc\")\n self._serial_program_info = serial_program_info\n self._max_search_times = max_search_times\n self._parallelizer = parallelizer\n\n @property\n def serial_program_info(self):\n return self._serial_program_info\n\n @property\n def parallelizer(self):\n return self._parallelizer\n\n @property\n def max_search_times(self):\n return self._max_search_times\n\n def make_special_op_unshard(self, op, ops, vars, dist_context,\n valid_dist_attr_dict):\n if op.type == \"softmax_with_cross_entropy\":\n for var_name in op.input_arg_names:\n dims_mapping = dist_context.get_op_dist_attr_for_program(\n op).get_input_dims_mapping(var_name)\n if dims_mapping != dist_context.get_tensor_dist_attr_for_program(\n vars[var_name]).dims_mapping:\n has_changed = False\n for search_op in ops:\n if var_name in search_op.output_arg_names:\n op_dist_attr_list = valid_dist_attr_dict[\n search_op.desc.id()][0]\n for op_dist_attr in op_dist_attr_list:\n if op_dist_attr.get_output_dims_mapping(\n var_name) == dims_mapping:\n dist_context.set_op_dist_attr_for_program(\n search_op, op_dist_attr)\n tensor_dist_attr = TensorDistributedAttribute(\n )\n 
tensor_dist_attr.process_mesh = op_dist_attr.process_mesh\n tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping(\n var_name)\n dist_context.set_tensor_dist_attr_for_program(\n vars[var_name], tensor_dist_attr)\n has_changed = True\n break\n if has_changed:\n break\n if not has_changed:\n raise ValueError(\n \"Change softmax_with_cross_entropy dist attr failed\")\n\n def init_program(self, valid_dist_attr_dict, program,\n pipeline_process_meshes, global_process_mesh):\n ops = program.global_block().ops\n vars = program.global_block().vars\n new_dist_context = DistributedContext()\n\n for op in ops:\n op_valid_dist_attr_list = valid_dist_attr_dict[op.desc.id()][0]\n random_op_dist_attr = np.random.randint(\n len(op_valid_dist_attr_list))\n init_op_dist_attr = op_valid_dist_attr_list[random_op_dist_attr]\n new_dist_context.set_op_dist_attr_for_program(op, init_op_dist_attr)\n for var_name in op.input_arg_names:\n if var_name == \"lod_tensor_blocking_queue_0\":\n continue\n if new_dist_context.get_tensor_dist_attr_for_program(vars[\n var_name]) is None:\n tensor_dist_attr = TensorDistributedAttribute()\n tensor_dist_attr.process_mesh = init_op_dist_attr.process_mesh\n tensor_dist_attr.dims_mapping = init_op_dist_attr.get_input_dims_mapping(\n var_name)\n new_dist_context.set_tensor_dist_attr_for_program(\n vars[var_name], tensor_dist_attr)\n\n for var_name in op.output_arg_names:\n tensor_dist_attr = TensorDistributedAttribute()\n tensor_dist_attr.process_mesh = init_op_dist_attr.process_mesh\n tensor_dist_attr.dims_mapping = init_op_dist_attr.get_output_dims_mapping(\n var_name)\n new_dist_context.set_tensor_dist_attr_for_program(\n vars[var_name], tensor_dist_attr)\n\n # NOTE: this is a temporary solution to make softmax_with_cross_entropy unshard\n self.make_special_op_unshard(op, ops, vars, new_dist_context,\n valid_dist_attr_dict)\n\n # add process meshes to distributed context\n if global_process_mesh is not None:\n new_dist_context.add_process_mesh(global_process_mesh)\n elif pipeline_process_meshes is not None:\n for process_mesh in pipeline_process_meshes:\n new_dist_context.add_process_mesh(process_mesh)\n\n return new_dist_context\n\n def estimate_searched_strategy_cost(self,\n dist_context,\n pipeline_process_meshes=None):\n cost = None\n # get all distributed programs\n all_dist_main_program = get_all_distributed_main_program(\n self.serial_program_info, dist_context, self.parallelizer)\n pipeline_config = [\n process_mesh.processes for process_mesh in pipeline_process_meshes\n ] if pipeline_process_meshes is not None else None\n microbatch_size = 1\n for program in all_dist_main_program:\n searched_batch_size = False\n for var in program.list_vars():\n if var.is_data and \"@RESHARD\" in var.name:\n microbatch_size = var.shape[0]\n searched_batch_size = True\n break\n if searched_batch_size:\n break\n\n from .utils import get_standalone_cost_data\n standalone_cost_data = get_standalone_cost_data(all_dist_main_program)\n\n # cost model does not support cluster argument\n cost = estimate_cost(\n all_dist_main_program,\n cluster=None,\n pipeline_config=pipeline_config,\n standalone_cost_data=standalone_cost_data,\n batch_size=microbatch_size)\n\n return cost\n\n def set_tensor_dist_attr(self, op, op_dist_attr, vars, dist_context):\n # set output tensor distributed attribute\n for var_name in op.output_arg_names:\n process_mesh = op_dist_attr.process_mesh\n tensor_dist_attr = TensorDistributedAttribute()\n tensor_dist_attr.process_mesh = process_mesh\n 
tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping(\n var_name)\n dist_context.set_tensor_dist_attr_for_program(vars[var_name],\n tensor_dist_attr)\n\n # set input tensor distributed attribute if input is data or parameter\n for var_name in op.input_arg_names:\n if vars[var_name].is_parameter or vars[var_name].is_data:\n process_mesh = op_dist_attr.process_mesh\n tensor_dist_attr = TensorDistributedAttribute()\n tensor_dist_attr.process_mesh = process_mesh\n tensor_dist_attr.dims_mapping = op_dist_attr.get_input_dims_mapping(\n var_name)\n dist_context.set_tensor_dist_attr_for_program(vars[var_name],\n tensor_dist_attr)\n\n def change_process_mesh(self, op, changed_process_mesh, vars, dist_context):\n dist_context.get_op_dist_attr_for_program(\n op).process_mesh = changed_process_mesh\n for var_name in op.output_arg_names:\n dist_context.get_tensor_dist_attr_for_program(vars[\n var_name]).process_mesh = changed_process_mesh\n for var_name in op.input_arg_names:\n if vars[var_name].is_parameter or vars[var_name].is_data:\n dist_context.get_tensor_dist_attr_for_program(vars[\n var_name]).process_mesh = changed_process_mesh\n\n def search_once(self,\n program,\n valid_dist_attr_dict,\n dist_context,\n pipeline_process_meshes=None):\n raw_ops = program.global_block().ops\n ops = []\n for op in raw_ops:\n if op.type not in PlanSpace.not_enum_ops:\n ops.append(op)\n assert ops, \"The ops of program have no distributed attributes.\"\n vars = program.global_block().vars\n new_dist_context = copy.deepcopy(dist_context)\n new_dist_context._dist_op_context = DistributedOperatorContext()\n new_valid_dist_attr_dict = None\n random_selected_op_idx = np.random.randint(len(ops))\n selected_op = ops[random_selected_op_idx]\n op_valid_dist_attr_list = valid_dist_attr_dict[selected_op.desc.id()][0]\n pipeline_stage = valid_dist_attr_dict[selected_op.desc.id()][1]\n random_selected_dist_attr_idx = np.random.randint(\n len(op_valid_dist_attr_list))\n selected_op_dist_attr = copy.deepcopy(op_valid_dist_attr_list[\n random_selected_dist_attr_idx])\n\n start_idx = ops[0].desc.id()\n if pipeline_stage > -1:\n # in pipeline mode, the above phase just select a dims mapping\n # 0 represents not changed, 1 represents to be the same with before stage, 2 represents to be the same with the latter stage\n new_valid_dist_attr_dict = copy.deepcopy(valid_dist_attr_dict)\n changed_mode = np.random.randint(3)\n if changed_mode == 0:\n # not change the process mesh, just change dims mapping\n new_dist_context.set_op_dist_attr_for_program(\n selected_op, selected_op_dist_attr)\n self.set_tensor_dist_attr(selected_op, selected_op_dist_attr,\n vars, new_dist_context)\n\n elif changed_mode == 1:\n changed_stage = pipeline_stage - 1\n if changed_stage == -1 or random_selected_op_idx == len(ops) - 1 or \\\n (random_selected_op_idx + 1 == len(ops) - 1 and new_valid_dist_attr_dict[ops[random_selected_op_idx + 1].desc.id()][1] == pipeline_stage + 1 ):\n new_dist_context.set_op_dist_attr_for_program(\n selected_op, selected_op_dist_attr)\n self.set_tensor_dist_attr(selected_op,\n selected_op_dist_attr, vars,\n new_dist_context)\n\n else:\n selected_op_process_mesh = pipeline_process_meshes[\n pipeline_stage]\n next_op_id = ops[random_selected_op_idx + 1].desc.id()\n if new_valid_dist_attr_dict[next_op_id][\n 1] == pipeline_stage + 1 and random_selected_op_idx + 1 != len(\n ops) - 1:\n new_valid_dist_attr_dict[next_op_id][1] = pipeline_stage\n for op_dist_attr in new_valid_dist_attr_dict[\n next_op_id][0]:\n 
op_dist_attr.process_mesh = selected_op_process_mesh\n # set next op dist attr in the discontext and output/input tensor process mesh\n self.change_process_mesh(\n ops[random_selected_op_idx + 1],\n selected_op_process_mesh, vars, new_dist_context)\n\n # change the selected op stage and output dist attr\n new_valid_dist_attr_dict[selected_op.desc.id()][\n 1] = changed_stage\n new_process_mesh = pipeline_process_meshes[changed_stage]\n selected_op_dist_attr.process_mesh = new_process_mesh\n for op_dist_attr in new_valid_dist_attr_dict[\n selected_op.desc.id()][0]:\n op_dist_attr.process_mesh = new_process_mesh\n new_dist_context.set_op_dist_attr_for_program(\n selected_op, selected_op_dist_attr)\n\n self.set_tensor_dist_attr(selected_op,\n selected_op_dist_attr, vars,\n new_dist_context)\n\n # change the pre op stage\n for idx in range(random_selected_op_idx - 1, -1, -1):\n stage = new_valid_dist_attr_dict[ops[idx].desc.id()][1]\n valid_dist_attr_list = new_valid_dist_attr_dict[ops[\n idx].desc.id()][0]\n new_process_mesh = pipeline_process_meshes[\n changed_stage]\n if stage == changed_stage + 1:\n new_valid_dist_attr_dict[ops[idx].desc.id()][\n 1] = changed_stage\n for op_dist_attr in valid_dist_attr_list:\n op_dist_attr.process_mesh = new_process_mesh\n new_dist_context.get_op_dist_attr_for_program(ops[\n idx]).process_mesh = new_process_mesh\n # change process mesh of the output and input tensor\n self.change_process_mesh(ops[idx], new_process_mesh,\n vars, new_dist_context)\n else:\n break\n\n else:\n changed_stage = pipeline_stage + 1\n if changed_stage == len(\n pipeline_process_meshes) or random_selected_op_idx == 0 or \\\n (new_valid_dist_attr_dict[ops[random_selected_op_idx - 1].desc.id()][1] == pipeline_stage - 1 and (random_selected_op_idx == 1)):\n new_dist_context.set_op_dist_attr_for_program(\n selected_op, selected_op_dist_attr)\n self.set_tensor_dist_attr(selected_op,\n selected_op_dist_attr, vars,\n new_dist_context)\n\n else:\n selected_op_process_mesh = pipeline_process_meshes[\n pipeline_stage]\n pre_op_id = ops[random_selected_op_idx - 1].desc.id()\n if new_valid_dist_attr_dict[pre_op_id][\n 1] == pipeline_stage - 1 and random_selected_op_idx != 1:\n new_valid_dist_attr_dict[pre_op_id][1] = pipeline_stage\n for op_dist_attr in new_valid_dist_attr_dict[pre_op_id][\n 0]:\n op_dist_attr.process_mesh = selected_op_process_mesh\n # set pre op dist attr in the discontext and output tensor process mesh\n self.change_process_mesh(\n ops[random_selected_op_idx - 1],\n selected_op_process_mesh, vars, new_dist_context)\n\n # change the selected op stage and output tensor dist attr\n new_valid_dist_attr_dict[selected_op.desc.id()][\n 1] = changed_stage\n new_process_mesh = pipeline_process_meshes[changed_stage]\n selected_op_dist_attr.process_mesh = new_process_mesh\n for op_dist_attr in new_valid_dist_attr_dict[\n selected_op.desc.id()][0]:\n op_dist_attr.process_mesh = new_process_mesh\n new_dist_context.set_op_dist_attr_for_program(\n selected_op, selected_op_dist_attr)\n self.set_tensor_dist_attr(selected_op,\n selected_op_dist_attr, vars,\n new_dist_context)\n\n # change the next op stage\n for idx in range(random_selected_op_idx + 1, len(ops)):\n stage = new_valid_dist_attr_dict[ops[idx].desc.id()][1]\n valid_dist_attr_list = new_valid_dist_attr_dict[ops[\n idx].desc.id()][0]\n new_process_mesh = pipeline_process_meshes[\n changed_stage]\n if stage == changed_stage - 1:\n new_valid_dist_attr_dict[ops[idx].desc.id()][\n 1] = changed_stage\n for op_dist_attr in 
valid_dist_attr_list:\n op_dist_attr.process_mesh = new_process_mesh\n\n new_dist_context.get_op_dist_attr_for_program(ops[\n idx]).process_mesh = new_process_mesh\n # change the output tensor dist attr\n self.change_process_mesh(ops[idx], new_process_mesh,\n vars, new_dist_context)\n else:\n break\n else:\n new_dist_context.set_op_dist_attr_for_program(selected_op,\n selected_op_dist_attr)\n self.set_tensor_dist_attr(selected_op, selected_op_dist_attr, vars,\n new_dist_context)\n\n for op in ops:\n # make softmax_with_cross_entropy unshard\n if op.type == \"softmax_with_cross_entropy\":\n self.make_special_op_unshard(op, ops, vars, new_dist_context,\n valid_dist_attr_dict)\n break\n\n if new_valid_dist_attr_dict is None:\n return valid_dist_attr_dict, new_dist_context\n else:\n return new_valid_dist_attr_dict, new_dist_context\n\n def _search_core(self,\n valid_dist_attr_dict,\n init_dist_context,\n pipeline_process_meshes=None):\n times = 0\n best_dist_context = init_dist_context\n cost = self.estimate_searched_strategy_cost(\n init_dist_context, pipeline_process_meshes).runtime\n min_cost = cost\n while times < self.max_search_times:\n times += 1\n new_dist_context = self.search_once(\n self.serial_program_info.train_program, valid_dist_attr_dict,\n best_dist_context, pipeline_process_meshes)[1]\n cur_cost = self.estimate_searched_strategy_cost(\n new_dist_context, pipeline_process_meshes).runtime\n if (min_cost - cur_cost) > 0:\n best_dist_context = copy.deepcopy(new_dist_context)\n min_cost = cur_cost\n times = 0\n return best_dist_context, min_cost\n\n def search(self):\n logging.info(\"Start MCMC searching.\")\n start_time = time.time()\n train_program = self.serial_program_info.train_program\n cluster = self.serial_program_info.cluster\n processes = paddle.distributed.get_world_size(\n ) if cluster is None else len(cluster.get_all_devices(\"GPU\"))\n assert processes > 0, \"Get process failed.\"\n\n process_mesh_topology_list = PlanSpace.enum_process_mesh_topology(\n processes)\n searched_dist_context = None\n min_cost = None\n\n searched_pipeline_dist_context = None\n pipeline_min_cost = None\n for process_mesh_topology in process_mesh_topology_list:\n logging.info(\n \"MCMC search: search process mesh {} with pipeline mode.\".\n format(process_mesh_topology))\n valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program(\n train_program, process_mesh_topology, True)\n init_dist_context = self.init_program(\n valid_dist_attr_dict, train_program, pipeline_process_meshes,\n global_process_mesh)\n best_dist_context, cost = self._search_core(valid_dist_attr_dict,\n init_dist_context,\n pipeline_process_meshes)\n logging.info(\n \"MCMC search: the min cost is {} in the process mesh {} with pipeline mode.\".\n format(cost, process_mesh_topology))\n best_dist_context._dist_op_context = DistributedOperatorContext()\n pipeline_min_cost = cost if pipeline_min_cost is None else pipeline_min_cost\n searched_pipeline_dist_context = best_dist_context if searched_pipeline_dist_context is None else searched_pipeline_dist_context\n if pipeline_min_cost > cost:\n searched_pipeline_dist_context = best_dist_context\n pipeline_min_cost = cost\n\n searched_non_pipeline_dist_context = None\n non_pipeline_min_cost = None\n for process_mesh_topology in process_mesh_topology_list:\n # if process_mesh_topology shape is 3, include pipeline mode by default\n if len(process_mesh_topology) == 3:\n continue\n logging.info(\n \"MCMC search: search process mesh 
{} without pipeline mode.\".\n format(process_mesh_topology))\n valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program(\n train_program, process_mesh_topology, False)\n init_dist_context = self.init_program(\n valid_dist_attr_dict, train_program, pipeline_process_meshes,\n global_process_mesh)\n best_dist_context, cost = self._search_core(valid_dist_attr_dict,\n init_dist_context,\n pipeline_process_meshes)\n logging.info(\n \"MCMC search: the min cost is {} in the process mesh {} without pipeline mode.\".\n format(cost, process_mesh_topology))\n best_dist_context._dist_op_context = DistributedOperatorContext()\n non_pipeline_min_cost = cost if non_pipeline_min_cost is None else non_pipeline_min_cost\n searched_non_pipeline_dist_context = best_dist_context if searched_non_pipeline_dist_context is None else searched_non_pipeline_dist_context\n if non_pipeline_min_cost > cost:\n searched_non_pipeline_dist_context = best_dist_context\n non_pipeline_min_cost = cost\n\n if non_pipeline_min_cost > pipeline_min_cost:\n searched_dist_context = searched_pipeline_dist_context\n min_cost = pipeline_min_cost\n logging.info(\n \"Better set FLAGS_benchmark=1 to avoid hang problem in the pipeline mode.\"\n )\n else:\n searched_dist_context = searched_non_pipeline_dist_context\n min_cost = non_pipeline_min_cost\n\n # rebuild g_process_group\n pg0 = get_process_group(0)\n for process_mesh in searched_dist_context._process_meshes:\n pg0.add_ranks(process_mesh.processes)\n end_time = time.time()\n logging.info(\n \"End MCMC searching: the min cost is {} and the search time is {}s.\".\n format(min_cost, end_time - start_time))\n return searched_dist_context, min_cost\n\n\nclass Planner:\n def __init__(self, serial_program_info, parallelizer,\n algorithm_config=None):\n self._serial_program_info = serial_program_info\n self._parallelizer = parallelizer\n self._algorithm_config = algorithm_config\n self._algorithm_searcher = self.create_algorithm_searcher(\n algorithm_config)\n\n @property\n def serial_program_info(self):\n return self._serial_program_info\n\n @property\n def algorithm_config(self):\n return self._algorithm_config\n\n @property\n def algorithm_searcher(self):\n return self._algorithm_searcher\n\n @property\n def parallelizer(self):\n return self._parallelizer\n\n def create_algorithm_searcher(self, algorithm_config):\n name = algorithm_config.get(\"name\", None)\n assert name is not None, \"Invalid algorithm config.\"\n\n algorithm_searcher = None\n if name == \"mcmc\":\n # NOTE: Only GPU clusters are supported now.\n max_search_times = algorithm_config.get(\"max_search_times\", None)\n algorithm_searcher = MCMC(\n self.serial_program_info, self.parallelizer,\n max_search_times) if max_search_times is not None else MCMC(\n self.serial_program_info, self.parallelizer)\n else:\n raise NotImplementedError(\n \"Other search algorithms have not been supported now.\")\n\n return algorithm_searcher\n\n def search(self):\n return self.algorithm_searcher.search()\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maxim5/time_series_machine_learning
[ "819d0dc71078e2bd4d991c5759e52a3a973267b0", "819d0dc71078e2bd4d991c5759e52a3a973267b0" ]
[ "poloniex/api.py", "train/evaluator.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# See the full API here:\n# https://poloniex.com/support/api/\n\n__author__ = 'maxim'\n\n\nfrom six import string_types\nimport pandas as pd\n\nfrom util import *\n\n\nAVAILABLE_PERIODS = [300, 900, 1800, 7200, 14400, 86400]\n\n\ndef get_chart_data(pair, start_time, end_time, period):\n url = 'https://poloniex.com/public?command=returnChartData&currencyPair=%s&start=%d&end=%d&period=%d' % \\\n (pair, start_time, end_time, period_to_seconds(period))\n info('Fetching %s: %s' % (pair, url))\n df = pd.read_json(url, convert_dates=False)\n info('Fetched %s (%s)' % (pair, period_to_human(period)))\n return df\n\n\ndef get_24h_volume():\n url = 'https://poloniex.com/public?command=return24hVolume'\n info('Fetching %s' % url)\n return pd.read_json(url)\n\n\ndef period_to_human(period):\n if isinstance(period, string_types):\n return period\n if period == 300:\n return '5m'\n if period == 900:\n return '15m'\n if period == 1800:\n return '30m'\n if period == 7200:\n return '2h'\n if period == 14400:\n return '4h'\n if period == 86400:\n return 'day'\n return str(period)\n\n\ndef period_to_seconds(period):\n if isinstance(period, int):\n return period\n if period == '5m':\n return 300\n if period == '15m':\n return 900\n if period == '30m':\n return 1800\n if period == '2h':\n return 7200\n if period == '4h':\n return 14400\n if period == 'day':\n return 86400\n return int(period)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'maxim'\n\n\nimport numpy as np\nimport pandas as pd\n\n\nclass Evaluator(object):\n def __init__(self, risk_factor=1.0):\n super(Evaluator, self).__init__()\n self._risk_factor = risk_factor\n\n def eval(self, model, test_set):\n prediction = model.predict(test_set.x)\n stats = self._compute_stats(prediction, test_set.y)\n return self._evaluate(stats), stats\n\n def stats_str(self, stats):\n return 'Mean absolute error: %.6f\\n' % stats['mae'] + \\\n 'SD absolute error: %.6f\\n' % stats['stdae'] + \\\n 'Sign accuracy: %.6f\\n' % stats['sign_accuracy'] + \\\n 'Mean squared error: %.6f\\n' % stats['mse'] + \\\n 'Sqrt of MSE: %.6f\\n' % stats['sqrt_mse'] + \\\n 'Mean error: %.6f\\n' % stats['me'] + \\\n 'Residuals stats: %s\\n' % _series_stats(stats['raw_residuals']) + \\\n 'Relative residuals: %s\\n' % _series_stats(stats['rel_residuals'])\n\n def _compute_stats(self, prediction, truth):\n residuals = np.abs(prediction - truth)\n return {\n 'mae': np.mean(residuals),\n 'stdae': np.std(residuals),\n 'sign_accuracy': np.mean(np.equal(np.sign(prediction), np.sign(truth))),\n 'mse': np.mean(np.power(prediction - truth, 2.0)),\n 'sqrt_mse': np.mean(np.power(prediction - truth, 2.0)) ** 0.5,\n 'me': np.mean(prediction - truth),\n 'raw_residuals': residuals,\n 'rel_residuals': residuals / np.maximum(np.abs(truth), 1e-3),\n }\n\n def _evaluate(self, stats):\n return stats['mae'] + self._risk_factor * stats['stdae']\n\n\ndef _series_stats(series):\n stats = pd.Series(series).describe(percentiles=[0.25, 0.5, 0.75, 0.9])\n return 'mean=%.4f std=%.4f percentile=[0%%=%.4f 25%%=%.4f 50%%=%.4f 75%%=%.4f 90%%=%.4f 100%%=%.4f]' % \\\n (stats['mean'], stats['std'], stats['min'], stats['25%'], stats['50%'], stats['75%'], stats['90%'], stats['max'])\n" ]
[ [ "pandas.read_json" ], [ "numpy.abs", "pandas.Series", "numpy.power", "numpy.sign", "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mrsempress/OBMO_GUPNet
[ "4b03f372743a50c439aaedbb203c67394ab6a293" ]
[ "lib/datasets/kitti.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport torch.nn.functional as F\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom lib.datasets.utils import angle2class\nfrom lib.datasets.utils import gaussian_radius\nfrom lib.datasets.utils import draw_umich_gaussian\nfrom lib.datasets.utils import get_angle_from_box3d,check_range\nfrom lib.datasets.kitti_utils import get_objects_from_label\nfrom lib.datasets.kitti_utils import Calibration\nfrom lib.datasets.kitti_utils import get_affine_transform\nfrom lib.datasets.kitti_utils import affine_transform\nfrom lib.datasets.kitti_utils import compute_box_3d\nimport pdb\n\nclass KITTI(data.Dataset):\n def __init__(self, root_dir, split, cfg):\n # basic configuration\n self.num_classes = 3\n self.max_objs = 50\n self.class_name = ['Pedestrian', 'Car', 'Cyclist']\n self.cls2id = {'Pedestrian': 0, 'Car': 1, 'Cyclist': 2}\n self.resolution = np.array([1280, 384]) # W * H\n self.use_3d_center = cfg['use_3d_center']\n self.writelist = cfg['writelist']\n if cfg['class_merging']:\n self.writelist.extend(['Van', 'Truck'])\n if cfg['use_dontcare']:\n self.writelist.extend(['DontCare'])\n ''' \n ['Car': np.array([3.88311640418,1.62856739989,1.52563191462]),\n 'Pedestrian': np.array([0.84422524,0.66068622,1.76255119]),\n 'Cyclist': np.array([1.76282397,0.59706367,1.73698127])] \n ''' \n ##l,w,h\n self.cls_mean_size = np.array([[1.76255119 ,0.66068622 , 0.84422524 ],\n [1.52563191462 ,1.62856739989, 3.88311640418],\n [1.73698127 ,0.59706367 , 1.76282397 ]]) \n \n # data split loading\n assert split in ['train', 'val', 'trainval', 'test']\n self.split = split\n split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')\n self.idx_list = [x.strip() for x in open(split_dir).readlines()]\n\n # path configuration\n self.data_dir = os.path.join(root_dir, 'KITTI', 'testing' if split == 'test' else 'training')\n self.image_dir = os.path.join(self.data_dir, 'image_2')\n self.depth_dir = os.path.join(self.data_dir, 'depth')\n self.calib_dir = os.path.join(self.data_dir, 'calib')\n self.label_dir = os.path.join(self.data_dir, 'label_2')\n\n # data augmentation configuration\n self.data_augmentation = True if split in ['train', 'trainval'] else False\n self.random_flip = cfg['random_flip']\n self.random_crop = cfg['random_crop']\n self.scale = cfg['scale']\n self.shift = cfg['shift']\n\n # statistics\n self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)\n self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)\n\n # others\n self.downsample = 4\n \n def get_image(self, idx):\n img_file = os.path.join(self.image_dir, '%06d.png' % idx)\n assert os.path.exists(img_file)\n return Image.open(img_file) # (H, W, 3) RGB mode\n\n\n def get_label(self, idx):\n label_file = os.path.join(self.label_dir, '%06d.txt' % idx)\n assert os.path.exists(label_file)\n return get_objects_from_label(label_file)\n\n def get_calib(self, idx):\n calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)\n assert os.path.exists(calib_file)\n return Calibration(calib_file)\n\n\n def __len__(self):\n return self.idx_list.__len__()\n\n def __getitem__(self, item):\n # ============================ get inputs ===========================\n index = int(self.idx_list[item]) # index mapping, get real data id\n # image loading\n img = self.get_image(index)\n img_size = np.array(img.size)\n\n # data augmentation for image\n center = np.array(img_size) / 2\n crop_size = img_size\n random_crop_flag, random_flip_flag = False, False\n 
if self.data_augmentation:\n if np.random.random() < self.random_flip:\n random_flip_flag = True\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n\n if np.random.random() < self.random_crop:\n random_crop_flag = True\n crop_size = img_size * np.clip(np.random.randn()*self.scale + 1, 1 - self.scale, 1 + self.scale)\n center[0] += img_size[0] * np.clip(np.random.randn() * self.shift, -2 * self.shift, 2 * self.shift)\n center[1] += img_size[1] * np.clip(np.random.randn() * self.shift, -2 * self.shift, 2 * self.shift)\n\n # add affine transformation for 2d images.\n trans, trans_inv = get_affine_transform(center, crop_size, 0, self.resolution, inv=1)\n img = img.transform(tuple(self.resolution.tolist()),\n method=Image.AFFINE,\n data=tuple(trans_inv.reshape(-1).tolist()),\n resample=Image.BILINEAR)\n coord_range = np.array([center-crop_size/2,center+crop_size/2]).astype(np.float32) \n # image encoding\n img = np.array(img).astype(np.float32) / 255.0\n img = (img - self.mean) / self.std\n img = img.transpose(2, 0, 1) # C * H * W\n\n calib = self.get_calib(index)\n features_size = self.resolution // self.downsample# W * H\n # ============================ get labels ==============================\n if self.split!='test':\n objects = self.get_label(index)\n # data augmentation for labels\n if random_flip_flag:\n calib.flip(img_size)\n for object in objects:\n [x1, _, x2, _] = object.box2d\n object.box2d[0], object.box2d[2] = img_size[0] - x2, img_size[0] - x1\n object.ry = np.pi - object.ry\n object.pos[0] *= -1\n if object.ry > np.pi: object.ry -= 2 * np.pi\n if object.ry < -np.pi: object.ry += 2 * np.pi\n # labels encoding\n heatmap = np.zeros((self.num_classes, features_size[1], features_size[0]), dtype=np.float32) # C * H * W\n size_2d = np.zeros((self.max_objs, 2), dtype=np.float32)\n offset_2d = np.zeros((self.max_objs, 2), dtype=np.float32)\n depth = np.zeros((self.max_objs, 1), dtype=np.float32)\n heading_bin = np.zeros((self.max_objs, 1), dtype=np.int64)\n heading_res = np.zeros((self.max_objs, 1), dtype=np.float32)\n src_size_3d = np.zeros((self.max_objs, 3), dtype=np.float32)\n size_3d = np.zeros((self.max_objs, 3), dtype=np.float32)\n offset_3d = np.zeros((self.max_objs, 2), dtype=np.float32)\n height2d = np.zeros((self.max_objs, 1), dtype=np.float32)\n cls_ids = np.zeros((self.max_objs), dtype=np.int64)\n indices = np.zeros((self.max_objs), dtype=np.int64)\n mask_2d = np.zeros((self.max_objs), dtype=np.uint8)\n mask_3d = np.zeros((self.max_objs), dtype=np.uint8)\n object_num = len(objects) if len(objects) < self.max_objs else self.max_objs\n for i in range(object_num):\n # filter objects by writelist\n if objects[i].cls_type not in self.writelist:\n continue\n \n # filter inappropriate samples by difficulty\n if objects[i].level_str == 'UnKnown' or objects[i].pos[-1] < 2:\n continue\n \n # process 2d bbox & get 2d center\n bbox_2d = objects[i].box2d.copy()\n # add affine transformation for 2d boxes.\n bbox_2d[:2] = affine_transform(bbox_2d[:2], trans)\n bbox_2d[2:] = affine_transform(bbox_2d[2:], trans)\n # modify the 2d bbox according to pre-compute downsample ratio\n bbox_2d[:] /= self.downsample\n \n # process 3d bbox & get 3d center\n center_2d = np.array([(bbox_2d[0] + bbox_2d[2]) / 2, (bbox_2d[1] + bbox_2d[3]) / 2], dtype=np.float32) # W * H\n center_3d = objects[i].pos + [0, -objects[i].h / 2, 0] # real 3D center in 3D space\n center_3d = center_3d.reshape(-1, 3) # shape adjustment (N, 3)\n center_3d, _ = calib.rect_to_img(center_3d) # project 3D center to image plane\n center_3d 
= center_3d[0] # shape adjustment\n center_3d = affine_transform(center_3d.reshape(-1), trans)\n center_3d /= self.downsample \n \n # generate the center of gaussian heatmap [optional: 3d center or 2d center]\n center_heatmap = center_3d.astype(np.int32) if self.use_3d_center else center_2d.astype(np.int32)\n if center_heatmap[0] < 0 or center_heatmap[0] >= features_size[0]: continue\n if center_heatmap[1] < 0 or center_heatmap[1] >= features_size[1]: continue\n \n # generate the radius of gaussian heatmap\n w, h = bbox_2d[2] - bbox_2d[0], bbox_2d[3] - bbox_2d[1]\n radius = gaussian_radius((w, h))\n radius = max(0, int(radius))\n \n if objects[i].cls_type in ['Van', 'Truck', 'DontCare']:\n draw_umich_gaussian(heatmap[1], center_heatmap, radius)\n continue\n \n cls_id = self.cls2id[objects[i].cls_type]\n cls_ids[i] = cls_id\n draw_umich_gaussian(heatmap[cls_id], center_heatmap, radius)\n \n # encoding 2d/3d offset & 2d size\n indices[i] = center_heatmap[1] * features_size[0] + center_heatmap[0]\n offset_2d[i] = center_2d - center_heatmap\n size_2d[i] = 1. * w, 1. * h\n \n # encoding depth\n depth[i] = objects[i].pos[-1]\n \n # encoding heading angle\n #heading_angle = objects[i].alpha\n heading_angle = calib.ry2alpha(objects[i].ry, (objects[i].box2d[0]+objects[i].box2d[2])/2)\n if heading_angle > np.pi: heading_angle -= 2 * np.pi # check range\n if heading_angle < -np.pi: heading_angle += 2 * np.pi\n heading_bin[i], heading_res[i] = angle2class(heading_angle)\n \n # encoding 3d offset & size_3d\n offset_3d[i] = center_3d - center_heatmap\n src_size_3d[i] = np.array([objects[i].h, objects[i].w, objects[i].l], dtype=np.float32)\n mean_size = self.cls_mean_size[self.cls2id[objects[i].cls_type]]\n size_3d[i] = src_size_3d[i] - mean_size\n\n #objects[i].trucation <=0.5 and objects[i].occlusion<=2 and (objects[i].box2d[3]-objects[i].box2d[1])>=25:\n if objects[i].trucation <=0.5 and objects[i].occlusion<=2: \n mask_2d[i] = 1 \n targets = {'depth': depth,\n 'size_2d': size_2d,\n 'heatmap': heatmap,\n 'offset_2d': offset_2d,\n 'indices': indices,\n 'size_3d': size_3d,\n 'offset_3d': offset_3d,\n 'heading_bin': heading_bin,\n 'heading_res': heading_res,\n 'cls_ids': cls_ids,\n 'mask_2d': mask_2d} \n else:\n targets = {}\n # collect return data\n inputs = img\n info = {'img_id': index,\n 'img_size': img_size,\n 'bbox_downsample_ratio': img_size/features_size} \n return inputs, calib.P2, coord_range, targets, info #calib.P2\n\n\n\n\n\nif __name__ == '__main__':\n from torch.utils.data import DataLoader\n cfg = {'random_flip':0.0, 'random_crop':1.0, 'scale':0.4, 'shift':0.1, 'use_dontcare': False,\n 'class_merging': False, 'writelist':['Pedestrian', 'Car', 'Cyclist'], 'use_3d_center':False}\n dataset = KITTI('../../data', 'train', cfg)\n dataloader = DataLoader(dataset=dataset, batch_size=1)\n print(dataset.writelist)\n\n for batch_idx, (inputs, P2, coord_range, targets, info) in enumerate(dataloader):\n # test image\n img = inputs[0].numpy().transpose(1, 2, 0)\n img = (img * dataset.std + dataset.mean) * 255\n img = Image.fromarray(img.astype(np.uint8))\n img.show()\n # print(targets['size_3d'][0][0])\n\n # test heatmap\n heatmap = targets['heatmap'][0] # image id\n heatmap = Image.fromarray(heatmap[0].numpy() * 255) # cats id\n heatmap.show()\n\n break\n\n\n # print ground truth fisrt\n objects = dataset.get_label(0)\n for object in objects:\n print(object.to_kitti_format())\n" ]
[ [ "numpy.random.random", "torch.utils.data.DataLoader", "numpy.random.randn", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Makarov-AA/CV-SUMMER-CAMP-2021
[ "fa7154842a4b6b295b8fb1a9986329694a4410d0" ]
[ "practice/4_tracking/tracker.py" ]
[ "import numpy as np\nimport math\nimport logging as log\nimport sys\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import cosine\nfrom common.feature_distance import calc_features_similarity\nfrom common.common_objects import DetectedObject, validate_detected_object, Bbox\nfrom common.common_objects import get_bbox_center, get_dist, calc_bbox_area, get_bbox_size\nfrom common.find_best_assignment import solve_assignment_problem\nfrom common.annotation import AnnotationObject, AnnotationStorage\n\nclass Track:\n __next_track_id = 0\n def __init__(self, first_obj):\n self.objects = []\n self._track_id = Track.__next_track_id\n Track.__next_track_id += 1\n\n self.objects.append(first_obj)\n\n def _validate(self):\n assert len(self.objects) > 0\n for o in self.objects:\n validate_detected_object(o)\n for i in range(len(self.objects) - 1):\n self.objects[i].frame_index < self.objects[i+1].frame_index\n\n def add_object(self, o):\n self._validate()\n validate_detected_object(o)\n\n last_frame_index = self.objects[-1].frame_index\n if not last_frame_index < o.frame_index:\n raise RuntimeError(\"Add object={} to track with the last_frame_index={}\".format(o, last_frame_index))\n\n self.objects.append(o)\n\n def last(self):\n return self.objects[-1]\n\n def get_id(self):\n return self._track_id\n\n def get_bbox_for_frame(self, cur_frame_ind):\n \"\"\"Finds bbox for frame index using linear approximation\"\"\"\n self._validate()\n i_found = None\n for i, o in enumerate(self.objects):\n if o.frame_index == cur_frame_ind:\n return o.bbox\n if o.frame_index > cur_frame_ind:\n i_found = i\n break\n if i_found is None: # cur_frame_ind after the last frame_index in track\n return None\n\n if i_found == 0: # cur_frame_ind before the first frame_index in track\n return None\n log.debug(\"using linear approximation for track id={}, frame_index={}\".format(self._track_id, cur_frame_ind))\n o1 = self.objects[i_found-1]\n o2 = self.objects[i_found]\n assert o1.frame_index < cur_frame_ind < o2.frame_index\n\n dindex = o2.frame_index - o1.frame_index\n d_cur_index1 = cur_frame_ind - o1.frame_index\n d_cur_index2 = o2.frame_index - cur_frame_ind\n\n bbox1 = o1.bbox\n bbox2 = o2.bbox\n\n res_bbox = [None, None, None, None]\n for k in range(4):\n # linear approximation for all bbox fields\n res_bbox[k] = (bbox1[k] * d_cur_index2 + bbox2[k] * d_cur_index1) / dindex\n res_bbox = Bbox(res_bbox[0], res_bbox[1], res_bbox[2], res_bbox[3])\n\n return res_bbox\n\nclass Tracker:\n def __init__(self, num_frames_to_remove_track, num_objects_to_make_track_valid, affinity_threshold):\n self.tracks = []\n self.track_archive = []\n self.num_frames_to_remove_track = num_frames_to_remove_track\n self.num_objects_to_make_track_valid = num_objects_to_make_track_valid\n self.affinity_threshold = affinity_threshold\n\n def add_objects(self, det_objs):\n log.debug(\"begin: handling {} objects\".format(len(det_objs)))\n if len(det_objs) == 0:\n return\n\n frame_index = det_objs[0].frame_index\n assert all(o.frame_index == frame_index for o in det_objs), \"All det_objs should have the same frame_index\"\n\n affinity_matrix = self._build_affinity_matrix(det_objs)\n self._validate_affinity_matrix(affinity_matrix, len(self.tracks), len(det_objs))\n\n self._log_affinity_matrix(affinity_matrix)\n\n decision, best_affinity = self._solve_assignment_problem(affinity_matrix)\n self._log_decision(decision, best_affinity, det_objs, frame_index)\n\n self._apply_decision(decision, det_objs, frame_index)\n 
self._move_obsolete_tracks_to_archive(frame_index)\n log.debug(\"end: handling {} objects\".format(len(det_objs)))\n\n @staticmethod\n def _validate_affinity_matrix(affinity_matrix, num_tracks, num_det_objs):\n assert isinstance(affinity_matrix, list)\n assert len(affinity_matrix) == num_tracks\n for affinity_row in affinity_matrix:\n assert isinstance(affinity_row, list)\n assert len(affinity_row) == num_det_objs\n assert all(isinstance(v, float) for v in affinity_row)\n assert all(v >= 0 for v in affinity_row)\n\n def _build_affinity_matrix(self, det_objs):\n affinity_matrix = []\n for t in self.tracks:\n affinity_row = []\n for o in det_objs:\n cur_affinity = self._calc_affinity(t, o)\n affinity_row.append(cur_affinity)\n\n affinity_matrix.append(affinity_row)\n\n return affinity_matrix\n\n def _calc_affinity(self, track, obj):\n affinity_appearance = self._calc_affinity_appearance(track, obj)\n affinity_position = self._calc_affinity_position(track, obj)\n affinity_shape = self._calc_affinity_shape(track, obj)\n return affinity_appearance * affinity_position * affinity_shape\n\n def _calc_affinity_appearance(self, track, obj):\n return calc_features_similarity(track.last().appearance_feature, obj.appearance_feature)\n\n def _calc_affinity_position(self, track, obj):\n last_track = track.last()\n bbox_area = calc_bbox_area(last_track.bbox)\n D = get_dist(get_bbox_center(last_track.bbox), get_bbox_center(obj.bbox))\n return math.exp(-(D ** 2) / bbox_area)\n\n def _calc_affinity_shape(self, track, obj):\n last_track = track.last()\n w_1, h_1 = get_bbox_size(last_track.bbox)\n w_2, h_2 = get_bbox_size(obj.bbox)\n return math.exp(-((w_1 - w_2) / w_1 + (h_1 - h_2) / h_1))\n\n @staticmethod\n def _log_affinity_matrix(affinity_matrix):\n with np.printoptions(precision=2, suppress=True, threshold=sys.maxsize, linewidth=sys.maxsize):\n log.debug(\"Affinity matrix =\\n{}\".format(np.array(affinity_matrix)))\n\n def _solve_assignment_problem(self, affinity_matrix):\n decision, best_affinity = solve_assignment_problem(affinity_matrix, self.affinity_threshold)\n return decision, best_affinity\n\n def _log_decision(self, decision, best_affinity, det_objs, frame_index):\n log.debug(\"Logging decision for frame index={}\".format(frame_index))\n num_tracks = len(self.tracks)\n for track_index in range(num_tracks):\n assert track_index in decision\n obj_index = decision[track_index] # index of the object assigned to the track\n if obj_index is not None:\n assert 0 <= obj_index < len(det_objs)\n obj_bbox = det_objs[obj_index].bbox\n else:\n obj_bbox = None\n\n cur_best_affinity = best_affinity[track_index]\n if cur_best_affinity is not None:\n best_affinity_str = \"{:.3f}\".format(cur_best_affinity)\n else:\n best_affinity_str = str(cur_best_affinity)\n\n log.debug(\"track_index={}, track id={}, last_bbox={}, decision={}, best_affinity={} => {}\".format(\n track_index, self.tracks[track_index].get_id(),\n self.tracks[track_index].last().bbox,\n decision[track_index],\n best_affinity_str,\n obj_bbox))\n\n def _apply_decision(self, decision, det_objs, frame_index):\n set_updated_tracks_indexes = set()\n num_det_objs = len(det_objs)\n num_tracks = len(self.tracks)\n object_indexes_not_mapped_to_tracks = set(range(num_det_objs)) # all indexes from 0 to num_det_objs-1\n for track_index in range(num_tracks):\n assert track_index in decision\n\n obj_index = decision[track_index] # index of the object assigned to the track\n if obj_index is None:\n # no objects are mapped for this track\n continue\n\n assert 0 <= 
obj_index < num_det_objs\n if obj_index not in object_indexes_not_mapped_to_tracks:\n raise RuntimeError(\"ERROR: Algorithm assigned the object {} to several tracks\".format(obj_index))\n\n object_indexes_not_mapped_to_tracks.remove(obj_index)\n\n o = det_objs[obj_index]\n self.tracks[track_index].add_object(o)\n\n # create new tracks for all the objects not mapped to tracks\n for obj_index in object_indexes_not_mapped_to_tracks:\n o = det_objs[obj_index]\n self._create_new_track(o)\n\n def _create_new_track(self, o):\n new_track = Track(o)\n self.tracks.append(new_track)\n log.debug(\"created new track: id={} object: frame_index={}, {}\".format(\n new_track.get_id(), o.frame_index, o.bbox))\n\n def _move_obsolete_tracks_to_archive(self, frame_index):\n new_tracks = []\n for t in self.tracks:\n last_frame_index = t.last().frame_index\n if frame_index - last_frame_index >= self.num_frames_to_remove_track:\n log.debug(\"Move the track id={} to archive: the current frame_index={}, \"\n \"the last frame_index in track={}\".format(\n t.get_id(), frame_index, last_frame_index))\n self.track_archive.append(t)\n else:\n new_tracks.append(t)\n\n self.tracks = new_tracks\n\n def is_track_valid(self, track):\n assert isinstance(track, Track)\n return len(track.objects) > self.num_objects_to_make_track_valid\n\n def get_all_valid_tracks(self):\n res = []\n for t in self.track_archive:\n if self.is_track_valid(t):\n res.append(t)\n\n for t in self.tracks:\n if self.is_track_valid(t):\n res.append(t)\n\n return res\n\ndef convert_tracks_to_annotation_storage(tracks):\n ann_objects_by_frame_index = {}\n for cur_track in tqdm(tracks, desc=\"Converting\"):\n track_id = cur_track.get_id()\n\n first_frame_index = cur_track.objects[0].frame_index\n last_frame_index = cur_track.objects[-1].frame_index\n\n for frame_index in range(first_frame_index, last_frame_index+1):\n bbox = cur_track.get_bbox_for_frame(frame_index)\n tl_x = math.floor(bbox.tl_x)\n tl_y = math.floor(bbox.tl_y)\n br_x = math.ceil(bbox.br_x)\n br_y = math.ceil(bbox.br_y)\n detect_obj = DetectedObject(frame_index=frame_index,\n bbox=Bbox(tl_x, tl_y, br_x, br_y),\n appearance_feature=[])\n ann_obj = AnnotationObject(detect_obj=detect_obj,\n track_id=track_id)\n if frame_index not in ann_objects_by_frame_index:\n ann_objects_by_frame_index[frame_index] = {}\n\n ann_objects_by_frame_index[frame_index][track_id] = ann_obj\n\n annotation_objects = []\n for frame_index in sorted(ann_objects_by_frame_index.keys()):\n cur_ann_objects = ann_objects_by_frame_index[frame_index]\n for track_id in sorted(cur_ann_objects.keys()):\n annotation_objects.append(cur_ann_objects[track_id])\n\n annotation_storage = AnnotationStorage.create_annotation_storage_from_list(annotation_objects)\n return annotation_storage\n" ]
[ [ "numpy.array", "numpy.printoptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cyberdeception/deepdig
[ "482061bc5039181a95631fcb8515d990eb2e16a9" ]
[ "ml/code/svm/classifiers/GloveClassifierv1.py" ]
[ "# This is a Python framework to compliment \"Peek-a-Boo, I Still See You: Why Efficient Traffic Analysis Countermeasures Fail\".\n# Copyright (C) 2012 Kevin P. Dyer (kpdyer.com)\n# See LICENSE for more details.\n\nimport arffWriter\nimport wekaAPI\nfrom glove import Glove\nimport numpy as np\n\n\nclass GloveClassifier:\n @staticmethod\n def traceToInstance(trace):\n myglove = Glove.load(\"mygloveModel\")\n myVectors = []\n vectorDict = {}\n paragraph = []\n for packet in trace.getPackets():\n key = str(packet.getLength()) + \"_\" + str(packet.getDirection())\n if key in myglove.dictionary:\n word_idx = myglove.dictionary[str(key)]\n myVectors.append(list(myglove.word_vectors[word_idx]))\n # for each packet len get the vectors and sum it up by colum to get a 100 dim vector to represent a trace therefor an instance\n #myVectors = myglove.transform_paragraph(paragraph, epochs=50, ignore_missing=True)\n if len(myVectors) == 0:\n return None\n mymeanVector = np.mean(myVectors, axis=0)\n # print mymeanVector.shape\n count = 0\n for l in mymeanVector:\n vectorDict[\"v\" + str(count)] = l;\n count = count + 1;\n instance = trace.getHistogram()\n # print instance\n instance['class'] = 'webpage' + str(trace.getId())\n newinstances = dict(instance.items() + vectorDict.items())\n # some instances just contain nan values that should be discarded\n if np.isnan(vectorDict[\"v1\"]):\n return None\n return newinstances\n\n @staticmethod\n def classify(runID, trainingSet, testingSet):\n [trainingFile, testingFile] = arffWriter.writeArffFiles(runID, trainingSet, testingSet)\n return wekaAPI.execute(trainingFile, testingFile, \"weka.classifiers.bayes.NaiveBayes\", ['-K'])\n\n\n\"\"\" @staticmethod\n def classify(runID, trainingSet, testingSet):\n [trainingFile, testingFile] = arffWriter.writeArffFiles(runID, trainingSet, testingSet)\n return wekaAPI.execute(trainingFile,\n testingFile,\n \"weka.Run weka.classifiers.functions.LibSVM\",\n ['-K', '2', # RBF kernel\n '-G', '0.0000019073486328125', # Gamma\n '-C', '131072']) # Cost\n\"\"\"\n" ]
[ [ "numpy.isnan", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raynardj/torchember
[ "635a6578760ede37c2b02b47be84e3bf773a18a4" ]
[ "torchember/whyhat.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/20_why_hat.ipynb (unless otherwise specified).\n\n__all__ = ['md5hash', 'ModelInput', 'InputEmb', 'InputOneHot', 'InputConti', 'YEncoder', 'YOneHot', 'YConti',\n 'RichColumn', 'RichDF', 'TabularModel', 'TabularNN']\n\n# Cell\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nimport os\nimport json\nfrom .core import color\nfrom .helper import tracker\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n# Cell\nfrom hashlib import md5\nfrom datetime import datetime\nfrom torch import nn\nimport torch\nimport numpy as np\ndef md5hash(x):\n return md5(x.encode()).hexdigest()\n\n# Cell\nclass ModelInput(nn.Module):\n def __init__(self,rich_col):\n super().__init__()\n self.rich_col = rich_col\n rich_col.input_module =self\n\nclass InputEmb(ModelInput):\n def __init__(self,rich_col):\n super().__init__(rich_col)\n self.emb = nn.Embedding(len(rich_col.top_freq)+1,rich_col.hidden_size)\n\n def forward(self,x):\n return self.emb(x)\n\nclass InputOneHot(ModelInput):\n def __init__(self,rich_col):\n super().__init__(rich_col)\n self.eye = torch.eye(len(self.rich_col))\n\n def forward(self,x):\n return self.eye[x]\n\nclass InputConti(ModelInput):\n def __init__(self,rich_col):\n super().__init__(rich_col)\n rich_col.mean = rich_col.col.mean()\n rich_col.std = rich_col.col.std()\n self.bn=nn.BatchNorm1d(1)\n self.tanh = nn.Tanh()\n\n def forward(self,x):\n x = self.tanh(self.bn(x))\n return x.detach()\n\n\n# Cell\nclass YEncoder:\n \"\"\"\n Encode the why into the required shape\n input of the __call__, numpy array\n \"\"\"\n def __init__(self,rich_col):\n super().__init__()\n self.rich_col = rich_col\n assert rich_col.is_y,f\"{rich_col.name} isn't a y column set\"\n rich_col.y_encoder = self\n\n def __call__(self,x):\n raise NotImplementedError(\"Defind __call__ of YEncoder first\")\n\nclass YOneHot(YEncoder):\n def __init__(self, rich_col):\n super().__init__(rich_col)\n self.eye = np.eye(len(rich_col)).astype(np.int)\n\n def __call__(self, x):\n return self.eye[x]\n\nclass YConti(YEncoder):\n def __init__(self, rich_col):\n super().__init__(rich_col)\n self.mean = rich_col.col.mean()\n self.std = rich_col.col.std()\n\n def __call__(self,x):\n return np.clip((x-self.mean)/self.std,-2,2)\n\n# Cell\nclass RichColumn(object):\n \"\"\"\n A pandas series manager\n \"\"\"\n def __init__(self,column, is_y = False,min_occur = 5, is_emb = True,hidden_size=20):\n self.col = column\n self.col.rc = self\n self.name = self.col.name\n self.min_occur = min_occur\n self.hidden_size = hidden_size\n self.is_emb = is_emb\n self.is_y = is_y\n self.use = True\n self.is_conti = True\n self.defined = False\n\n def kill(self):\n \"\"\"\n set column to kill mode, that it would not be involved in the learning\n \"\"\"\n self.defined = True\n self.use = False\n\n def conti(self):\n \"\"\"\n set column to contineous data\n \"\"\"\n self.defined = True\n self.is_conti = True\n\n def disc(self):\n \"\"\"\n set column to discrete data\n \"\"\"\n self.defined = True\n self.is_conti = False\n\n def is_number(self):\n \"\"\"\n Is this column's data type in any form of number\n \"\"\"\n return self.col.dtype in (int,float,\n np.float16,np.float32,np.float64,np.float64,\n np.int0,np.int8,np.int16,np.int32,np.int64)\n\n def __bool__(self):\n \"\"\"\n is this column going to join the learning\n \"\"\"\n return self.use\n\n def __len__(self):\n \"\"\"\n width of column when entering the model, or used as target\n \"\"\"\n if self.is_conti:\n return 1\n else:\n if self.is_emb 
and (self.is_y==False):\n return self.hidden_size\n else:\n width = len(self.top_freq)+1\n width =1 if width==2 else width\n return width\n\n def __repr__(self,):\n return f\"<Rich Column:{self.name}>\"\n\n def top_freq_(self):\n freq = self.freq()\n self.top_freq = freq[freq[self.name]>=self.min_occur].reset_index()\n self.tokens = dict((v,k+1) for k,v in enumerate(self.top_freq[\"index\"]))\n self.token_arr = np.array([\"<mtk>\",]+list(self.top_freq[\"index\"]))\n return self.top_freq\n\n def freq(self):\n return pd.DataFrame(data=self.col.value_counts())\n\n @property\n def conf_dict(self):\n return dict((i,getattr(self,i)) for i in [\"name\",\"defined\",\"is_conti\",\"is_y\",\"is_emb\",\"use\"])\n\n def set_conf(self,conf_dict):\n for k,v in conf_dict.items():\n setattr(self,k,v)\n return self\n\n def encode(self,x):\n if self.is_conti:\n return x if x else self.mean\n else:\n try:\n return self.tokens[x]\n except:\n return 0\n\n def decode(self,idx):\n return self.token_arr[idx]\n\n def build_learn(self):\n \"\"\"\n prepare the column for learning\n \"\"\"\n if self.is_y == False:\n if self.is_conti:\n self.mean = self.col.mean()\n InputConti(self)\n else:\n InputEmb(self)\n else:\n if self.is_conti:\n self.mean = self.col.mean()\n YConti(self)\n else:\n YOneHot(self)\n return self\n\n# Cell\nclass RichDF(object):\n \"\"\"\n A pandas dataframe manager\n \"\"\"\n def __init__(self,df,fname=None):\n self.df = df\n self.columns = dict()\n if fname==None:\n fname=f\"why_hat_{self.ts_str}\"\n self.t = tracker(\"torchember\",fname)\n self.t.data = self.t.log_path\n\n for colname in self.df:\n self.columns.update({colname:RichColumn(df[colname])})\n\n @property\n def ts_str(self):\n return datetime.now().strftime(\"%m%d_%H%M%S\")\n\n @property\n def col_conf(self):\n return dict((k,{\"use\":v.use,\"is_cont\":v.is_conti}) for k,v in self.columns.items())\n\n def __getitem__(self,col_name):\n return self.columns[col_name]\n\n def kill(self,colname):\n \"\"\"\n Not using this column\n \"\"\"\n self.df[colname].rc.kill()\n\n def conti(self,colname):\n self.df[colname].rc.conti()\n\n def disc(self,colname):\n self.df[colname].rc.disc()\n\n def save_col(self,rcol):\n self.t[md5hash(rcol.name)]=rcol.conf_dict\n\n def set_col(self,rcol):\n if rcol.defined:\n print(f\"{rcol.name} defined, use:{rcol.use}, contineus?:{rcol.is_conti}\")\n print(color.bold(\"=\"*30))\n print(color.cyan(rcol.name))\n print(color.red(f\"number? {rcol.is_number()}\"))\n print(rcol.top_freq_().head(5))\n\n print(color.red(\"Is this a [C]ontineous, [D]iscrete or a column we do[N]'t need? 
default N\"))\n x = input().lower()\n if x==\"c\":\n rcol.conti()\n print(color.blue(f\"{rcol.name} set to contineous data\"))\n self.save_col(rcol)\n elif x ==\"d\":\n rcol.disc()\n print(color.blue(f\"{rcol.name} set to discrite data\"))\n self.save_col(rcol)\n elif (x ==\"\") or (x==\"n\"):\n rcol.kill()\n print(color.blue(f\"{rcol.name} will not be involved in learning\"))\n self.save_col(rcol)\n else:\n print(color.yellow(f\"option [{x}] not found, try Again?\"))\n\n def save(self,colname):\n col=self.df[colname]\n self.t[md5hash(colname)] = col.rc.conf_dict\n\n def read(self,colname):\n col=self.df[colname]\n col.rc.set_conf(self.t[md5hash(colname)])\n if col.rc.is_conti:\n col.rc.top_freq_()\n\n def shuffle_df(self):\n self.df = self.df\\\n .sample(frac=1.)\\\n .reset_index().drop(\"index\",axis=1)\n\n def tour(self):\n \"\"\"\n Go through column 1 by 1 to decide the processing for its data\n \"\"\"\n for colname in self.df:\n col = self.df[colname]\n current = self.t[md5hash(colname)]\n if current != None:\n col.rc.set_conf(current)\n if col.rc.is_conti==False:\n col.rc.top_freq_()\n if col.rc.defined==False:\n self.set_col(col.rc)\n\n def set_y(self, *colnames):\n \"\"\"\n set columns to y\n all the columns that use==True and is_y==False will be treated as x\n \"\"\"\n for colname in colnames:\n rc = self.columns[colname]\n rc.is_y = True\n rc.use = True\n rc.is_emb = False\n self.save(colname)\n\n def set_x(self, *colnames):\n \"\"\"\n set columns to x\n of course,every columns' default status is x,\n so you don't have to set this if you accidentally set x to y\n \"\"\"\n for colname in colnames:\n rc = self.columns[colname]\n rc.use = True\n rc.is_y = False\n self.save(colname)\n\n @property\n def Xs(self):\n \"\"\"\n Return the next x rich column\n \"\"\"\n for col,rc in self.columns.items():\n if (rc.is_y) ==False and rc.use:\n yield rc\n\n @property\n def Ys(self):\n \"\"\"\n Return the next y rich column\n \"\"\"\n for col,rc in self.columns.items():\n if rc.is_y and rc.use:\n yield rc\n\n\n# Cell\nclass TabularModel(nn.Module):\n def __init__(self,rdf):\n super().__init__()\n self.rdf=rdf\n self.inputs = nn.ModuleDict(modules = dict((x.name,x.input_module) for x in rdf.Xs))\n\n self.build_dial_x()\n self.build_dial_y()\n\n self.input_width = len(self.dial)\n self.target_width = len(self.dial_y)\n\n self.hidden_size = max(self.input_width,self.target_width,20)\n self.dnn = nn.Sequential(*[\n nn.Linear(self.input_width,self.hidden_size),\n nn.BatchNorm1d(self.hidden_size),\n nn.ReLU(),\n nn.Linear(self.hidden_size,self.target_width),\n nn.BatchNorm1d(self.target_width),\n ])\n\n def forward(self,Xs):\n \"\"\"\n Xs dictionary of inputs\n \"\"\"\n ipts = list(self.inputs[xcol.name](Xs[xcol.name]) for xcol in self.rdf.Xs)\n concat = torch.cat(ipts,dim=1)\n return self.dnn(concat)\n\n def build_dial_x(self):\n all_width = 0\n self.dial = dict()\n for x in self.rdf.Xs:\n for i in range(len(x)):\n self.dial.update({all_width:dict({\"colname\":x.name,\n \"rich_col\":x,\n \"sub_idx\":i,\n \"remark\":f\"input<{i}> of column {x.name}\"})})\n all_width+=1\n return all_width\n\n def build_dial_y(self):\n all_width = 0\n self.dial_y = dict()\n for y in self.rdf.Ys:\n for i in range(len(y)):\n self.dial_y.update({all_width:dict({\"colname\":y.name,\n \"rich_col\":y,\n \"sub_idx\":i,\n \"remark\":f\"target<{i}> of column {y.name}\"})})\n all_width+=1\n return all_width\n\nclass TabularNN:\n def __init__(self, rich_df,batch_size=128):\n self.rich_df = rich_df\n self.l = len(rich_df.df)\n 
self.batch_size = batch_size\n self.x = list(x.build_learn() for x in self.rich_df.Xs)\n self.y = list(y.build_learn() for y in self.rich_df.Ys)\n self.assert_xy()\n self.assert_y_consistency()\n self.reset_i()\n self.epoch = 0\n self.rich_df.shuffle_df()\n self.model = TabularModel(self.rich_df)\n\n def reset_i(self):\n \"\"\"reset iterator\"\"\"\n self.s=0\n self.e=1\n\n def __repr__(self):\n return f\">>TabularNN\"\n\n def assert_xy(self):\n assert len(self.x)>0, \"You have you set some X\"\n assert len(self.y)>0, \"You have you set some Y\"\n\n def assert_y_consistency(self):\n conti_list = list(rc.is_conti for rc in self.rich_df.Ys)\n assert float(sum(conti_list))/len(conti_list) in [1.,0.],\"Y has to be all discrete columns, or contineous columns\"\n # decide loss function based on Y\n if conti_list[0]:\n self.crit = nn.MSELoss()\n else:\n self.crit = nn.BCEWithLogitsLoss()\n\n def build_model_nn(self):\n self.nn = TabularModel(self)\n\n def batch_df(self):\n start = self.batch_size*self.s\n end = self.batch_size*self.e\n if start>self.l:\n self.epoch+=1\n self.reset_i()\n start = self.batch_size*self.s\n end = self.batch_size*self.e\n yield self.rich_df.df[start:end]\n\n def batch_array(self):\n df_b = next(self.batch_df())\n x_data = dict()\n y_data = dict()\n\n for x in self.x:\n if x.is_conti:\n df_b[x.name]= df_b[x.name].fillna(x.mean)\n arr = df_b[x.name].apply(x.encode).values\n x_tensor = torch.FloatTensor(arr)[:,None] if x.is_conti else torch.LongTensor(arr)\n x_data.update({x.name:x_tensor})\n\n for y in self.y:\n arr = df_b[y.name].apply(y.encode).values\n y_tensor = torch.FloatTensor(arr) if y.is_conti else torch.LongTensor(arr)\n y_data.update({y.name:y_tensor[:,None]})\n yield x_data,y_data\n\n def batch_y_pred(self):\n x_data,y_data = next(self.batch_array())\n yield self.model(x_data)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.LongTensor", "torch.cat", "numpy.clip", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.FloatTensor", "torch.nn.ReLU", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rez-spb/OpenSeeFace
[ "be86991e7ac2855bf0f42f5816af6eb9a12706fb" ]
[ "facetracker.py" ]
[ "import copy\nimport os\nimport sys\nimport argparse\nimport traceback\nimport gc\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"-i\", \"--ip\", help=\"Set IP address for sending tracking data\", default=\"127.0.0.1\")\nparser.add_argument(\"-p\", \"--port\", type=int, help=\"Set port for sending tracking data\", default=11573)\nif os.name == 'nt':\n parser.add_argument(\"-l\", \"--list-cameras\", type=int, help=\"Set this to 1 to list the available cameras and quit, set this to 2 or higher to output only the names\", default=0)\n parser.add_argument(\"-a\", \"--list-dcaps\", type=int, help=\"Set this to -1 to list all cameras and their available capabilities, set this to a camera id to list that camera's capabilities\", default=None)\n parser.add_argument(\"-W\", \"--width\", type=int, help=\"Set camera and raw RGB width\", default=640)\n parser.add_argument(\"-H\", \"--height\", type=int, help=\"Set camera and raw RGB height\", default=360)\n parser.add_argument(\"-F\", \"--fps\", type=int, help=\"Set camera frames per second\", default=24)\n parser.add_argument(\"-D\", \"--dcap\", type=int, help=\"Set which device capability line to use or -1 to use the default camera settings\", default=None)\n parser.add_argument(\"-B\", \"--blackmagic\", type=int, help=\"When set to 1, special support for Blackmagic devices is enabled\", default=0)\nelse:\n parser.add_argument(\"-W\", \"--width\", type=int, help=\"Set raw RGB width\", default=640)\n parser.add_argument(\"-H\", \"--height\", type=int, help=\"Set raw RGB height\", default=360)\nparser.add_argument(\"-c\", \"--capture\", help=\"Set camera ID (0, 1...) or video file\", default=\"0\")\nparser.add_argument(\"-M\", \"--mirror-input\", action=\"store_true\", help=\"Process a mirror image of the input video\")\nparser.add_argument(\"-m\", \"--max-threads\", type=int, help=\"Set the maximum number of threads\", default=1)\nparser.add_argument(\"-t\", \"--threshold\", type=float, help=\"Set minimum confidence threshold for face tracking\", default=None)\nparser.add_argument(\"-d\", \"--detection-threshold\", type=float, help=\"Set minimum confidence threshold for face detection\", default=0.6)\nparser.add_argument(\"-v\", \"--visualize\", type=int, help=\"Set this to 1 to visualize the tracking, to 2 to also show face ids, to 3 to add confidence values or to 4 to add numbers to the point display\", default=0)\nparser.add_argument(\"-P\", \"--pnp-points\", type=int, help=\"Set this to 1 to add the 3D fitting points to the visualization\", default=0)\nparser.add_argument(\"-s\", \"--silent\", type=int, help=\"Set this to 1 to prevent text output on the console\", default=0)\nparser.add_argument(\"--faces\", type=int, help=\"Set the maximum number of faces (slow)\", default=1)\nparser.add_argument(\"--scan-retinaface\", type=int, help=\"When set to 1, scanning for additional faces will be performed using RetinaFace in a background thread, otherwise a simpler, faster face detection mechanism is used. 
When the maximum number of faces is 1, this option does nothing.\", default=0)\nparser.add_argument(\"--scan-every\", type=int, help=\"Set after how many frames a scan for new faces should run\", default=3)\nparser.add_argument(\"--discard-after\", type=int, help=\"Set how long the tracker should keep looking for lost faces\", default=10)\nparser.add_argument(\"--max-feature-updates\", type=int, help=\"This is the number of seconds after which feature min/max/medium values will no longer be updated once a face has been detected.\", default=900)\nparser.add_argument(\"--no-3d-adapt\", type=int, help=\"When set to 1, the 3D face model will not be adapted to increase the fit\", default=1)\nparser.add_argument(\"--try-hard\", type=int, help=\"When set to 1, the tracker will try harder to find a face\", default=0)\nparser.add_argument(\"--video-out\", help=\"Set this to the filename of an AVI file to save the tracking visualization as a video\", default=None)\nparser.add_argument(\"--video-scale\", type=int, help=\"This is a resolution scale factor applied to the saved AVI file\", default=1, choices=[1,2,3,4])\nparser.add_argument(\"--video-fps\", type=float, help=\"This sets the frame rate of the output AVI file\", default=24)\nparser.add_argument(\"--raw-rgb\", type=int, help=\"When this is set, raw RGB frames of the size given with \\\"-W\\\" and \\\"-H\\\" are read from standard input instead of reading a video\", default=0)\nparser.add_argument(\"--log-data\", help=\"You can set a filename to which tracking data will be logged here\", default=\"\")\nparser.add_argument(\"--log-output\", help=\"You can set a filename to which console output will be logged here\", default=\"\")\nparser.add_argument(\"--model\", type=int, help=\"This can be used to select the tracking model. Higher numbers are models with better tracking quality, but slower speed, except for model 4, which is wink optimized. Models 1 and 0 tend to be too rigid for expression and blink detection. Model -2 is roughly equivalent to model 1, but faster.
Model -3 is between models 0 and -1.\", default=3, choices=[-3, -2, -1, 0, 1, 2, 3, 4])\nparser.add_argument(\"--model-dir\", help=\"This can be used to specify the path to the directory containing the .onnx model files\", default=None)\nparser.add_argument(\"--gaze-tracking\", type=int, help=\"When set to 1, experimental blink detection and gaze tracking are enabled, which makes things slightly slower\", default=1)\nparser.add_argument(\"--face-id-offset\", type=int, help=\"When set, this offset is added to all face ids, which can be useful for mixing tracking data from multiple network sources\", default=0)\nparser.add_argument(\"--repeat-video\", type=int, help=\"When set to 1 and a video file was specified with -c, the tracker will loop the video until interrupted\", default=0)\nparser.add_argument(\"--dump-points\", type=str, help=\"When set to a filename, the current face 3D points are made symmetric and dumped to the given file when quitting the visualization with the \\\"q\\\" key\", default=\"\")\nparser.add_argument(\"--benchmark\", type=int, help=\"When set to 1, the different tracking models are benchmarked, starting with the best and ending with the fastest and with gaze tracking disabled for models with negative IDs\", default=0)\nif os.name == 'nt':\n parser.add_argument(\"--use-dshowcapture\", type=int, help=\"When set to 1, libdshowcapture will be used for video input instead of OpenCV\", default=1)\n parser.add_argument(\"--blackmagic-options\", type=str, help=\"When set, this additional option string is passed to the blackmagic capture library\", default=None)\n parser.add_argument(\"--priority\", type=int, help=\"When set, the process priority will be changed\", default=None, choices=[0, 1, 2, 3, 4, 5])\nargs = parser.parse_args()\n\nos.environ[\"OMP_NUM_THREADS\"] = str(args.max_threads)\n\nclass OutputLog(object):\n def __init__(self, fh, output):\n self.fh = fh\n self.output = output\n def write(self, buf):\n if not self.fh is None:\n self.fh.write(buf)\n self.output.write(buf)\n self.flush()\n def flush(self):\n if not self.fh is None:\n self.fh.flush()\n self.output.flush()\noutput_logfile = None\nif args.log_output != \"\":\n output_logfile = open(args.log_output, \"w\")\nsys.stdout = OutputLog(output_logfile, sys.stdout)\nsys.stderr = OutputLog(output_logfile, sys.stderr)\n\nif os.name == 'nt':\n import dshowcapture\n if args.blackmagic == 1:\n dshowcapture.set_bm_enabled(True)\n if not args.blackmagic_options is None:\n dshowcapture.set_options(args.blackmagic_options)\n if not args.priority is None:\n import psutil\n classes = [psutil.IDLE_PRIORITY_CLASS, psutil.BELOW_NORMAL_PRIORITY_CLASS, psutil.NORMAL_PRIORITY_CLASS, psutil.ABOVE_NORMAL_PRIORITY_CLASS, psutil.HIGH_PRIORITY_CLASS, psutil.REALTIME_PRIORITY_CLASS]\n p = psutil.Process(os.getpid())\n p.nice(classes[args.priority])\n\nif os.name == 'nt' and (args.list_cameras > 0 or not args.list_dcaps is None):\n cap = dshowcapture.DShowCapture()\n info = cap.get_info()\n unit = 10000000.;\n if not args.list_dcaps is None:\n formats = {0: \"Any\", 1: \"Unknown\", 100: \"ARGB\", 101: \"XRGB\", 200: \"I420\", 201: \"NV12\", 202: \"YV12\", 203: \"Y800\", 300: \"YVYU\", 301: \"YUY2\", 302: \"UYVY\", 303: \"HDYC (Unsupported)\", 400: \"MJPEG\", 401: \"H264\" }\n for cam in info:\n if args.list_dcaps == -1:\n type = \"\"\n if cam['type'] == \"Blackmagic\":\n type = \"Blackmagic: \"\n print(f\"{cam['index']}: {type}{cam['name']}\")\n if args.list_dcaps != -1 and args.list_dcaps != cam['index']:\n continue\n for caps in 
cam['caps']:\n format = caps['format']\n if caps['format'] in formats:\n format = formats[caps['format']]\n if caps['minCX'] == caps['maxCX'] and caps['minCY'] == caps['maxCY']:\n print(f\" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {format}\")\n else:\n print(f\" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']}-{caps['maxCX']}x{caps['maxCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {format}\")\n else:\n if args.list_cameras == 1:\n print(\"Available cameras:\")\n for cam in info:\n type = \"\"\n if cam['type'] == \"Blackmagic\":\n type = \"Blackmagic: \"\n if args.list_cameras == 1:\n print(f\"{cam['index']}: {type}{cam['name']}\")\n else:\n print(f\"{type}{cam['name']}\")\n cap.destroy_capture()\n sys.exit(0)\n\nimport numpy as np\nimport time\nimport cv2\nimport socket\nimport struct\nimport json\nfrom input_reader import InputReader, VideoReader, DShowCaptureReader, try_int\nfrom tracker import Tracker, get_model_base_path\n\nif args.benchmark > 0:\n model_base_path = get_model_base_path(args.model_dir)\n im = cv2.imread(os.path.join(model_base_path, \"benchmark.bin\"), cv2.IMREAD_COLOR)\n results = []\n for model_type in [3, 2, 1, 0, -1, -2, -3]:\n tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type == -1), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False)\n tracker.detected = 1\n tracker.faces = [(0, 0, 224, 224)]\n total = 0.0\n for i in range(100):\n start = time.perf_counter()\n r = tracker.predict(im)\n total += time.perf_counter() - start\n print(1. 
/ (total / 100.))\n sys.exit(0)\n\ntarget_ip = args.ip\ntarget_port = args.port\n\nif args.faces >= 40:\n print(\"Transmission of tracking data over network is not supported with 40 or more faces.\")\n\nfps = 0\ndcap = None\nuse_dshowcapture_flag = False\nif os.name == 'nt':\n fps = args.fps\n dcap = args.dcap\n use_dshowcapture_flag = True if args.use_dshowcapture == 1 else False\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)\n if args.dcap == -1 and type(input_reader) == DShowCaptureReader:\n fps = min(fps, input_reader.device.get_fps())\nelse:\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag)\nif type(input_reader.reader) == VideoReader:\n fps = 0\n\nlog = None\nout = None\nfirst = True\nheight = 0\nwidth = 0\ntracker = None\nsock = None\ntotal_tracking_time = 0.0\ntracking_time = 0.0\ntracking_frames = 0\nframe_count = 0\n\nfeatures = [\"eye_l\", \"eye_r\", \"eyebrow_steepness_l\", \"eyebrow_updown_l\", \"eyebrow_quirk_l\", \"eyebrow_steepness_r\", \"eyebrow_updown_r\", \"eyebrow_quirk_r\", \"mouth_corner_updown_l\", \"mouth_corner_inout_l\", \"mouth_corner_updown_r\", \"mouth_corner_inout_r\", \"mouth_open\", \"mouth_wide\"]\n\nif args.log_data != \"\":\n log = open(args.log_data, \"w\")\n log.write(\"Frame,Time,Width,Height,FPS,Face,FaceID,RightOpen,LeftOpen,AverageConfidence,Success3D,PnPError,RotationQuat.X,RotationQuat.Y,RotationQuat.Z,RotationQuat.W,Euler.X,Euler.Y,Euler.Z,RVec.X,RVec.Y,RVec.Z,TVec.X,TVec.Y,TVec.Z\")\n for i in range(66):\n log.write(f\",Landmark[{i}].X,Landmark[{i}].Y,Landmark[{i}].Confidence\")\n for i in range(66):\n log.write(f\",Point3D[{i}].X,Point3D[{i}].Y,Point3D[{i}].Z\")\n for feature in features:\n log.write(f\",{feature}\")\n log.write(\"\\r\\n\")\n log.flush()\n\nis_camera = args.capture == str(try_int(args.capture))\n\ntry:\n attempt = 0\n frame_time = time.perf_counter()\n target_duration = 0\n if fps > 0:\n target_duration = 1. 
/ float(fps)\n repeat = args.repeat_video != 0 and type(input_reader.reader) == VideoReader\n need_reinit = 0\n failures = 0\n source_name = input_reader.name\n while repeat or input_reader.is_open():\n if not input_reader.is_open() or need_reinit == 1:\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)\n if input_reader.name != source_name:\n print(f\"Failed to reinitialize camera and got {input_reader.name} instead of {source_name}.\")\n sys.exit(1)\n need_reinit = 2\n time.sleep(0.02)\n continue\n if not input_reader.is_ready():\n time.sleep(0.02)\n continue\n\n ret, frame = input_reader.read()\n if ret and args.mirror_input:\n frame = cv2.flip(frame, 1)\n if not ret:\n if repeat:\n if need_reinit == 0:\n need_reinit = 1\n continue\n elif is_camera:\n attempt += 1\n if attempt > 30:\n break\n else:\n time.sleep(0.02)\n if attempt == 3:\n need_reinit = 1\n continue\n else:\n break;\n\n attempt = 0\n need_reinit = 0\n frame_count += 1\n now = time.time()\n\n if first:\n first = False\n height, width, channels = frame.shape\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model != -1 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1)\n if not args.video_out is None:\n out = cv2.VideoWriter(args.video_out, cv2.VideoWriter_fourcc('F','F','V','1'), args.video_fps, (width * args.video_scale, height * args.video_scale))\n\n try:\n inference_start = time.perf_counter()\n faces = tracker.predict(frame)\n if len(faces) > 0:\n inference_time = (time.perf_counter() - inference_start)\n total_tracking_time += inference_time\n tracking_time += inference_time / len(faces)\n tracking_frames += 1\n packet = bytearray()\n detected = False\n for face_num, f in enumerate(faces):\n f = copy.copy(f)\n f.id += args.face_id_offset\n if f.eye_blink is None:\n f.eye_blink = [1, 1]\n right_state = \"O\" if f.eye_blink[0] > 0.30 else \"-\"\n left_state = \"O\" if f.eye_blink[1] > 0.30 else \"-\"\n if args.silent == 0:\n print(f\"Confidence[{f.id}]: {f.conf:.4f} / 3D fitting error: {f.pnp_error:.4f} / Eyes: {left_state}, {right_state}\")\n detected = True\n if not f.success:\n pts_3d = np.zeros((70, 3), np.float32)\n packet.extend(bytearray(struct.pack(\"d\", now)))\n packet.extend(bytearray(struct.pack(\"i\", f.id)))\n packet.extend(bytearray(struct.pack(\"f\", width)))\n packet.extend(bytearray(struct.pack(\"f\", height)))\n packet.extend(bytearray(struct.pack(\"f\", f.eye_blink[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.eye_blink[1])))\n packet.extend(bytearray(struct.pack(\"B\", 1 if f.success else 0)))\n packet.extend(bytearray(struct.pack(\"f\", f.pnp_error)))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[2])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[3])))\n packet.extend(bytearray(struct.pack(\"f\", f.euler[0])))\n packet.extend(bytearray(struct.pack(\"f\", 
f.euler[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.euler[2])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[2])))\n if not log is None:\n log.write(f\"{frame_count},{now},{width},{height},{args.fps},{face_num},{f.id},{f.eye_blink[0]},{f.eye_blink[1]},{f.conf},{f.success},{f.pnp_error},{f.quaternion[0]},{f.quaternion[1]},{f.quaternion[2]},{f.quaternion[3]},{f.euler[0]},{f.euler[1]},{f.euler[2]},{f.rotation[0]},{f.rotation[1]},{f.rotation[2]},{f.translation[0]},{f.translation[1]},{f.translation[2]}\")\n for (x,y,c) in f.lms:\n packet.extend(bytearray(struct.pack(\"f\", c)))\n if args.visualize > 1:\n frame = cv2.putText(frame, str(f.id), (int(f.bbox[0]), int(f.bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,255))\n if args.visualize > 2:\n frame = cv2.putText(frame, f\"{f.conf:.4f}\", (int(f.bbox[0] + 18), int(f.bbox[1] - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))\n for pt_num, (x,y,c) in enumerate(f.lms):\n packet.extend(bytearray(struct.pack(\"f\", y)))\n packet.extend(bytearray(struct.pack(\"f\", x)))\n if not log is None:\n log.write(f\",{y},{x},{c}\")\n if pt_num == 66 and (f.eye_blink[0] < 0.30 or c < 0.30):\n continue\n if pt_num == 67 and (f.eye_blink[1] < 0.30 or c < 0.30):\n continue\n x = int(x + 0.5)\n y = int(y + 0.5)\n if args.visualize != 0 or not out is None:\n if args.visualize > 3:\n frame = cv2.putText(frame, str(pt_num), (int(y), int(x)), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255,255,0))\n color = (0, 255, 0)\n if pt_num >= 66:\n color = (255, 255, 0)\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = color\n x += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = color\n y += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = color\n x -= 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = color\n if args.pnp_points != 0 and (args.visualize != 0 or not out is None) and f.rotation is not None:\n if args.pnp_points > 1:\n projected = cv2.projectPoints(f.face_3d[0:66], f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)\n else:\n projected = cv2.projectPoints(f.contour, f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)\n for [(x,y)] in projected[0]:\n x = int(x + 0.5)\n y = int(y + 0.5)\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n x += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n y += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n x -= 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n for (x,y,z) in f.pts_3d:\n packet.extend(bytearray(struct.pack(\"f\", x)))\n packet.extend(bytearray(struct.pack(\"f\", -y)))\n packet.extend(bytearray(struct.pack(\"f\", -z)))\n if not log is None:\n log.write(f\",{x},{-y},{-z}\")\n if f.current_features is None:\n f.current_features = {}\n for feature in features:\n if not feature in f.current_features:\n f.current_features[feature] = 0\n packet.extend(bytearray(struct.pack(\"f\", f.current_features[feature])))\n if not log is None:\n log.write(f\",{f.current_features[feature]}\")\n if not log is None:\n log.write(\"\\r\\n\")\n log.flush()\n\n if detected and len(faces) < 40:\n sock.sendto(packet, (target_ip, target_port))\n\n if 
not out is None:\n video_frame = frame\n if args.video_scale != 1:\n video_frame = cv2.resize(frame, (width * args.video_scale, height * args.video_scale), interpolation=cv2.INTER_NEAREST)\n out.write(video_frame)\n if args.video_scale != 1:\n del video_frame\n\n if args.visualize != 0:\n cv2.imshow('OpenSeeFace Visualization', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n if args.dump_points != \"\" and not faces is None and len(faces) > 0:\n np.set_printoptions(threshold=sys.maxsize, precision=15)\n pairs = [\n (0, 16),\n (1, 15),\n (2, 14),\n (3, 13),\n (4, 12),\n (5, 11),\n (6, 10),\n (7, 9),\n (17, 26),\n (18, 25),\n (19, 24),\n (20, 23),\n (21, 22),\n (31, 35),\n (32, 34),\n (36, 45),\n (37, 44),\n (38, 43),\n (39, 42),\n (40, 47),\n (41, 46),\n (48, 52),\n (49, 51),\n (56, 54),\n (57, 53),\n (58, 62),\n (59, 61),\n (65, 63)\n ]\n points = copy.copy(faces[0].face_3d)\n for a, b in pairs:\n x = (points[a, 0] - points[b, 0]) / 2.0\n y = (points[a, 1] + points[b, 1]) / 2.0\n z = (points[a, 2] + points[b, 2]) / 2.0\n points[a, 0] = x\n points[b, 0] = -x\n points[[a, b], 1] = y\n points[[a, b], 2] = z\n points[[8, 27, 28, 29, 33, 50, 55, 60, 64], 0] = 0.0\n points[30, :] = 0.0\n with open(args.dump_points, \"w\") as fh:\n fh.write(repr(points))\n break\n failures = 0\n except Exception as e:\n if e.__class__ == KeyboardInterrupt:\n if args.silent == 0:\n print(\"Quitting\")\n break\n traceback.print_exc()\n failures += 1\n if failures > 30:\n break\n\n collected = False\n del frame\n\n duration = time.perf_counter() - frame_time\n while duration < target_duration:\n if not collected:\n gc.collect()\n collected = True\n duration = time.perf_counter() - frame_time\n sleep_time = target_duration - duration\n if sleep_time > 0:\n time.sleep(sleep_time)\n duration = time.perf_counter() - frame_time\n frame_time = time.perf_counter()\nexcept KeyboardInterrupt:\n if args.silent == 0:\n print(\"Quitting\")\n\ninput_reader.close()\nif not out is None:\n out.release()\ncv2.destroyAllWindows()\n\nif args.silent == 0 and tracking_frames > 0:\n average_tracking_time = 1000 * tracking_time / tracking_frames\n print(f\"Average tracking time per detected face: {average_tracking_time:.2f} ms\")\n print(f\"Tracking time: {total_tracking_time:.3f} s\\nFrames: {tracking_frames}\")\n" ]
[ [ "numpy.set_printoptions", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
petavazohi/PyChemia
[ "e779389418771c25c830aed360773c63bb069372", "e779389418771c25c830aed360773c63bb069372", "e779389418771c25c830aed360773c63bb069372" ]
[ "scripts/Report_Orbital.py", "pychemia/utils/netcdf.py", "pychemia/core/composition.py" ]
[ "#!/usr/bin/env python\n\nimport sys\nimport numpy as np\nimport pychemia\nfrom pychemia.visual.searcher import *\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nfrom pychemia.crystal import CrystalSymmetry\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nfrom matplotlib.collections import PatchCollection\n\n\ndef get_all_status(population):\n ret = []\n for entry in population.pcdb.entries.find({}, {'status': 1}):\n for x in entry['status']:\n if x not in ret:\n ret.append(x)\n return ret\n\n\ndef spacegroup2poly(spacegroup):\n \"\"\"\n Return the size of polygon appropiated to represent a given\n crystal system\n\n :param spacegroup:\n :return:\n \"\"\"\n if spacegroup <= 2:\n return 3\n elif spacegroup <= 15:\n return 5\n elif spacegroup <= 74:\n return 7\n elif spacegroup <= 142:\n return 9\n elif spacegroup <= 167:\n return 8\n elif spacegroup <= 194:\n return 6\n else:\n return 4\n\n\ndef label(ax, xy, spacegroup, energy):\n \"\"\"\n Write the energy and spacegroup number on the center of each\n candidate polygon\n\n :param ax:\n :param xy:\n :param spacegroup:\n :param energy:\n :return:\n \"\"\"\n y = xy[1] - 0.0 # shift y-value for label so that it's below the artist\n ax.text(xy[0], y, \"%d\" % spacegroup, ha=\"center\", family='sans-serif', size=8)\n y = xy[1] - 0.2 # shift y-value for label so that it's below the artist\n ax.text(xy[0], y, \"%.3f\" % energy, ha=\"center\", family='sans-serif', size=8)\n\n\ndef add_structure(ax, patches, position, spacegroup, energy):\n \"\"\"\n Add one polygon to the patches list\n\n :param ax:\n :param patches:\n :param position:\n :param spacegroup:\n :param energy:\n :return:\n \"\"\"\n polygon = mpatches.RegularPolygon(position, spacegroup2poly(spacegroup), 0.5, clip_on=True)\n patches.append(polygon)\n label(ax, position, spacegroup, energy)\n\n\ndef change_symbol(change):\n \"\"\"\n Return the text that inform about the conditions that create that candidate\n\n :param change:\n :return:\n \"\"\"\n\n if change == 'promoted':\n return 'P'\n elif change == 'modified':\n return 'M'\n elif change == 'replace_by_random':\n return 'RR'\n elif change == 'replace_by_other':\n return 'MV'\n elif change == 'duplicate':\n return '___'\n else:\n print(change)\n\n\ndef get_generation(searcher, tag):\n \"\"\"\n Return the identifiers of all members of a population associated with\n a given searcher tag\n\n :param searcher:\n :param tag:\n :return:\n \"\"\"\n lista = [x['_id'] for x in searcher.get_all_generations(tag) if searcher.population.is_evaluated(x['_id'])]\n return searcher.population.ids_sorted(lista)\n\n\ndef get_generation_limits(searcher, gen_size):\n inigen = 0\n fingen = 0\n i = 0\n while True:\n n = len(get_generation(searcher, i))\n print(' [Generation %d: Number candidates = %d]' % (i, n))\n if n < gen_size:\n fingen = i\n break\n i += 1\n return inigen, fingen\n\n\ndef plot_generation_chart(searcher, gen_size):\n colors = []\n patches = []\n population = searcher.population\n\n inigen, fingen = get_generation_limits(searcher, gen_size)\n\n if inigen == fingen:\n return\n\n fig, ax = plt.subplots(figsize=(gen_size, 2 * (fingen - inigen)))\n\n avg_energies = np.zeros(fingen)\n\n # Structures\n sys.stdout.write(\"Plotting Structures for generations:\")\n for j in range(inigen, fingen):\n sys.stdout.write(' %d' % j)\n thegen = get_generation(population, j)[:gen_size]\n plt.text(-0.9, -2 * j, \"Gen %d\" % j, ha=\"center\", family='sans-serif', size=12)\n energies = []\n for 
i in range(gen_size):\n struct, properties, status = population.pcdb.get_dicts(thegen[i])\n # print properties['spacegroup'], properties['energy_pa']\n spacegroup = properties['spacegroup']\n energy = properties['energy_pa']\n energies.append(energy)\n add_structure(ax, patches, [i, -2 * j], spacegroup, energy)\n colors.append(energy)\n avg_energies[j] = np.mean(energies[:navg_energy])\n\n collection = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.3)\n collection.set_array(np.array(colors))\n ax.add_collection(collection)\n sys.stdout.write('\\n')\n\n # Duplicates\n sys.stdout.write(\"Marking duplicates for generations:\")\n for j in range(inigen, fingen):\n sys.stdout.write(' %d' % j)\n thegen = get_generation(population, j)[:gen_size]\n dups = population.get_duplicates(thegen)\n for i in dups:\n x = [list(thegen).index(i), list(thegen).index(dups[i])]\n y = [-2 * j + 0.5, -2 * j + 0.5]\n plt.plot(x, y, 'k-')\n sys.stdout.write('\\n')\n\n # Changes\n sys.stdout.write(\"Plotting changes for generations:\")\n for j in range(inigen, fingen - 1):\n sys.stdout.write(' %d' % j)\n sys.stdout.flush()\n next_generation = get_generation(population, j + 1)\n thegen = get_generation(population, j)[:gen_size]\n for i in range(gen_size):\n # add a line\n for entry in population.pcdb.db.generation_changes.find({'from': thegen[i]}):\n csymbol = change_symbol(entry['change'])\n if csymbol is None:\n print(entry)\n plt.text(i, -2 * j - 0.6, csymbol, ha=\"center\", family='sans-serif', size=6)\n if 'to' in entry and entry['to'] in next_generation[:gen_size]:\n next_id = list(next_generation).index(entry['to'])\n x, y = np.array([[i, next_id], [-2 * j - 0.7, -2 * j - 1.3]])\n line = mlines.Line2D(x, y, lw=3., alpha=0.3)\n ax.add_line(line)\n elif entry['change'] == 'promoted' and entry['from'] in next_generation[:gen_size]:\n next_id = list(next_generation[:gen_size]).index(entry['from'])\n x, y = np.array([[i, next_id], [-2 * j - 0.7, -2 * j - 1.3]])\n line = mlines.Line2D(x, y, lw=3., alpha=0.3)\n ax.add_line(line)\n sys.stdout.write('\\n')\n\n plt.title('%s (tag=%s)' % (population.name, population.tag))\n plt.text(float(gen_size) / 2, 1, '%s (tag=%s)' % (population.name, population.tag),\n ha=\"center\",\n family='sans-serif',\n size=16)\n plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n plt.axis('equal')\n plt.axis('off')\n pdf.savefig()\n plt.clf()\n plt.close()\n\n # Average energy of best structures\n print('Plot: Average energy of best structures')\n plt.figure(figsize=letterpage)\n plt.plot(range(fingen), avg_energies, 'ro')\n plt.plot(range(fingen), avg_energies, 'b--')\n plt.xlabel('Generation')\n plt.ylabel('Average energy of %d best structures' % navg_energy)\n plt.title('%s (tag=%s)' % (population.name, population.tag))\n plt.xticks(range(fingen))\n pdf.savefig()\n plt.close()\n\n\ndef plot_evolution_circular(searcher, target_function='energy_pa', tag='spacegroup'):\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.cla()\n plt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=None, hspace=None)\n ax.set_xlim((-1.1, 1.1))\n ax.set_ylim((-1.1, 1.1))\n ax.set_axis_off()\n ax.set_aspect(1.0)\n\n for gen in range(searcher.current_generation):\n radius = 1.0 / (1.0 + np.exp(-6 * float(gen + 1) / searcher.current_generation))\n radius = (float(gen + 1) / searcher.current_generation) ** 2\n entries = {}\n counter = 0\n nele = len(searcher.lineage)\n for lin in range(nele):\n entry = searcher.population.get_entry(searcher.lineage[str(lin)][gen])\n _id = entry['_id']\n 
entries[_id] = {}\n entries[_id][target_function] = entry['properties'][target_function]\n if tag is not None:\n if tag in entry['properties']:\n entries[_id][tag] = entry['properties'][tag]\n elif tag in entry['structure']:\n entries[_id][tag] = entry['structure'][tag]\n elif tag == 'spacegroup':\n st = searcher.population.get_structure(_id)\n ss = CrystalSymmetry(st)\n entries[_id][tag] = ss.number(1E-3)\n else:\n raise ValueError('Tag not found')\n entries[_id]['x'] = np.cos(counter * 2 * np.pi / nele)\n entries[_id]['y'] = np.sin(counter * 2 * np.pi / nele)\n counter += 1\n\n ids = [i for i in entries]\n xx = np.array([entries[i]['x'] for i in ids])\n yy = np.array([entries[i]['y'] for i in ids])\n aa = np.array([4E3 for i in ids])\n cc = np.array([entries[i][target_function] for i in ids])\n best_candidate = entries[ids[cc.argsort()[0]]]\n plt.scatter(radius * xx, radius * yy, s=radius ** 2 * aa, c=cc, alpha=0.5 * radius, edgecolors='none')\n if tag is not None:\n for i in entries:\n if i == best_candidate:\n ax.text(radius * entries[i]['x'], radius * entries[i]['y'], str(entries[i][tag]),\n va='center',\n ha='center', size=int(radius * 24), color='red')\n else:\n ax.text(radius * entries[i]['x'], radius * entries[i]['y'], str(entries[i][tag]),\n va='center',\n ha='center', size=int(radius * 14))\n plt.savefig(searcher.population.name + '_circular.png')\n\n\ngen_size = 80\nletterpage = (11, 8.5)\n\ndbsettings = {'host': 'mongo01.systems.wvu.edu', 'name': 'OrbitalGAF', 'user': 'gufranco', 'passwd': 'zxcvbnm'}\npcdb = pychemia.db.get_database(dbsettings)\npopu = pychemia.population.orbitaldftu.OrbitalDFTU(pcdb)\nsearcher = pychemia.searcher.FireFly(popu, generation_size=80)\nsearcher.recover()\n\nif __name__ == '__main__':\n pdf = PdfPages(name + 'multipage.pdf')\n\n print(popu)\n\n colors = ['r', 'g', 'm', 'c', 'y', 'k']\n color_tags = {}\n for i in tags:\n if len(colors) == 0:\n colors = ['r', 'g', 'm', 'c', 'y', 'k']\n color_tags[i] = colors.pop(0)\n\n plot_generation_chart(searcher, gen_size)\n\n sys.exit(1)\n\n # Change of energy vs distance\n print('Plot: Change of energy vs distance')\n ret = {}\n color = {'replace_by_other': 'r', 'replace_by_random': 'b'}\n plt.figure(figsize=letterpage)\n for change in ['replace_by_other', 'replace_by_random']:\n print('Searching for: ', change)\n ret[change] = []\n for entry in popu.pcdb.db.generation_changes.find({'change': change}):\n id_from = entry['from']\n id_to = entry['to']\n # print \"%s --> %s\" % (id_from, id_to)\n x = popu.distance(id_from, id_to)\n if popu.is_evaluated(id_from) and popu.is_evaluated(id_to):\n values = popu.get_values([id_from, id_to])\n delta_energy = values[id_to] - values[id_from]\n y = delta_energy\n ret[change].append([x, y])\n ret[change] = np.array(ret[change])\n if len(ret[change]) > 0:\n plt.plot(ret[change][:, 0], ret[change][:, 1], color[change] + 'o', label=change)\n\n plt.xlim(min(ret[change][:, 0]) - 0.01, 0.01 + max(ret[change][:, 0]))\n plt.xlabel('Distance between structures (after relaxation)')\n plt.ylabel('Energy change')\n plt.title('Change of energy due to movement of structures')\n plt.legend()\n pdf.savefig()\n plt.close()\n\n # Best energies vs Generations\n miny = min(known_structures['energy_pa'])\n maxy = max(known_structures['energy_pa'])\n rangey = (miny - 0.1 * (maxy - miny), maxy + 0.1 * (maxy - miny))\n\n plt.figure(figsize=letterpage)\n\n maxx = 0\n for itag in tags:\n\n pcdb = pychemia.db.PyChemiaDB(name, host=host, user=user, passwd=passwd, ssl=ssl)\n popu = 
pychemia.population.RelaxStructures(pcdb, tag=itag)\n\n inigen = fingen = 0\n inigen, fingen = get_generation_limits(popu, gen_size)\n\n if fingen > maxx:\n maxx = fingen\n\n # All energies vs Generations\n sys.stdout.write(\"Plot: All energies vs Generations:\")\n for j in range(inigen, fingen):\n sys.stdout.write(' %d' % j)\n thegen = get_generation(popu, j)\n energies = []\n for i in range(len(thegen)):\n struct, properties, status = popu.pcdb.get_dicts(thegen[i])\n energy = properties['energy_pa']\n energies.append(energy)\n if len(energies) > 0:\n for i in range(len(energies)):\n plt.plot([j, j + 1 - 0.1], [energies[i], energies[i]], color_tags[itag] + '-')\n plt.text(j + 0.9 * float(i) / len(energies), energies[i], str(i),\n ha=\"center\", family='sans-serif', size=6)\n sys.stdout.write('\\n')\n\n for i in range(number_known):\n energy_pa = known_structures['energy_pa'][i]\n spacegroup = known_structures['spacegroup'][i]\n plt.plot([inigen, maxx], energy_pa * np.ones(2), 'b--')\n plt.text(fingen, energy_pa, str(spacegroup), ha=\"left\", family='sans-serif', size=6, color='b')\n\n plt.xticks(range(maxx))\n plt.ylabel('Energy per atom [eV]')\n plt.xlabel('Generations')\n plt.title('Energies')\n pdf.savefig()\n plt.close()\n\n plt.figure(figsize=letterpage)\n maxx = 0\n for itag in tags:\n\n pcdb = pychemia.db.PyChemiaDB(name, host=host, user=user, passwd=passwd, ssl=ssl)\n popu = pychemia.population.RelaxStructures(pcdb, tag=itag)\n\n inigen, fingen = get_generation_limits(popu, gen_size)\n if fingen > maxx:\n maxx = fingen\n\n sys.stdout.write(\"Plot: Best energies vs Generations:\")\n best = 16\n for j in range(inigen, fingen):\n sys.stdout.write(' %d' % j)\n thegen = get_generation(popu, j)[:best]\n energies = []\n spacegroups = []\n for i in range(len(thegen)):\n struct, properties, status = popu.pcdb.get_dicts(thegen[i])\n energies.append(properties['energy_pa'])\n spacegroups.append(properties['spacegroup'])\n if len(energies) > 0:\n for i in range(len(energies)):\n if rangey[0] < energies[i] < rangey[1]:\n plt.plot([j, j + 1 - 0.1], [energies[i], energies[i]], color_tags[itag] + '-')\n plt.text(j + 0.9 * float(i) / len(energies), energies[i], str(i),\n ha=\"center\", family='sans-serif', size=6, color='0.9')\n plt.text(j + 0.9, energies[i], str(spacegroups[i]),\n ha=\"center\", family='sans-serif', size=6, color=color_tags[itag])\n sys.stdout.write('\\n')\n\n for i in range(number_known):\n energy_pa = known_structures['energy_pa'][i]\n spacegroup = known_structures['spacegroup'][i]\n plt.plot([inigen, maxx], energy_pa * np.ones(2), 'b--')\n plt.text(maxx, energy_pa, str(spacegroup), ha=\"left\", family='sans-serif', size=6, color='b')\n\n plt.ylim(*rangey)\n plt.xticks(range(maxx))\n plt.ylabel('Energy per atom [eV]')\n plt.xlabel('Generations')\n plt.title('Energies')\n pdf.savefig()\n\n plt.figure(figsize=letterpage)\n\n for i in range(number_known):\n energy_pa = known_structures['energy_pa'][i]\n spacegroup = known_structures['spacegroup'][i]\n plt.plot([0, 1], energy_pa * np.ones(2), 'b--')\n plt.text(0.5, energy_pa, str(spacegroup), ha=\"left\", family='sans-serif', size=6, color='b')\n\n index = 1\n for itag in tags:\n\n pcdb = pychemia.db.PyChemiaDB(name, host=host, user=user, passwd=passwd, ssl=ssl)\n popu = pychemia.population.RelaxStructures(pcdb, tag=itag)\n pre_energy = 1E10\n\n for entry_id in popu.ids_sorted(popu.evaluated):\n entry = popu.get_entry(entry_id)\n\n if itag in entry['status']:\n energy_pa = entry['properties']['energy_pa']\n spacegroup = 
entry['properties']['spacegroup']\n if np.abs(pre_energy - energy_pa) > 1E-3 and rangey[0] < energy_pa < rangey[1]:\n plt.plot([index, index + 1], energy_pa * np.ones(2), color_tags[itag] + '--')\n plt.text(index + 0.5, energy_pa, str(spacegroup),\n ha=\"left\",\n family='sans-serif',\n size=6,\n color=color_tags[itag])\n pre_energy = energy_pa\n\n index += 1\n\n plt.ylim(*rangey)\n plt.xticks(0.5 + np.arange(len(tags) + 1), ['CIFs'] + tags)\n plt.ylabel('Energy per atom [eV]')\n plt.xlabel('Structures found')\n plt.title('Energies')\n pdf.savefig()\n\n plt.figure(figsize=letterpage)\n\n for i in range(number_known):\n energy_pa = known_structures['energy_pa'][i]\n spacegroup = known_structures['spacegroup'][i]\n plt.plot([0, 1], energy_pa * np.ones(2), 'b-')\n # plt.text(0.5, energy_pa,str(spacegroup), ha=\"left\", family='sans-serif', size=6, color='b')\n\n pcdb = pychemia.db.PyChemiaDB(name, host=host, user=user, passwd=passwd, ssl=ssl)\n popu = pychemia.population.RelaxStructures(pcdb, distance_tol=0.4, value_tol=0.1)\n pre_energy = 1E10\n\n selection = popu.evaluated\n n = len(selection)\n while True:\n selection = popu.cleaned_from_duplicates(selection)\n if len(selection) == n:\n break\n n = len(selection)\n print('Selection', n)\n\n for entry_id in popu.ids_sorted(selection):\n entry = popu.get_entry(entry_id)\n energy_pa = entry['properties']['energy_pa']\n spacegroup = entry['properties']['spacegroup']\n if np.abs(pre_energy - energy_pa) > 1E-3 and rangey[0] < energy_pa < rangey[1]:\n plt.plot([1, 2], energy_pa * np.ones(2), 'r-')\n # plt.text(1.5, energy_pa,str(spacegroup), ha=\"left\", family='sans-serif', size=6, color='r')\n pre_energy = energy_pa\n\n plt.ylim(*rangey)\n plt.xticks(0.5 + np.arange(2), ['Known Structures', 'New predicted'])\n plt.ylabel('Energy per atom [eV]')\n plt.xlabel('Structures found')\n plt.title('Energies')\n pdf.savefig()\n\n pdf.close()\n", "import os\nimport numpy as np\nfrom scipy.io.netcdf import netcdf_file\n\n\ndef file2dict(filename):\n\n if not os.path.isfile(filename):\n raise ValueError(\"ERROR: Could not read %s\" % filename)\n\n nc = netcdf_file(filename, 'r', mmap=False)\n ret = {}\n for ikey in nc.variables.keys():\n data = nc.variables[ikey].data\n if type(data[0]) == np.float64:\n if len(data) == 1:\n data = float(data[0])\n else:\n data = [float(x) for x in data]\n elif type(data[0]) == np.int32:\n if len(data) == 1:\n data = int(data[0])\n else:\n data = [int(x) for x in data]\n else:\n data = list(data)\n\n ret[ikey] = data\n del data\n\n nc.close()\n return ret\n\n\ndef netcdf2dict(filename):\n \"\"\"\n Read a NetCDF file and create a python dictionary with\n numbers or lists for each variable\n\n Args:\n filename:\n NetCDF filename\n \"\"\"\n if not os.path.isfile(filename):\n print('ERROR: No such file: ', filename)\n return None\n ret = {}\n netcdfile = netcdf_file(filename, 'r', mmap=False)\n for ii in netcdfile.variables.keys():\n ret[ii] = netcdfile.variables[ii][:]\n netcdfile.close()\n\n for i in ret:\n if ret[i].dtype == np.dtype('>f8'):\n ret[i] = [round(x, 11) for x in ret[i].flatten()]\n elif ret[i].dtype == np.dtype('>i4'):\n ret[i] = [int(x) for x in ret[i].flatten()]\n\n for i in ret:\n if len(ret[i]) == 1:\n ret[i] = ret[i][0]\n\n return ret\n", "\"\"\"\nChemical composition is just the description of the amount of atoms of each specie. In the case of clusters or\nmolecules, ie a finite structure, it represents the complete set of atoms. 
For periodic structures it represents\nthe species present in a cell.\n\"\"\"\n\nimport re\nfrom numpy import array, argsort\nfrom math import gcd as _gcd\nfrom math import pi\nfrom pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius\nfrom pychemia.utils.computing import deep_unicode\nfrom functools import reduce\nfrom collections.abc import Mapping\n\n\nclass Composition(Mapping):\n \"\"\"\n A Composition is basically a mapping between a number of species and an integer indicating how many atoms of that\n specie are present in the structure.\n A composition object does not contain geometrical information or bonding.\n\n The main purpose of this class is to be able to parse formulas into compositions and return string formulas sorted\n in various ways.\n \"\"\"\n\n def __init__(self, value=None):\n \"\"\"\n Creates a new composition, currently only absolute formulas are supported.\n\n :param value: (str, dict) The input argument could be a string with a chemical formula or the actual dictionary\n of species and values. The order of species is not guaranteed to be preserved. An iterable of atomic symbols\n is also accepted to build a composition object.\n\n :rtype: Composition\n\n >>> cp = Composition({'Ba': 2, 'Cu': 3, 'O': 7, 'Y': 1})\n >>> cp.formula\n 'Ba2Cu3O7Y'\n >>> cp = Composition('Ba2Cu3O7Y')\n >>> cp2 = Composition(cp)\n >>> len(cp2)\n 4\n >>> cp.nspecies\n 4\n >>> cp = Composition(['O', 'H', 'O'])\n >>> len(cp)\n 2\n >>> cp['O']\n 2\n\n \"\"\"\n # The internal dictionary where atom species and numbers of atoms of each specie are stored.\n self._composition = {}\n # Convert strings and dictionaries into unicode\n if value is not None:\n value = deep_unicode(value)\n # Case 1: The input is a formula\n if isinstance(value, str):\n self._set_composition(self.formula_parser(value))\n # Case 2: The input is a dictionary\n elif isinstance(value, dict):\n self._set_composition(value)\n # Case 3: The input is another composition object\n elif isinstance(value, Composition):\n self._set_composition(value.composition)\n # Case 4: The input is an iterable of atomic symbols\n elif hasattr(value, \"__len__\"):\n dvalue = {}\n for i in value:\n if i in dvalue:\n dvalue[i] += 1\n else:\n dvalue[i] = 1\n self._set_composition(dvalue)\n else:\n self._composition = {}\n\n def __len__(self):\n return len(self._composition)\n\n def __getitem__(self, specie):\n \"\"\"\n Returns the number of atoms of a given specie\n\n :param specie: Atomic Symbol for which the value will be returned\n :return: number of atoms of the given specie\n :rtype: int\n\n >>> comp = Composition('H2')\n >>> comp['H']\n 2\n >>> comp['He']\n 0\n \"\"\"\n if specie in self._composition:\n return self._composition[specie]\n else:\n return 0\n\n def __repr__(self):\n \"\"\"\n Evaluable representation of Composition object\n\n :return: Text representation that can be evaluated\n :rtype: str\n\n >>> cp1 = Composition('H2O')\n >>> cp2 = eval(repr(cp1))\n >>> cp2 == cp1\n True\n \"\"\"\n return 'Composition(' + str(self.composition) + ')'\n\n def __str__(self):\n \"\"\"\n\n :return: String representation of the composition\n\n >>> cp = Composition('YBa2Cu3O7')\n >>> 'Cu' in str(cp)\n True\n \"\"\"\n ret = ''\n for i in self.species:\n ret += \" %3s: %4d \" % (i, self.composition[i])\n return ret\n\n def __iter__(self):\n return iter(self.composition)\n\n def __contains__(self, specie):\n \"\"\"True if 'specie' is present in composition\n\n :return: True if specie is present\n :param specie:
atomic specie\n :rtype: bool\n\n >>> cp = Composition('H2O')\n >>> 'He' in cp\n False\n \"\"\"\n return specie in self._composition\n\n def _set_composition(self, value):\n \"\"\"\n Checks the values of a dictionary before setting the actual composition\n\n :param value: (dict)\n :rtype: None\n \"\"\"\n for i in value:\n assert (i in atomic_symbols)\n assert (isinstance(value[i], int))\n self._composition = value.copy()\n\n @property\n def composition(self):\n \"\"\"Dictionary with composition\n\n :return: The composition dictionary\n :rtype: dict\n\n >>> import pprint\n >>> cp = Composition('H2O')\n >>> pprint.pprint(cp.composition)\n {'H': 2, 'O': 1}\n\n \"\"\"\n return self._composition\n\n def covalent_volume(self, packing='cubes'):\n \"\"\"\n :param packing: The kind of packing could be 'cubes' or 'spheres'\n :type packing: str\n :return: The volume occupied by a given formula assuming a 'cubes' packing or 'spheres' packing\n :rtype: (float)\n\n >>> cp = Composition('C5H10')\n >>> cp.covalent_volume()\n 19.942320000000002\n >>> cp.covalent_volume(packing='spheres')\n 10.441774334589468\n \"\"\"\n if packing == 'cubes':\n factor = 8\n elif packing == 'spheres':\n factor = 4 * pi / 3.0\n else:\n raise ValueError('Non-valid packing: \"%s\"' % packing)\n\n # find volume of unit cell by adding cubes\n volume = 0.0\n for specie in self:\n number_atoms_specie = self.composition[specie]\n # Pack each atom in a cube (2*r)^3\n volume += factor * number_atoms_specie * covalent_radius(specie) ** 3\n return volume\n\n @property\n def formula(self):\n \"\"\"Chemical formula\n\n :return: The chemical formula with atoms sorted alphabetically\n :rtype: str\n\n >>> cp = Composition('NaCl')\n >>> cp.formula\n 'ClNa'\n\n \"\"\"\n return self.sorted_formula(sortby='alpha', reduced=True)\n\n @staticmethod\n def formula_parser(value):\n \"\"\"Return a dictionary from a chemical formula\n\n :return: Convert an string representing a chemical formula into a dictionary with the species as keys\n and values as the number of atoms of that specie\n :param value: (str) Chemical formula\n :rtype: dict\n\n >>> import pprint\n >>> Composition.formula_parser('Au20')\n {'Au': 20}\n >>> ret = Composition.formula_parser('UutUupUusUuo')\n >>> pprint.pprint(ret)\n {'Uuo': 1, 'Uup': 1, 'Uus': 1, 'Uut': 1}\n \"\"\"\n ret = {}\n jump = False\n for i in range(len(value)):\n if jump > 0: # This char belongs to the current atom, move on\n jump -= 1\n elif value[i].isupper(): # Atom Name starts with Uppercase\n if i + 1 < len(value) and value[i + 1].islower(): # Atom name has more than 1 char\n if i + 2 < len(value) and value[i + 2].islower(): # Atom name has more than 2 chars\n specie = value[i:i + 3]\n jump = 2\n else:\n specie = value[i:i + 2]\n jump = 1\n else:\n specie = value[i]\n jump = 0\n j = 1\n number = ''\n while True:\n if i + jump + j < len(value) and value[i + jump + j].isdigit():\n number += value[i + jump + j]\n j += 1\n else:\n break\n if number == '':\n ret[specie] = 1\n else:\n ret[specie] = int(number)\n return ret\n\n @staticmethod\n def formula_to_list(formula, nunits=1):\n \"\"\"\n Reads a formula and returns a list of atomic symbols consistent with the formula and the number of\n formulas given by nunits\n\n :param formula: (str) Chemical formula as string\n :param nunits: (int) Number of formulas to apply\n :return: list of atomic symbols\n :rtype: list\n\n >>> Composition.formula_to_list('NaCl')\n ['Na', 'Cl']\n >>> flist = Composition.formula_to_list(u'Uut2Uup3Uus4Uuo5')\n >>> len(flist)\n 14\n >>> 
flist = Composition.formula_to_list('Uut2Uup3Uus4Uuo5', nunits=2)\n >>> len(flist)\n 28\n \"\"\"\n # decompose composition\n a = re.findall(r\"[A-Z][a-z0-9]*\", formula)\n composition = []\n for i in a:\n m = re.match(r\"([A-Za-z]+)([0-9]*)\", i)\n if m.group(2) == \"\":\n n = int(1)\n else:\n n = int(m.group(2))\n\n for j in range(n * nunits):\n composition.append(m.group(1))\n\n return composition\n\n @property\n def gcd(self):\n \"\"\" Number of minimal formulas on a given composition.\n\n :return: The number of formulas that can be extracted from a composition ie, the greatest common denominator\n for the composition.\n :rtype: int\n\n >>> cp = Composition('NaCl')\n >>> cp.gcd\n 1\n >>> cp = Composition('Na2Cl2')\n >>> cp.gcd\n 2\n >>> cp = Composition()\n >>> cp.gcd is None\n True\n \"\"\"\n if self.natom > 0:\n return reduce(_gcd, self.values)\n else:\n return None\n\n @staticmethod\n def get_species_from_hex(arg):\n \"\"\"List of species encoded for hex string produced by species_hex\n\n :return: Return a set of species from the encoded species created by the output of \"species_hex\" method.\n :param arg: str String with hexadecimal representation of list of species.\n\n >>> Composition.get_species_from_hex('0x38271d08')\n [8, 29, 39, 56]\n \"\"\"\n num = int(arg, 16)\n ret = []\n while num > 0:\n ret.append(num % 256)\n num = (num-ret[-1])//256\n return ret\n\n @property\n def natom(self):\n \"\"\"\n :return: The number of atoms in the composition\n :rtype: int\n\n >>> cp = Composition('H2O')\n >>> cp.natom\n 3\n \"\"\"\n return sum(self.values)\n\n @property\n def nspecies(self):\n \"\"\"\n :return: Number of species in the composition\n :rtype: int\n\n >>> cp = Composition('H2O')\n >>> cp.nspecies\n 2\n \"\"\"\n return len(self.species)\n\n @property\n def symbols(self):\n \"\"\"List of species on the composition\n\n :return: A list of atomic symbols\n :rtype: list\n\n >>> cp = Composition('H2O')\n >>> cp.symbols\n ['H', 'H', 'O']\n \"\"\"\n ret = []\n for specie in self:\n number_atoms_specie = self.composition[specie]\n for i in range(number_atoms_specie):\n ret.append(specie)\n return sorted(deep_unicode(ret))\n\n @property\n def species(self):\n \"\"\"List of species on the composition\n\n :return: The list of species, no particular order but atoms of the same specie are contiguous.\n :rtype: list\n\n >>> cp = Composition('H2O')\n >>> sorted(cp.species)\n ['H', 'O']\n \"\"\"\n return [deep_unicode(x) for x in self._composition]\n\n def sorted_formula(self, sortby='alpha', reduced=True):\n \"\"\"\n :return: The chemical formula. It could be sorted alphabetically using sortby='alpha', by electronegativity\n using sortby='electronegativity' or using Hill System with sortby='Hill'\n Just the first 3 letters are unambiguous and case is not taken in account so you can use 'alp', 'hil'\n or 'ele'\n :param sortby: (str) 'alpha' : Alphabetically\n 'electronegativity' : Electronegativity\n 'hill' : Hill System\n :param reduced: (bool) If the formula should be normalized\n :rtype: str\n\n .. 
notes: Hill exceptions have not been implemented yet\n\n >>> cp = Composition('YBa2Cu3O7')\n >>> cp.sorted_formula()\n 'Ba2Cu3O7Y'\n >>> cp.sorted_formula(sortby='hill')\n 'Ba2Cu3O7Y'\n >>> cp.sorted_formula(sortby='electroneg')\n 'Ba2YCu3O7'\n >>> cp = Composition('H10C5')\n >>> cp.sorted_formula(sortby='hill', reduced=True)\n 'CH2'\n >>> cp = Composition('IBr')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'BrI'\n >>> cp = Composition('Cl4C')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'CCl4'\n >>> cp = Composition('IH3C')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'CH3I'\n >>> cp = Composition('BrH5C2')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'C2H5Br'\n >>> cp = Composition('S04H2')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'H2S4'\n >>> cp = Composition('SO4H2')\n >>> cp.sorted_formula(sortby='hill', reduced=False)\n 'H2O4S'\n \"\"\"\n if reduced and self.gcd > 1:\n comp = Composition(self.composition)\n for i in comp.composition:\n comp._composition[i] //= self.gcd\n else:\n comp = self\n if sortby.lower()[:3] == 'ele':\n electroneg = list(electronegativity(comp.species))\n # No longer needed as electronegativity will return 0 for 'None' values\n # for i in range(len(electroneg)):\n # if electroneg[i] is None:\n # electroneg[i] = -1\n sortedspecies = array(comp.species)[argsort(electroneg)]\n elif sortby.lower()[:3] == \"hil\": # FIXME: Hill system exceptions not implemented\n sortedspecies = []\n presortedspecies = sorted(comp.species)\n if 'C' in presortedspecies:\n sortedspecies.append('C')\n presortedspecies.pop(presortedspecies.index('C'))\n if 'H' in presortedspecies:\n sortedspecies.append('H')\n presortedspecies.pop(presortedspecies.index('H'))\n sortedspecies += presortedspecies\n else:\n sortedspecies = sorted(comp.species)\n ret = u''\n for specie in sortedspecies:\n ret += '%s' % specie\n if comp.composition[specie] > 1:\n ret += \"%d\" % comp.composition[specie]\n return deep_unicode(ret)\n\n def species_encoded(self, base):\n \"\"\"Encode the list of species with a number\n\n :return: Encodes the species as a number.\n :param base: Integer used as base for encoding.\n :rtype: int\n\n >>> cp = Composition('H2O')\n >>> cp.species_encoded(100)\n 801\n \"\"\"\n ret = 0\n i = 0\n for atom_number in sorted(atomic_number(self.species)):\n ret += atom_number * (base ** i)\n i += 1\n return ret\n\n def species_hex(self):\n \"\"\"Encoding in hexadecimal with 1 byte per specie (base 256)\n\n :return: Encodes the species into a hexadecimal representation where each specie is stored in a 1-byte slot\n ordered by atomic number.\n The output produces a unique encoding where every 2 hexadecimal characters encode a single\n specie and the species are ordered by atomic number, making the codification unique.\n :rtype: str\n\n >>> cp = Composition('YBa2Cu3O7')\n >>> cp.species_hex()\n '0x38271d08'\n \"\"\"\n enc = self.species_encoded(256)\n return hex(enc)\n\n @property\n def values(self):\n \"\"\"\n :return: The number of atoms of each specie\n :rtype: list\n\n >>> cp = Composition('YBa2Cu3O7')\n >>> sorted(cp.values)\n [1, 2, 3, 7]\n \"\"\"\n return [self._composition[x] for x in self._composition]\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.backends.backend_pdf.PdfPages", "numpy.arange", "numpy.sin", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.text", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.collections.PatchCollection", "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.lines.Line2D", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.ones", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel" ], [ "scipy.io.netcdf.netcdf_file", "numpy.dtype" ], [ "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
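The base-256 species encoding documented in species_encoded / species_hex / get_species_from_hex above can be exercised in isolation. The following is a minimal standalone sketch (hypothetical helper names, not part of pychemia) that reproduces the documented round trip for YBa2Cu3O7, whose atomic numbers are O=8, Cu=29, Y=39, Ba=56:

def encode_species(atomic_numbers, base=256):
    # Sort so the encoding is independent of input order; each atomic
    # number (< 256) occupies one base-256 digit, i.e. one byte.
    ret = 0
    for i, z in enumerate(sorted(atomic_numbers)):
        ret += z * base ** i
    return ret

def decode_species(hex_string, base=256):
    # Inverse of encode_species: peel off one base-256 digit at a time,
    # which recovers the atomic numbers in ascending order.
    num = int(hex_string, 16)
    ret = []
    while num > 0:
        ret.append(num % base)
        num //= base
    return ret

assert hex(encode_species([39, 56, 29, 8])) == '0x38271d08'
assert decode_species('0x38271d08') == [8, 29, 39, 56]

Because the atomic numbers are sorted before encoding, any permutation of the same species set maps to the same hex string, which is what makes the codification unique.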
asappresearch/compositional-inductive-bias
[ "2c67713306ec6591f397ca252f915c3edc5a794f" ]
[ "ulfs/gumbel.py" ]
[ "import torch\nfrom typing import Optional\nimport torch.nn.functional as F\n\n\ndef sample_gumbel(shape: torch.Size, eps: float = 1e-10):\n \"\"\"\n Sample from Gumbel(0, 1)\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n U = torch.rand(shape).float()\n return - torch.log(eps - torch.log(U + eps))\n\n\ndef gumbel_softmax_sample(\n logits: torch.Tensor, tau: float, eps: float = 1e-10, gumbel_noise: Optional[torch.Tensor] = None\n) -> torch.Tensor:\n \"\"\"\n Draw a sample from the Gumbel-Softmax distribution\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb\n (MIT license)\n \"\"\"\n if gumbel_noise is None:\n gumbel_noise = sample_gumbel(logits.size(), eps=eps)\n y = logits + gumbel_noise\n res = F.softmax(y / tau, dim=-1)\n return res\n\n\ndef gumbel_softmax(logits: torch.Tensor, tau: float, hard: bool, eps: float = 1e-10) -> torch.Tensor:\n \"\"\"\n Sample from the Gumbel-Softmax distribution and optionally discretize.\n Args:\n logits: [batch_size, n_class] unnormalized log-probs\n tau: non-negative scalar temperature\n hard: if True, take argmax, but differentiate w.r.t. soft sample y\n Returns:\n [batch_size, n_class] sample from the Gumbel-Softmax distribution.\n If hard=True, then the returned sample will be one-hot, otherwise it will\n be a probability distribution that sums to 1 across classes\n Constraints:\n - this implementation only works on batch_size x num_features tensor for now\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n shape = logits.size()\n assert len(shape) == 2\n y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)\n if hard:\n _, k = y_soft.detach().max(-1)\n # this bit is based on\n # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5\n y_hard = torch.FloatTensor(*shape).zero_().scatter_(-1, k.view(-1, 1), 1.0)\n # this cool bit of code achieves two things:\n # - makes the output value exactly one-hot (since we add then\n # subtract y_soft value)\n # - makes the gradient equal to y_soft gradient (since we strip\n # all other gradients)\n y = (y_hard - y_soft).detach() + y_soft\n else:\n y = y_soft\n return y\n" ]
[ [ "torch.FloatTensor", "torch.nn.functional.softmax", "torch.log", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
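A hedged usage sketch for the gumbel_softmax function above; the import path is an assumption inferred from the record's file_path (ulfs/gumbel.py), and the comments only restate what the docstring promises:

import torch
from ulfs.gumbel import gumbel_softmax  # assumed importable from the file_path above

logits = torch.randn(4, 10, requires_grad=True)  # [batch_size, n_class]
y = gumbel_softmax(logits, tau=1.0, hard=True)
print(y.sum(dim=-1))     # each row sums to 1; with hard=True each row is one-hot
print(y.argmax(dim=-1))  # the sampled class index per row

# Gradients still reach the logits through the soft sample, via the
# straight-through trick y = (y_hard - y_soft).detach() + y_soft.
loss = (y * torch.randn(4, 10)).sum()
loss.backward()
print(logits.grad.shape)  # torch.Size([4, 10])

With hard=False the same call returns the soft relaxation directly, trading the discrete one-hot output for a lower-variance gradient.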
jkariukidev/geemap
[ "51cbd3169cd077f3666173a0c4b02e70f7207870" ]
[ "geemap/toolbar.py" ]
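geemap/toolbar.py follows. Its tools share one UI pattern: each tool is a VBox whose children swap between a lone gear ToggleButton (collapsed) and a header (close + gear button) plus a footer of controls (expanded), with the whole VBox attached to the map as an ipyleaflet.WidgetControl. Here, before the module itself, is a minimal standalone sketch of that expand/collapse wiring, distilled from the tool_template function below (illustrative only, not geemap's public API):

import ipywidgets as widgets

toolbar_button = widgets.ToggleButton(
    value=False, tooltip="Toolbar", icon="gear",
    layout=widgets.Layout(width="28px", height="28px"),
)
close_button = widgets.ToggleButton(
    value=False, tooltip="Close the tool", icon="times", button_style="primary",
    layout=widgets.Layout(width="28px", height="28px"),
)
toolbar_header = widgets.HBox([close_button, toolbar_button])
toolbar_footer = widgets.VBox([widgets.Label("tool-specific controls go here")])
toolbar_widget = widgets.VBox([toolbar_button])

def toolbar_btn_click(change):
    # Toggle on: expand to header + footer. Toggle off: collapse back to
    # the lone button, unless the close button triggered the change.
    if change["new"]:
        close_button.value = False
        toolbar_widget.children = [toolbar_header, toolbar_footer]
    elif not close_button.value:
        toolbar_widget.children = [toolbar_button]

def close_btn_click(change):
    # Closing tears the widget down; on a map the corresponding
    # WidgetControl would also be removed (see the close handlers below).
    if change["new"]:
        toolbar_button.value = False
        toolbar_widget.close()

toolbar_button.observe(toolbar_btn_click, "value")
close_button.observe(close_btn_click, "value")
toolbar_widget  # display in a notebook, or wrap in ipyleaflet.WidgetControl(widget=toolbar_widget)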
[ "\"\"\"Module for dealing with the toolbar.\n\"\"\"\nimport os\n\nimport ee\nimport ipyevents\nimport ipyleaflet\nimport ipywidgets as widgets\nfrom ipyfilechooser import FileChooser\nfrom IPython.core.display import display\n\nfrom .common import *\nfrom .timelapse import *\n\n\ndef tool_template(m=None):\n\n widget_width = \"250px\"\n padding = \"0px 0px 0px 5px\" # upper, right, bottom, left\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Toolbar\",\n icon=\"gear\",\n layout=widgets.Layout(width=\"28px\", height=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n checkbox = widgets.Checkbox(\n description=\"Checkbox\",\n indent=False,\n layout=widgets.Layout(padding=padding, width=widget_width),\n )\n\n dropdown = widgets.Dropdown(\n options=[\"Option 1\", \"Option 2\", \"Option 3\"],\n value=None,\n description=\"Dropdown:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n int_slider = widgets.IntSlider(\n min=1,\n max=100,\n description=\"Int Slider: \",\n readout=False,\n continuous_update=True,\n layout=widgets.Layout(width=\"220px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n int_slider_label = widgets.Label()\n widgets.jslink((int_slider, \"value\"), (int_slider_label, \"value\"))\n\n float_slider = widgets.FloatSlider(\n min=1,\n max=100,\n description=\"Float Slider: \",\n readout=False,\n continuous_update=True,\n layout=widgets.Layout(width=\"220px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n float_slider_label = widgets.Label()\n widgets.jslink((float_slider, \"value\"), (float_slider_label, \"value\"))\n\n color = widgets.ColorPicker(\n concise=False,\n description=\"Color:\",\n value=\"white\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n text = widgets.Text(\n value=\"\",\n description=\"Textbox:\",\n placeholder=\"Placeholder\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n textarea = widgets.Textarea(\n placeholder=\"Placeholder\",\n layout=widgets.Layout(width=widget_width),\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Apply\", \"Reset\", \"Close\"],\n tooltips=[\"Apply\", \"Reset\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"80px\"\n\n output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_header = widgets.HBox()\n toolbar_header.children = [close_button, toolbar_button]\n toolbar_footer = widgets.VBox()\n toolbar_footer.children = [\n checkbox,\n widgets.HBox([int_slider, int_slider_label]),\n widgets.HBox([float_slider, float_slider_label]),\n dropdown,\n text,\n color,\n textarea,\n buttons,\n output,\n ]\n\n toolbar_event = ipyevents.Event(\n source=toolbar_widget, watched_events=[\"mouseenter\", \"mouseleave\"]\n )\n\n def handle_toolbar_event(event):\n\n if event[\"type\"] == \"mouseenter\":\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n elif event[\"type\"] == \"mouseleave\":\n if not toolbar_button.value:\n toolbar_widget.children = [toolbar_button]\n 
toolbar_button.value = False\n close_button.value = False\n\n toolbar_event.on_dom_event(handle_toolbar_event)\n\n def toolbar_btn_click(change):\n if change[\"new\"]:\n close_button.value = False\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n else:\n if not close_button.value:\n toolbar_widget.children = [toolbar_button]\n\n toolbar_button.observe(toolbar_btn_click, \"value\")\n\n def close_btn_click(change):\n if change[\"new\"]:\n toolbar_button.value = False\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n toolbar_widget.close()\n\n close_button.observe(close_btn_click, \"value\")\n\n def button_clicked(change):\n if change[\"new\"] == \"Apply\":\n with output:\n output.clear_output()\n print(\"Running ...\")\n elif change[\"new\"] == \"Reset\":\n textarea.value = \"\"\n output.clear_output()\n elif change[\"new\"] == \"Close\":\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n toolbar_widget.close()\n\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n toolbar_button.value = True\n if m is not None:\n toolbar_control = ipyleaflet.WidgetControl(\n widget=toolbar_widget, position=\"topright\"\n )\n\n if toolbar_control not in m.controls:\n m.add_control(toolbar_control)\n m.tool_control = toolbar_control\n else:\n return toolbar_widget\n\n\ndef open_data_widget(m):\n \"\"\"A widget for opening local vector/raster data.\n\n Args:\n m (object): geemap.Map\n \"\"\"\n\n padding = \"0px 0px 0px 5px\"\n style = {\"description_width\": \"initial\"}\n\n tool_output = widgets.Output()\n tool_output_ctrl = ipyleaflet.WidgetControl(widget=tool_output, position=\"topright\")\n\n if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:\n m.remove_control(m.tool_output_ctrl)\n\n file_type = widgets.ToggleButtons(\n options=[\"Shapefile\", \"GeoJSON\", \"CSV\", \"Vector\", \"Raster\"],\n tooltips=[\n \"Open a shapefile\",\n \"Open a GeoJSON file\",\n \"Create points from CSV\",\n \"Open a vector dataset\",\n \"Open a raster dataset\",\n ],\n )\n file_type.style.button_width = \"88px\"\n\n filepath = widgets.Text(\n value=\"\",\n description=\"File path or http URL:\",\n tooltip=\"Enter a file path or http URL to vector data\",\n style=style,\n layout=widgets.Layout(width=\"454px\", padding=padding),\n )\n http_widget = widgets.HBox()\n\n file_chooser = FileChooser(\n os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width=\"454px\")\n )\n file_chooser.filter_pattern = \"*.shp\"\n file_chooser.use_dir_icons = True\n\n style = {\"description_width\": \"initial\"}\n layer_name = widgets.Text(\n value=\"Shapefile\",\n description=\"Enter a layer name:\",\n tooltip=\"Enter a layer name for the selected file\",\n style=style,\n layout=widgets.Layout(width=\"454px\", padding=\"0px 0px 0px 5px\"),\n )\n\n longitude = widgets.Dropdown(\n options=[],\n value=None,\n description=\"Longitude:\",\n layout=widgets.Layout(width=\"149px\", padding=\"0px 0px 0px 5px\"),\n style={\"description_width\": \"initial\"},\n )\n\n latitude = widgets.Dropdown(\n options=[],\n value=None,\n description=\"Latitude:\",\n layout=widgets.Layout(width=\"149px\", padding=\"0px 0px 0px 5px\"),\n style={\"description_width\": \"initial\"},\n )\n\n label = widgets.Dropdown(\n options=[],\n value=None,\n 
description=\"Label:\",\n layout=widgets.Layout(width=\"149px\", padding=\"0px 0px 0px 5px\"),\n style={\"description_width\": \"initial\"},\n )\n\n csv_widget = widgets.HBox()\n\n convert_bool = widgets.Checkbox(\n description=\"Convert to ee.FeatureCollection?\",\n indent=False,\n layout=widgets.Layout(padding=\"0px 0px 0px 5px\"),\n )\n convert_hbox = widgets.HBox([convert_bool])\n\n ok_cancel = widgets.ToggleButtons(\n value=None,\n options=[\"Apply\", \"Reset\", \"Close\"],\n tooltips=[\"Apply\", \"Reset\", \"Close\"],\n button_style=\"primary\",\n )\n # ok_cancel.style.button_width = \"133px\"\n\n bands = widgets.Text(\n value=None,\n description=\"Band:\",\n tooltip=\"Enter a list of band indices\",\n style=style,\n layout=widgets.Layout(width=\"150px\", padding=padding),\n )\n\n vmin = widgets.Text(\n value=None,\n description=\"vmin:\",\n tooltip=\"Minimum value of the raster to visualize\",\n style=style,\n layout=widgets.Layout(width=\"148px\"),\n )\n\n vmax = widgets.Text(\n value=None,\n description=\"vmax:\",\n tooltip=\"Maximum value of the raster to visualize\",\n style=style,\n layout=widgets.Layout(width=\"148px\"),\n )\n\n nodata = widgets.Text(\n value=None,\n description=\"Nodata:\",\n tooltip=\"Nodata the raster to visualize\",\n style=style,\n layout=widgets.Layout(width=\"150px\", padding=padding),\n )\n\n palette = widgets.Dropdown(\n options=[],\n value=None,\n description=\"palette:\",\n layout=widgets.Layout(width=\"300px\"),\n style=style,\n )\n\n raster_options = widgets.VBox()\n\n main_widget = widgets.VBox(\n [\n file_type,\n file_chooser,\n http_widget,\n csv_widget,\n layer_name,\n convert_hbox,\n raster_options,\n ok_cancel,\n ]\n )\n\n tool_output.clear_output()\n with tool_output:\n display(main_widget)\n\n def bands_changed(change):\n if change[\"new\"] and \",\" in change[\"owner\"].value:\n palette.value = None\n palette.disabled = True\n else:\n palette.disabled = False\n\n bands.observe(bands_changed, \"value\")\n\n def chooser_callback(chooser):\n\n filepath.value = file_chooser.selected\n\n if file_type.value == \"CSV\":\n import pandas as pd\n\n df = pd.read_csv(filepath.value)\n col_names = df.columns.values.tolist()\n longitude.options = col_names\n latitude.options = col_names\n label.options = col_names\n\n if \"longitude\" in col_names:\n longitude.value = \"longitude\"\n if \"latitude\" in col_names:\n latitude.value = \"latitude\"\n if \"name\" in col_names:\n label.value = \"name\"\n\n file_chooser.register_callback(chooser_callback)\n\n def file_type_changed(change):\n ok_cancel.value = None\n file_chooser.default_path = os.getcwd()\n file_chooser.reset()\n layer_name.value = file_type.value\n csv_widget.children = []\n filepath.value = \"\"\n\n if change[\"new\"] == \"Shapefile\":\n file_chooser.filter_pattern = \"*.shp\"\n raster_options.children = []\n convert_hbox.children = [convert_bool]\n http_widget.children = []\n elif change[\"new\"] == \"GeoJSON\":\n file_chooser.filter_pattern = \"*.geojson\"\n raster_options.children = []\n convert_hbox.children = [convert_bool]\n http_widget.children = [filepath]\n elif change[\"new\"] == \"Vector\":\n file_chooser.filter_pattern = \"*.*\"\n raster_options.children = []\n convert_hbox.children = [convert_bool]\n http_widget.children = [filepath]\n elif change[\"new\"] == \"CSV\":\n file_chooser.filter_pattern = [\"*.csv\", \"*.CSV\"]\n csv_widget.children = [longitude, latitude, label]\n raster_options.children = []\n convert_hbox.children = [convert_bool]\n http_widget.children = 
[filepath]\n elif change[\"new\"] == \"Raster\":\n file_chooser.filter_pattern = [\"*.tif\", \"*.img\"]\n palette.options = get_palettable(types=[\"matplotlib\", \"cartocolors\"])\n palette.value = None\n raster_options.children = [\n widgets.HBox([bands, vmin, vmax]),\n widgets.HBox([nodata, palette]),\n ]\n convert_hbox.children = []\n http_widget.children = [filepath]\n\n def ok_cancel_clicked(change):\n if change[\"new\"] == \"Apply\":\n m.default_style = {\"cursor\": \"wait\"}\n file_path = filepath.value\n\n if file_path is not None:\n ext = os.path.splitext(file_path)[1]\n with tool_output:\n if ext.lower() == \".shp\":\n if convert_bool.value:\n ee_object = shp_to_ee(file_path)\n m.addLayer(ee_object, {}, layer_name.value)\n else:\n m.add_shapefile(\n file_path, style={}, layer_name=layer_name.value\n )\n elif ext.lower() == \".geojson\":\n if convert_bool.value:\n ee_object = geojson_to_ee(file_path)\n m.addLayer(ee_object, {}, layer_name.value)\n else:\n m.add_geojson(\n file_path, style={}, layer_name=layer_name.value\n )\n\n elif ext.lower() == \".csv\":\n if convert_bool.value:\n ee_object = csv_to_ee(\n file_path, latitude.value, longitude.value\n )\n m.addLayer(ee_object, {}, layer_name.value)\n else:\n m.add_xy_data(\n file_path,\n x=longitude.value,\n y=latitude.value,\n label=label.value,\n layer_name=layer_name.value,\n )\n\n elif ext.lower() in [\".tif\", \".img\"] and file_type.value == \"Raster\":\n band = None\n vis_min = None\n vis_max = None\n vis_nodata = None\n\n try:\n if len(bands.value) > 0:\n band = int(bands.value)\n if len(vmin.value) > 0:\n vis_min = float(vmin.value)\n if len(vmax.value) > 0:\n vis_max = float(vmax.value)\n if len(nodata.value) > 0:\n vis_nodata = float(nodata.value)\n except Exception:\n pass\n\n m.add_local_tile(\n file_path,\n layer_name=layer_name.value,\n band=band,\n palette=palette.value,\n vmin=vis_min,\n vmax=vis_max,\n nodata=vis_nodata,\n )\n else:\n m.add_vector(file_path, style={}, layer_name=layer_name.value)\n else:\n print(\"Please select a file to open.\")\n\n m.toolbar_reset()\n m.default_style = {\"cursor\": \"default\"}\n\n elif change[\"new\"] == \"Reset\":\n file_chooser.reset()\n tool_output.clear_output()\n with tool_output:\n display(main_widget)\n m.toolbar_reset()\n elif change[\"new\"] == \"Close\":\n if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:\n m.remove_control(m.tool_output_ctrl)\n m.tool_output_ctrl = None\n m.toolbar_reset()\n\n ok_cancel.value = None\n\n file_type.observe(file_type_changed, names=\"value\")\n ok_cancel.observe(ok_cancel_clicked, names=\"value\")\n # file_chooser.register_callback(chooser_callback)\n\n m.add_control(tool_output_ctrl)\n m.tool_output_ctrl = tool_output_ctrl\n\n\ndef change_basemap(m):\n \"\"\"Widget for changing basemaps.\n\n Args:\n m (object): geemap.Map()\n \"\"\"\n from .basemaps import _ee_basemaps\n\n dropdown = widgets.Dropdown(\n options=list(_ee_basemaps.keys()),\n value=\"ROADMAP\",\n layout=widgets.Layout(width=\"200px\")\n # description=\"Basemaps\",\n )\n\n close_btn = widgets.Button(\n icon=\"times\",\n tooltip=\"Close the basemap widget\",\n button_style=\"primary\",\n layout=widgets.Layout(width=\"32px\"),\n )\n\n basemap_widget = widgets.HBox([dropdown, close_btn])\n\n def on_click(change):\n basemap_name = change[\"new\"]\n\n if len(m.layers) == 1:\n old_basemap = m.layers[0]\n else:\n old_basemap = m.layers[1]\n m.substitute_layer(old_basemap, _ee_basemaps[basemap_name])\n\n dropdown.observe(on_click, \"value\")\n\n def 
close_click(change):\n m.toolbar_reset()\n if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:\n m.remove_control(m.basemap_ctrl)\n basemap_widget.close()\n\n close_btn.on_click(close_click)\n\n basemap_control = ipyleaflet.WidgetControl(\n widget=basemap_widget, position=\"topright\"\n )\n m.add_control(basemap_control)\n m.basemap_ctrl = basemap_control\n\n\ndef convert_js2py(m):\n \"\"\"A widget for converting Earth Engine JavaScript to Python.\n\n Args:\n m (object): geemap.Map\n \"\"\"\n\n full_widget = widgets.VBox(layout=widgets.Layout(width=\"465px\", height=\"350px\"))\n\n text_widget = widgets.Textarea(\n placeholder=\"Paste your Earth Engine JavaScript into this textbox and click the Convert button below to convert the Javascript to Python\",\n layout=widgets.Layout(width=\"455px\", height=\"310px\"),\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Convert\", \"Clear\", \"Close\"],\n tooltips=[\"Convert\", \"Clear\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"142px\"\n\n def button_clicked(change):\n if change[\"new\"] == \"Convert\":\n from .conversion import create_new_cell, js_snippet_to_py\n\n if len(text_widget.value) > 0:\n out_lines = js_snippet_to_py(\n text_widget.value,\n add_new_cell=False,\n import_ee=False,\n import_geemap=False,\n show_map=False,\n )\n if len(out_lines) > 0 and len(out_lines[0].strip()) == 0:\n out_lines = out_lines[1:]\n text_widget.value = \"\".join(out_lines)\n create_code_cell(text_widget.value)\n\n elif change[\"new\"] == \"Clear\":\n text_widget.value = \"\"\n elif change[\"new\"] == \"Close\":\n m.toolbar_reset()\n if m.convert_ctrl is not None and m.convert_ctrl in m.controls:\n m.remove_control(m.convert_ctrl)\n full_widget.close()\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n full_widget.children = [text_widget, buttons]\n widget_control = ipyleaflet.WidgetControl(widget=full_widget, position=\"topright\")\n m.add_control(widget_control)\n m.convert_ctrl = widget_control\n\n\ndef collect_samples(m):\n\n full_widget = widgets.VBox()\n layout = widgets.Layout(width=\"100px\")\n prop_label = widgets.Label(\n value=\"Property\",\n layout=widgets.Layout(display=\"flex\", justify_content=\"center\", width=\"100px\"),\n )\n value_label = widgets.Label(\n value=\"Value\",\n layout=widgets.Layout(display=\"flex\", justify_content=\"center\", width=\"100px\"),\n )\n color_label = widgets.Label(\n value=\"Color\",\n layout=widgets.Layout(display=\"flex\", justify_content=\"center\", width=\"100px\"),\n )\n\n prop_text1 = widgets.Text(layout=layout, placeholder=\"Required\")\n value_text1 = widgets.Text(layout=layout, placeholder=\"Integer\")\n prop_text2 = widgets.Text(layout=layout, placeholder=\"Optional\")\n value_text2 = widgets.Text(layout=layout, placeholder=\"String\")\n\n color = widgets.ColorPicker(\n concise=False,\n value=\"#3388ff\",\n layout=layout,\n style={\"description_width\": \"initial\"},\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Apply\", \"Clear\", \"Close\"],\n tooltips=[\"Apply\", \"Clear\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"99px\"\n\n def button_clicked(change):\n if change[\"new\"] == \"Apply\":\n\n if len(color.value) != 7:\n color.value = \"#3388ff\"\n draw_control = ipyleaflet.DrawControl(\n marker={\"shapeOptions\": {\"color\": color.value}, \"repeatMode\": True},\n rectangle={\"shapeOptions\": {\"color\": color.value}, \"repeatMode\": True},\n 
polygon={\"shapeOptions\": {\"color\": color.value}, \"repeatMode\": True},\n circlemarker={},\n polyline={},\n edit=False,\n remove=False,\n )\n\n controls = []\n old_draw_control = None\n for control in m.controls:\n if isinstance(control, ipyleaflet.DrawControl):\n controls.append(draw_control)\n old_draw_control = control\n\n else:\n controls.append(control)\n\n m.controls = tuple(controls)\n old_draw_control.close()\n m.draw_control = draw_control\n\n train_props = {}\n\n if prop_text1.value != \"\" and value_text1.value != \"\":\n try:\n _ = int(value_text1.value)\n except Exception as _:\n value_text1.placeholder = \"Integer only\"\n value_text1.value = \"\"\n return\n train_props[prop_text1.value] = int(value_text1.value)\n if prop_text2.value != \"\" and value_text2.value != \"\":\n train_props[prop_text2.value] = value_text2.value\n if color.value != \"\":\n train_props[\"color\"] = color.value\n\n # Handles draw events\n def handle_draw(target, action, geo_json):\n from .geemap import ee_tile_layer\n\n try:\n geom = geojson_to_ee(geo_json, False)\n m.user_roi = geom\n\n if len(train_props) > 0:\n feature = ee.Feature(geom, train_props)\n else:\n feature = ee.Feature(geom)\n m.draw_last_json = geo_json\n m.draw_last_feature = feature\n if action == \"deleted\" and len(m.draw_features) > 0:\n m.draw_features.remove(feature)\n m.draw_count -= 1\n else:\n m.draw_features.append(feature)\n m.draw_count += 1\n collection = ee.FeatureCollection(m.draw_features)\n m.user_rois = collection\n ee_draw_layer = ee_tile_layer(\n collection, {\"color\": \"blue\"}, \"Drawn Features\", False, 0.5\n )\n draw_layer_index = m.find_layer_index(\"Drawn Features\")\n\n if draw_layer_index == -1:\n m.add_layer(ee_draw_layer)\n m.draw_layer = ee_draw_layer\n else:\n m.substitute_layer(m.draw_layer, ee_draw_layer)\n m.draw_layer = ee_draw_layer\n\n except Exception as e:\n m.draw_count = 0\n m.draw_features = []\n m.draw_last_feature = None\n m.draw_layer = None\n m.user_roi = None\n m.roi_start = False\n m.roi_end = False\n print(\"There was an error creating Earth Engine Feature.\")\n raise Exception(e)\n\n draw_control.on_draw(handle_draw)\n\n elif change[\"new\"] == \"Clear\":\n prop_text1.value = \"\"\n value_text1.value = \"\"\n prop_text2.value = \"\"\n value_text2.value = \"\"\n color.value = \"#3388ff\"\n elif change[\"new\"] == \"Close\":\n m.toolbar_reset()\n if m.training_ctrl is not None and m.training_ctrl in m.controls:\n m.remove_control(m.training_ctrl)\n full_widget.close()\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n full_widget.children = [\n widgets.HBox([prop_label, value_label, color_label]),\n widgets.HBox([prop_text1, value_text1, color]),\n widgets.HBox([prop_text2, value_text2, color]),\n buttons,\n ]\n\n widget_control = ipyleaflet.WidgetControl(widget=full_widget, position=\"topright\")\n m.add_control(widget_control)\n m.training_ctrl = widget_control\n\n\ndef get_tools_dict():\n\n import pandas as pd\n import pkg_resources\n\n pkg_dir = os.path.dirname(pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n toolbox_csv = os.path.join(pkg_dir, \"data/template/toolbox.csv\")\n\n df = pd.read_csv(toolbox_csv).set_index(\"index\")\n tools_dict = df.to_dict(\"index\")\n\n return tools_dict\n\n\ndef tool_gui(tool_dict, max_width=\"420px\", max_height=\"600px\"):\n \"\"\"Create a GUI for a tool based on the tool dictionary.\n\n Args:\n tool_dict (dict): The dictionary containing the tool info.\n max_width (str, optional): The max width of the 
tool dialog.\n max_height (str, optional): The max height of the tool dialog.\n\n Returns:\n object: An ipywidget object representing the tool interface.\n \"\"\"\n tool_widget = widgets.VBox(\n layout=widgets.Layout(max_width=max_width, max_height=max_height)\n )\n children = []\n args = {}\n required_inputs = []\n style = {\"description_width\": \"initial\"}\n max_width = str(int(max_width.replace(\"px\", \"\")) - 10) + \"px\"\n\n header_width = str(int(max_width.replace(\"px\", \"\")) - 104) + \"px\"\n header = widgets.Label(\n value=f'Current Tool: {tool_dict[\"label\"]}',\n style=style,\n layout=widgets.Layout(width=header_width),\n )\n code_btn = widgets.Button(\n description=\"View Code\", layout=widgets.Layout(width=\"100px\")\n )\n\n children.append(widgets.HBox([header, code_btn]))\n\n desc = widgets.Textarea(\n value=f'Description: {tool_dict[\"description\"]}',\n layout=widgets.Layout(width=\"410px\", max_width=max_width),\n disabled=True,\n )\n children.append(desc)\n\n run_btn = widgets.Button(description=\"Run\", layout=widgets.Layout(width=\"100px\"))\n cancel_btn = widgets.Button(\n description=\"Cancel\", layout=widgets.Layout(width=\"100px\")\n )\n help_btn = widgets.Button(description=\"Help\", layout=widgets.Layout(width=\"100px\"))\n import_btn = widgets.Button(\n description=\"Import\",\n tooltip=\"Import the script to a new cell\",\n layout=widgets.Layout(width=\"98px\"),\n )\n tool_output = widgets.Output(layout=widgets.Layout(max_height=\"200px\"))\n children.append(widgets.HBox([run_btn, cancel_btn, help_btn, import_btn]))\n children.append(tool_output)\n tool_widget.children = children\n\n def run_button_clicked(b):\n tool_output.clear_output()\n\n required_params = required_inputs.copy()\n args2 = []\n for arg in args:\n\n line = \"\"\n if isinstance(args[arg], FileChooser):\n if arg in required_params and args[arg].selected is None:\n with tool_output:\n print(f\"Please provide inputs for required parameters.\")\n break\n elif arg in required_params:\n required_params.remove(arg)\n if arg == \"i\":\n line = f\"-{arg}={args[arg].selected}\"\n else:\n line = f\"--{arg}={args[arg].selected}\"\n elif isinstance(args[arg], widgets.Text):\n if arg in required_params and len(args[arg].value) == 0:\n with tool_output:\n print(f\"Please provide inputs for required parameters.\")\n break\n elif arg in required_params:\n required_params.remove(arg)\n if args[arg].value is not None and len(args[arg].value) > 0:\n line = f\"--{arg}={args[arg].value}\"\n elif isinstance(args[arg], widgets.Checkbox):\n line = f\"--{arg}={args[arg].value}\"\n args2.append(line)\n\n if len(required_params) == 0:\n with tool_output:\n # wbt.run_tool(tool_dict[\"name\"], args2)\n pass\n\n def help_button_clicked(b):\n import webbrowser\n\n tool_output.clear_output()\n with tool_output:\n html = widgets.HTML(\n value=f'<a href={tool_dict[\"link\"]} target=\"_blank\">{tool_dict[\"link\"]}</a>'\n )\n display(html)\n webbrowser.open_new_tab(tool_dict[\"link\"])\n\n def code_button_clicked(b):\n import webbrowser\n\n with tool_output:\n html = widgets.HTML(\n value=f'<a href={tool_dict[\"link\"]} target=\"_blank\">{tool_dict[\"link\"]}</a>'\n )\n display(html)\n webbrowser.open_new_tab(tool_dict[\"link\"])\n\n def cancel_btn_clicked(b):\n tool_output.clear_output()\n\n def import_button_clicked(b):\n tool_output.clear_output()\n\n content = []\n\n create_code_cell(\"\\n\".join(content))\n\n import_btn.on_click(import_button_clicked)\n run_btn.on_click(run_button_clicked)\n 
help_btn.on_click(help_button_clicked)\n code_btn.on_click(code_button_clicked)\n cancel_btn.on_click(cancel_btn_clicked)\n\n return tool_widget\n\n\ndef build_toolbox(tools_dict, max_width=\"1080px\", max_height=\"600px\"):\n \"\"\"Build the GEE toolbox.\n\n Args:\n tools_dict (dict): A dictionary containing information for all tools.\n max_width (str, optional): The maximum width of the widget.\n max_height (str, optional): The maximum height of the widget.\n\n Returns:\n object: An ipywidget representing the toolbox.\n \"\"\"\n left_widget = widgets.VBox(layout=widgets.Layout(min_width=\"175px\"))\n center_widget = widgets.VBox(\n layout=widgets.Layout(min_width=\"200px\", max_width=\"200px\")\n )\n right_widget = widgets.Output(\n layout=widgets.Layout(width=\"630px\", max_height=max_height)\n )\n full_widget = widgets.HBox(\n [left_widget, center_widget, right_widget],\n layout=widgets.Layout(max_width=max_width, max_height=max_height),\n )\n\n search_widget = widgets.Text(\n placeholder=\"Search tools ...\", layout=widgets.Layout(width=\"170px\")\n )\n label_widget = widgets.Label(layout=widgets.Layout(width=\"170px\"))\n label_widget.value = f\"{len(tools_dict)} Available Tools\"\n close_btn = widgets.Button(\n description=\"Close Toolbox\", icon=\"close\", layout=widgets.Layout(width=\"170px\")\n )\n\n categories = {}\n categories[\"All Tools\"] = []\n for key in tools_dict.keys():\n category = tools_dict[key][\"category\"]\n if category not in categories.keys():\n categories[category] = []\n categories[category].append(tools_dict[key][\"name\"])\n categories[\"All Tools\"].append(tools_dict[key][\"name\"])\n\n options = list(categories.keys())\n all_tools = categories[\"All Tools\"]\n all_tools.sort()\n category_widget = widgets.Select(\n options=options, layout=widgets.Layout(width=\"170px\", height=\"165px\")\n )\n tools_widget = widgets.Select(\n options=[], layout=widgets.Layout(width=\"195px\", height=\"400px\")\n )\n\n def category_selected(change):\n if change[\"new\"]:\n selected = change[\"owner\"].value\n options = categories[selected]\n options.sort()\n tools_widget.options = options\n label_widget.value = f\"{len(options)} Available Tools\"\n\n category_widget.observe(category_selected, \"value\")\n\n def tool_selected(change):\n if change[\"new\"]:\n selected = change[\"owner\"].value\n tool_dict = tools_dict[selected]\n with right_widget:\n right_widget.clear_output()\n display(tool_gui(tool_dict, max_height=max_height))\n\n tools_widget.observe(tool_selected, \"value\")\n\n def search_changed(change):\n if change[\"new\"]:\n keyword = change[\"owner\"].value\n if len(keyword) > 0:\n selected_tools = []\n for tool in all_tools:\n if keyword.lower() in tool.lower():\n selected_tools.append(tool)\n if len(selected_tools) > 0:\n tools_widget.options = selected_tools\n label_widget.value = f\"{len(selected_tools)} Available Tools\"\n else:\n tools_widget.options = all_tools\n label_widget.value = f\"{len(tools_dict)} Available Tools\"\n\n search_widget.observe(search_changed, \"value\")\n\n def close_btn_clicked(b):\n full_widget.close()\n\n close_btn.on_click(close_btn_clicked)\n\n category_widget.value = list(categories.keys())[0]\n tools_widget.options = all_tools\n left_widget.children = [category_widget, search_widget, label_widget, close_btn]\n center_widget.children = [tools_widget]\n\n return full_widget\n\n\ndef timelapse_gui(m=None):\n \"\"\"Creates timelapse animations.\n\n Args:\n m (geemap.Map, optional): A geemap Map instance. 
Defaults to None.\n\n Returns:\n ipywidgets: The interactive GUI.\n \"\"\"\n if m is not None:\n m.add_basemap(\"HYBRID\")\n\n widget_width = \"350px\"\n padding = \"0px 0px 0px 5px\" # upper, right, bottom, left\n style = {\"description_width\": \"initial\"}\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Toolbar\",\n icon=\"gear\",\n layout=widgets.Layout(width=\"28px\", height=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n collection = widgets.Dropdown(\n options=[\n \"Landsat TM-ETM-OLI Surface Reflectance\",\n \"Sentinel-2AB Surface Reflectance\",\n \"MODIS\",\n ],\n value=\"Landsat TM-ETM-OLI Surface Reflectance\",\n description=\"Collection:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style=style,\n )\n\n title = widgets.Text(\n value=\"Timelapse\",\n description=\"Title:\",\n style=style,\n layout=widgets.Layout(width=\"181px\", padding=padding),\n )\n\n bands = widgets.Dropdown(\n description=\"RGB:\",\n options=[\n \"Red/Green/Blue\",\n \"NIR/Red/Green\",\n \"SWIR2/SWIR1/NIR\",\n \"NIR/SWIR1/Red\",\n \"SWIR2/NIR/Red\",\n \"SWIR2/SWIR1/Red\",\n \"SWIR1/NIR/Blue\",\n \"NIR/SWIR1/Blue\",\n \"SWIR2/NIR/Green\",\n \"SWIR1/NIR/Red\",\n ],\n value=\"NIR/Red/Green\",\n style=style,\n layout=widgets.Layout(width=\"165px\", padding=padding),\n )\n\n speed = widgets.IntSlider(\n description=\"Frames/sec:\",\n tooltip=\"Frames per second\",\n value=10,\n min=1,\n max=30,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"142px\", padding=padding),\n )\n\n speed_label = widgets.Label(\n layout=widgets.Layout(width=\"20px\", padding=padding),\n )\n widgets.jslink((speed, \"value\"), (speed_label, \"value\"))\n\n cloud = widgets.Checkbox(\n value=True,\n description=\"Apply fmask (remove clouds, shadows, snow)\",\n tooltip=\"Apply fmask (remove clouds, shadows, snow)\",\n style=style,\n )\n\n start_year = widgets.IntSlider(\n description=\"Start Year:\",\n value=1984,\n min=1984,\n max=2021,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"138px\", padding=padding),\n )\n\n start_year_label = widgets.Label()\n widgets.jslink((start_year, \"value\"), (start_year_label, \"value\"))\n\n end_year = widgets.IntSlider(\n description=\"End Year:\",\n value=2020,\n min=1984,\n max=2021,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"138px\", padding=padding),\n )\n end_year_label = widgets.Label()\n widgets.jslink((end_year, \"value\"), (end_year_label, \"value\"))\n\n start_month = widgets.IntSlider(\n description=\"Start Month:\",\n value=5,\n min=1,\n max=12,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"145px\", padding=padding),\n )\n\n start_month_label = widgets.Label(\n layout=widgets.Layout(width=\"20px\", padding=padding),\n )\n widgets.jslink((start_month, \"value\"), (start_month_label, \"value\"))\n\n end_month = widgets.IntSlider(\n description=\"End Month:\",\n value=10,\n min=1,\n max=12,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"155px\", padding=padding),\n )\n\n end_month_label = widgets.Label()\n widgets.jslink((end_month, \"value\"), (end_month_label, \"value\"))\n\n font_size = widgets.IntSlider(\n description=\"Font size:\",\n value=30,\n min=10,\n max=50,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"152px\", 
padding=padding),\n )\n\n font_size_label = widgets.Label()\n widgets.jslink((font_size, \"value\"), (font_size_label, \"value\"))\n\n font_color = widgets.ColorPicker(\n concise=False,\n description=\"Font color:\",\n value=\"white\",\n style=style,\n layout=widgets.Layout(width=\"170px\", padding=padding),\n )\n\n progress_bar_color = widgets.ColorPicker(\n concise=False,\n description=\"Progress bar:\",\n value=\"blue\",\n style=style,\n layout=widgets.Layout(width=\"180px\", padding=padding),\n )\n\n # Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.htm\n\n nd_options = [\n \"Vegetation Index (NDVI)\",\n \"Water Index (NDWI)\",\n \"Modified Water Index (MNDWI)\",\n \"Snow Index (NDSI)\",\n \"Soil Index (NDSI)\",\n \"Burn Ratio (NBR)\",\n \"Customized\",\n ]\n nd_indices = widgets.Dropdown(\n options=nd_options,\n value=None,\n description=\"Normalized Difference Index:\",\n style=style,\n layout=widgets.Layout(width=\"347px\", padding=padding),\n )\n\n first_band = widgets.Dropdown(\n description=\"1st band:\",\n options=[\"Blue\", \"Green\", \"Red\", \"NIR\", \"SWIR1\", \"SWIR2\"],\n value=None,\n style=style,\n layout=widgets.Layout(width=\"171px\", padding=padding),\n )\n\n second_band = widgets.Dropdown(\n description=\"2nd band:\",\n options=[\"Blue\", \"Green\", \"Red\", \"NIR\", \"SWIR1\", \"SWIR2\"],\n value=None,\n style=style,\n layout=widgets.Layout(width=\"172px\", padding=padding),\n )\n\n nd_threshold = widgets.FloatSlider(\n value=0,\n min=-1,\n max=1,\n step=0.01,\n description=\"Threshold:\",\n orientation=\"horizontal\",\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"159px\", padding=padding),\n )\n\n nd_threshold_label = widgets.Label(\n layout=widgets.Layout(width=\"35px\", padding=padding),\n )\n widgets.jslink((nd_threshold, \"value\"), (nd_threshold_label, \"value\"))\n\n nd_color = widgets.ColorPicker(\n concise=False,\n description=\"Color:\",\n value=\"blue\",\n style=style,\n layout=widgets.Layout(width=\"145px\", padding=padding),\n )\n\n def nd_index_change(change):\n if nd_indices.value == \"Vegetation Index (NDVI)\":\n first_band.value = \"NIR\"\n second_band.value = \"Red\"\n elif nd_indices.value == \"Water Index (NDWI)\":\n first_band.value = \"NIR\"\n second_band.value = \"SWIR1\"\n elif nd_indices.value == \"Modified Water Index (MNDWI)\":\n first_band.value = \"Green\"\n second_band.value = \"SWIR1\"\n elif nd_indices.value == \"Snow Index (NDSI)\":\n first_band.value = \"Green\"\n second_band.value = \"SWIR1\"\n elif nd_indices.value == \"Soil Index (NDSI)\":\n first_band.value = \"SWIR1\"\n second_band.value = \"NIR\"\n elif nd_indices.value == \"Burn Ratio (NBR)\":\n first_band.value = \"NIR\"\n second_band.value = \"SWIR2\"\n elif nd_indices.value == \"Customized\":\n first_band.value = None\n second_band.value = None\n\n nd_indices.observe(nd_index_change, names=\"value\")\n\n button_width = \"113px\"\n create_gif = widgets.Button(\n description=\"Create timelapse\",\n button_style=\"primary\",\n tooltip=\"Click to create timelapse\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def submit_clicked(b):\n\n if start_year.value > end_year.value:\n print(\"The end year must be great than the start year.\")\n return\n if start_month.value > end_month.value:\n print(\"The end month must be great than the start month.\")\n return\n if start_year.value == end_year.value:\n add_progress_bar = False\n else:\n add_progress_bar = True\n\n start_date = 
str(start_month.value).zfill(2) + \"-01\"\n end_date = str(end_month.value).zfill(2) + \"-30\"\n\n with output:\n print(\"Computing... Please wait...\")\n\n nd_bands = None\n if (first_band.value is not None) and (second_band.value is not None):\n nd_bands = [first_band.value, second_band.value]\n\n temp_output = widgets.Output()\n\n if m is not None:\n\n out_dir = os.path.expanduser(\"~/Downloads\")\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n out_gif = os.path.join(out_dir, \"timelapse_\" + random_string(3) + \".gif\")\n\n with temp_output:\n temp_output.clear_output()\n m.add_landsat_ts_gif(\n roi=m.user_roi,\n label=title.value,\n start_year=start_year.value,\n end_year=end_year.value,\n start_date=start_date,\n end_date=end_date,\n bands=bands.value.split(\"/\"),\n font_color=font_color.value,\n frames_per_second=speed.value,\n font_size=font_size.value,\n add_progress_bar=add_progress_bar,\n progress_bar_color=progress_bar_color.value,\n out_gif=out_gif,\n apply_fmask=cloud.value,\n nd_bands=nd_bands,\n nd_threshold=nd_threshold.value,\n nd_palette=[\"black\", nd_color.value],\n )\n if m.user_roi is not None:\n m.centerObject(m.user_roi)\n\n with output:\n print(\"The timelapse has been added to the map.\")\n link = create_download_link(\n out_gif,\n title=\"Click here to download: \",\n )\n display(link)\n if nd_bands is not None:\n link_nd = create_download_link(\n out_gif.replace(\".gif\", \"_nd.gif\"),\n title=\"Click here to download: \",\n )\n display(link_nd)\n\n create_gif.on_click(submit_clicked)\n\n reset_btn = widgets.Button(\n description=\"Reset\",\n button_style=\"primary\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def reset_btn_click(change):\n output.clear_output()\n\n reset_btn.on_click(reset_btn_click)\n\n close_btn = widgets.Button(\n description=\"Close\",\n button_style=\"primary\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def close_click(change):\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n toolbar_widget.close()\n\n close_btn.on_click(close_click)\n\n output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_header = widgets.HBox()\n toolbar_header.children = [close_button, toolbar_button]\n toolbar_footer = widgets.VBox()\n toolbar_footer.children = [\n collection,\n widgets.HBox([title, bands]),\n widgets.HBox([speed, speed_label, progress_bar_color]),\n widgets.HBox([start_year, start_year_label, end_year, end_year_label]),\n widgets.HBox([start_month, start_month_label, end_month, end_month_label]),\n widgets.HBox([font_size, font_size_label, font_color]),\n cloud,\n nd_indices,\n widgets.HBox([first_band, second_band]),\n widgets.HBox([nd_threshold, nd_threshold_label, nd_color]),\n widgets.HBox([create_gif, reset_btn, close_btn]),\n output,\n ]\n\n toolbar_event = ipyevents.Event(\n source=toolbar_widget, watched_events=[\"mouseenter\", \"mouseleave\"]\n )\n\n def handle_toolbar_event(event):\n\n if event[\"type\"] == \"mouseenter\":\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n elif event[\"type\"] == \"mouseleave\":\n if not toolbar_button.value:\n toolbar_widget.children = [toolbar_button]\n toolbar_button.value = False\n close_button.value = False\n\n 
toolbar_event.on_dom_event(handle_toolbar_event)\n\n def toolbar_btn_click(change):\n if change[\"new\"]:\n close_button.value = False\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n else:\n if not close_button.value:\n toolbar_widget.children = [toolbar_button]\n\n toolbar_button.observe(toolbar_btn_click, \"value\")\n\n def close_btn_click(change):\n if change[\"new\"]:\n toolbar_button.value = False\n if m is not None:\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n m.toolbar_reset()\n toolbar_widget.close()\n\n close_button.observe(close_btn_click, \"value\")\n\n toolbar_button.value = True\n if m is not None:\n toolbar_control = ipyleaflet.WidgetControl(\n widget=toolbar_widget, position=\"topright\"\n )\n\n if toolbar_control not in m.controls:\n m.add_control(toolbar_control)\n m.tool_control = toolbar_control\n else:\n return toolbar_widget\n\n\ndef time_slider(m=None):\n \"\"\"Creates a time slider for visualizing any ee.ImageCollection.\n\n Args:\n m (geemap.Map, optional): A geemap Map instance. Defaults to None.\n\n Returns:\n ipywidgets: The interactive GUI.\n \"\"\"\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n widget_width = \"350px\"\n padding = \"0px 0px 0px 5px\" # upper, right, bottom, left\n style = {\"description_width\": \"initial\"}\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Toolbar\",\n icon=\"fast-forward\",\n layout=widgets.Layout(width=\"28px\", height=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n col_options_dict = {\n \"Landsat TM-ETM-OLI Surface Reflectance\": {\n \"min\": 0,\n \"max\": 4000,\n \"bands\": [\"NIR\", \"Red\", \"Green\"],\n \"start_year\": 1984,\n \"end_year\": 2021,\n \"bandnames\": [\"Blue\", \"Green\", \"Red\", \"NIR\", \"SWIR1\", \"SWIR2\", \"pixel_qa\"],\n },\n \"MOD13A2.006 Terra Vegetation Indices\": {\n \"min\": 0,\n \"max\": 9000,\n \"start_year\": 2000,\n \"end_year\": 2021,\n \"palette\": [\n \"FFFFFF\",\n \"CE7E45\",\n \"DF923D\",\n \"F1B555\",\n \"FCD163\",\n \"99B718\",\n \"74A901\",\n \"66A000\",\n \"529400\",\n \"3E8601\",\n \"207401\",\n \"056201\",\n \"004C00\",\n \"023B01\",\n \"012E01\",\n \"011D01\",\n \"011301\",\n ],\n },\n \"Sentinel-2 Surface Relectance\": {\n \"min\": 0,\n \"max\": 4000,\n \"bands\": [\"NIR\", \"Red\", \"Green\"],\n \"start_year\": 2015,\n \"end_year\": 2021,\n \"bandnames\": [\n \"Blue\",\n \"Green\",\n \"Red\",\n \"Red Edge 1\",\n \"Red Edge 2\",\n \"Red Edge 3\",\n \"NIR\",\n \"Red Edge 4\",\n \"SWIR1\",\n \"SWIR2\",\n \"QA60\",\n ],\n },\n \"USDA NAIP Imagery\": {\n \"min\": 0,\n \"max\": 255,\n \"bands\": [\"R\", \"G\", \"B\"],\n \"start_year\": 2003,\n \"end_year\": 2021,\n \"bandnames\": [\"R\", \"G\", \"B\", \"N\"],\n },\n }\n\n col_options = list(col_options_dict.keys())\n\n if m is not None:\n col_options += m.ee_raster_layer_names\n\n collection = widgets.Dropdown(\n options=col_options,\n value=col_options[0],\n description=\"Time series:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style=style,\n )\n\n region = widgets.Dropdown(\n options=[\"User-drawn ROI\"] + m.ee_vector_layer_names,\n value=\"User-drawn ROI\",\n description=\"Region:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n 
style=style,\n )\n\n dropdown_width = \"97px\"\n landsat_bands = [\"Blue\", \"Green\", \"Red\", \"NIR\", \"SWIR1\", \"SWIR2\", \"pixel_qa\"]\n band1_dropdown = widgets.Dropdown(\n options=landsat_bands,\n value=\"NIR\",\n layout=widgets.Layout(width=dropdown_width),\n )\n band2_dropdown = widgets.Dropdown(\n options=landsat_bands,\n value=\"Red\",\n layout=widgets.Layout(width=dropdown_width),\n )\n band3_dropdown = widgets.Dropdown(\n options=landsat_bands,\n value=\"Green\",\n layout=widgets.Layout(width=dropdown_width),\n )\n\n bands_label = widgets.Label(\"Bands:\", layout=widgets.Layout(padding=padding))\n bands_hbox = widgets.HBox(\n [bands_label, band1_dropdown, band2_dropdown, band3_dropdown]\n )\n\n vis = widgets.Text(\n value=\"\",\n description=\"Vis min value:\",\n placeholder=\"{'min': 0, 'max': 1, 'palette': ['red', 'blue']}\",\n style=style,\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n vis_min = widgets.Text(\n value=\"0\",\n description=\"Vis min value:\",\n style=style,\n layout=widgets.Layout(width=\"172px\", padding=padding),\n )\n\n vis_max = widgets.Text(\n value=\"4000\",\n description=\"Vis max value:\",\n style=style,\n layout=widgets.Layout(width=\"172px\", padding=padding),\n )\n\n opacity = widgets.FloatSlider(\n value=1,\n min=0,\n max=1,\n step=0.01,\n description=\"Opacity:\",\n continuous_update=True,\n readout=False,\n readout_format=\".2f\",\n layout=widgets.Layout(width=\"130px\", padding=padding),\n style={\"description_width\": \"50px\"},\n )\n\n opacity_label = widgets.Label(layout=widgets.Layout(width=\"40px\", padding=padding))\n widgets.jslink((opacity, \"value\"), (opacity_label, \"value\"))\n\n gamma = widgets.FloatSlider(\n value=1,\n min=0.1,\n max=10,\n step=0.01,\n description=\"Gamma:\",\n continuous_update=True,\n readout=False,\n readout_format=\".2f\",\n layout=widgets.Layout(width=\"123px\", padding=padding),\n style={\"description_width\": \"50px\"},\n )\n\n gamma_label = widgets.Label(layout=widgets.Layout(width=\"40px\", padding=padding))\n widgets.jslink((gamma, \"value\"), (gamma_label, \"value\"))\n\n color_picker = widgets.ColorPicker(\n concise=False,\n value=\"#000000\",\n layout=widgets.Layout(width=\"97px\"),\n style={\"description_width\": \"initial\"},\n )\n\n add_color = widgets.Button(\n icon=\"plus\",\n tooltip=\"Add a hex color string to the palette\",\n layout=widgets.Layout(width=\"32px\"),\n )\n\n del_color = widgets.Button(\n icon=\"minus\",\n tooltip=\"Remove a hex color string from the palette\",\n layout=widgets.Layout(width=\"32px\"),\n )\n\n reset_color = widgets.Button(\n icon=\"eraser\",\n tooltip=\"Remove all color strings from the palette\",\n layout=widgets.Layout(width=\"34px\"),\n )\n\n classes = widgets.Dropdown(\n options=[\"Any\"] + [str(i) for i in range(3, 13)],\n description=\"Classes:\",\n layout=widgets.Layout(width=\"150px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n colormap = widgets.Dropdown(\n options=plt.colormaps(),\n value=None,\n description=\"Colormap:\",\n layout=widgets.Layout(width=\"195px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n def classes_changed(change):\n if change[\"new\"]:\n selected = change[\"owner\"].value\n if colormap.value is not None:\n\n n_class = None\n if selected != \"Any\":\n n_class = int(classes.value)\n\n colors = plt.cm.get_cmap(colormap.value, n_class)\n cmap_colors = [\n mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)\n ]\n\n _, ax = plt.subplots(figsize=(6, 
0.4))\n cmap = mpl.colors.LinearSegmentedColormap.from_list(\n \"custom\", to_hex_colors(cmap_colors), N=256\n )\n\n vmin = 0\n vmax = 1\n try:\n if vis_min.value != \"\":\n vmin = float(vis_min.value)\n if vis_max.value != \"\":\n vmax = float(vis_max.value)\n except Exception as _:\n pass\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n mpl.colorbar.ColorbarBase(\n ax, norm=norm, cmap=cmap, orientation=\"horizontal\"\n )\n\n palette.value = \", \".join([color for color in cmap_colors])\n\n if m.colorbar_widget is None:\n m.colorbar_widget = widgets.Output(\n layout=widgets.Layout(height=\"60px\")\n )\n\n if m.colorbar_ctrl is None:\n m.colorbar_ctrl = ipyleaflet.WidgetControl(\n widget=m.colorbar_widget, position=\"bottomright\"\n )\n m.add_control(m.colorbar_ctrl)\n\n colorbar_output = m.colorbar_widget\n with colorbar_output:\n colorbar_output.clear_output()\n plt.show()\n\n classes.observe(classes_changed, \"value\")\n\n palette = widgets.Text(\n value=\"\",\n placeholder=\"\",\n description=\"Palette:\",\n tooltip=\"Enter a list of hex color code (RRGGBB)\",\n layout=widgets.Layout(width=\"137px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n def add_color_clicked(b):\n if color_picker.value is not None:\n if len(palette.value) == 0:\n palette.value = color_picker.value[1:]\n else:\n palette.value += \", \" + color_picker.value[1:]\n\n def del_color_clicked(b):\n if \",\" in palette.value:\n items = [item.strip() for item in palette.value.split(\",\")]\n palette.value = \", \".join(items[:-1])\n else:\n palette.value = \"\"\n\n def reset_color_clicked(b):\n palette.value = \"\"\n\n add_color.on_click(add_color_clicked)\n del_color.on_click(del_color_clicked)\n reset_color.on_click(reset_color_clicked)\n\n def colormap_changed(change):\n if change[\"new\"]:\n\n n_class = None\n if classes.value != \"Any\":\n n_class = int(classes.value)\n\n colors = plt.cm.get_cmap(colormap.value, n_class)\n cmap_colors = [mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)]\n\n _, ax = plt.subplots(figsize=(6, 0.4))\n cmap = mpl.colors.LinearSegmentedColormap.from_list(\n \"custom\", to_hex_colors(cmap_colors), N=256\n )\n\n vmin = 0\n vmax = 1\n try:\n if vis_min.value != \"\":\n vmin = float(vis_min.value)\n if vis_max.value != \"\":\n vmax = float(vis_max.value)\n except Exception as _:\n pass\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n mpl.colorbar.ColorbarBase(\n ax, norm=norm, cmap=cmap, orientation=\"horizontal\"\n )\n\n palette.value = \", \".join(cmap_colors)\n\n if m.colorbar_widget is None:\n m.colorbar_widget = widgets.Output(layout=widgets.Layout(height=\"60px\"))\n\n if m.colorbar_ctrl is None:\n m.colorbar_ctrl = ipyleaflet.WidgetControl(\n widget=m.colorbar_widget, position=\"bottomright\"\n )\n m.add_control(m.colorbar_ctrl)\n\n colorbar_output = m.colorbar_widget\n with colorbar_output:\n colorbar_output.clear_output()\n plt.show()\n\n colormap.observe(colormap_changed, \"value\")\n\n palette_vbox = widgets.VBox()\n\n labels = widgets.Text(\n value=\", \".join([str(i) for i in range(1984, 2021)]),\n description=\"Labels:\",\n style=style,\n layout=widgets.Layout(width=\"150px\", padding=padding),\n )\n\n speed = widgets.FloatSlider(\n description=\"Speed (sec):\",\n tooltip=\"Time interval in seconds\",\n value=1,\n min=0.1,\n max=10,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"160px\", padding=padding),\n )\n\n speed_label = widgets.Label(\n layout=widgets.Layout(width=\"25px\", padding=padding),\n )\n 
widgets.jslink((speed, \"value\"), (speed_label, \"value\"))\n\n prebuilt_options = widgets.VBox()\n\n cloud = widgets.Checkbox(\n value=True,\n description=\"Apply fmask (remove clouds, shadows, snow)\",\n tooltip=\"Apply fmask (remove clouds, shadows, snow)\",\n style=style,\n )\n\n start_year = widgets.IntSlider(\n description=\"Start Year:\",\n value=1984,\n min=1984,\n max=2021,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"138px\", padding=padding),\n )\n\n def year_change(change):\n if change[\"new\"]:\n\n if collection.value != \"MOD13A2.006 Terra Vegetation Indices\":\n\n labels.value = \", \".join(\n str(i)\n for i in range(int(start_year.value), int(end_year.value) + 1)\n )\n else:\n modis_labels = []\n for i in range(int(start_year.value), int(end_year.value) + 1):\n for j in range(1, 13):\n modis_labels.append(str(i) + \"-\" + str(j).zfill(2))\n labels.value = \", \".join(modis_labels)\n\n start_year.observe(year_change, \"value\")\n\n start_year_label = widgets.Label()\n widgets.jslink((start_year, \"value\"), (start_year_label, \"value\"))\n\n end_year = widgets.IntSlider(\n description=\"End Year:\",\n value=2020,\n min=1984,\n max=2021,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"138px\", padding=padding),\n )\n\n end_year.observe(year_change, \"value\")\n\n end_year_label = widgets.Label()\n widgets.jslink((end_year, \"value\"), (end_year_label, \"value\"))\n\n start_month = widgets.IntSlider(\n description=\"Start Month:\",\n value=1,\n min=1,\n max=12,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"145px\", padding=padding),\n )\n\n start_month_label = widgets.Label(\n layout=widgets.Layout(width=\"20px\", padding=padding),\n )\n widgets.jslink((start_month, \"value\"), (start_month_label, \"value\"))\n\n end_month = widgets.IntSlider(\n description=\"End Month:\",\n value=12,\n min=1,\n max=12,\n readout=False,\n style=style,\n layout=widgets.Layout(width=\"155px\", padding=padding),\n )\n\n end_month_label = widgets.Label()\n widgets.jslink((end_month, \"value\"), (end_month_label, \"value\"))\n\n prebuilt_options.children = [\n widgets.HBox([start_year, start_year_label, end_year, end_year_label]),\n widgets.HBox([start_month, start_month_label, end_month, end_month_label]),\n cloud,\n ]\n\n button_width = \"113px\"\n apply_btn = widgets.Button(\n description=\"Apply\",\n button_style=\"primary\",\n tooltip=\"Apply the settings to activate the time slider\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def submit_clicked(b):\n\n output.clear_output()\n with output:\n if start_year.value > end_year.value:\n print(\"The end year must be great than the start year.\")\n return\n if start_month.value > end_month.value:\n print(\"The end month must be great than the start month.\")\n return\n\n if m is not None:\n\n roi = None\n if region.value == \"User-drawn ROI\" and (m.user_roi is not None):\n roi = m.user_roi\n elif region.value == \"User-drawn ROI\" and (m.user_roi is None):\n with output:\n print(\"Use the Drawing tool to create an ROI.\")\n return\n elif region.value in m.ee_layer_dict:\n roi = m.ee_layer_dict[region.value][\"ee_object\"]\n\n with output:\n print(\"Computing... 
Please wait...\")\n\n layer_labels = None\n vis_params = {}\n\n try:\n if vis_min.value != \"\":\n vis_params[\"min\"] = float(vis_min.value)\n\n if vis_max.value != \"\":\n vis_params[\"max\"] = float(vis_max.value)\n\n vis_params[\"opacity\"] = float(opacity.value)\n\n if len(bands_hbox.children) > 0 and (\n band1_dropdown.value\n and band2_dropdown.value\n and band3_dropdown.value\n ):\n vis_params[\"bands\"] = [\n band1_dropdown.value,\n band2_dropdown.value,\n band3_dropdown.value,\n ]\n vis_params[\"gamma\"] = float(gamma.value)\n\n if len(palette_vbox.children) > 0:\n if \",\" in palette.value:\n vis_params[\"palette\"] = [\n i.strip() for i in palette.value.split(\",\")\n ]\n elif len(palette.value) > 0:\n vis_params[\"palette\"] = palette.value.strip()\n\n except Exception as _:\n with output:\n print(\"The vis parmas are invalid.\")\n return\n\n if labels.value != \"\" and \",\" in labels.value:\n try:\n layer_labels = [i.strip() for i in labels.value.split(\",\")]\n except Exception as e:\n raise ValueError(e)\n\n if collection.value in m.ee_raster_layer_names:\n layer = m.ee_layer_dict[collection.value]\n ee_object = layer[\"ee_object\"]\n elif collection.value in col_options_dict:\n start_date = str(start_month.value).zfill(2) + \"-01\"\n end_date = str(end_month.value).zfill(2) + \"-30\"\n\n if collection.value == \"Landsat TM-ETM-OLI Surface Reflectance\":\n ee_object = landsat_timeseries(\n roi,\n int(start_year.value),\n int(end_year.value),\n start_date,\n end_date,\n cloud.value,\n )\n elif collection.value == \"MOD13A2.006 Terra Vegetation Indices\":\n ee_object = modis_timeseries(\n roi=roi,\n start_year=int(start_year.value),\n end_year=int(end_year.value),\n start_date=start_date,\n end_date=end_date,\n )\n\n elif collection.value == \"Sentinel-2 Surface Relectance\":\n ee_object = sentinel2_timeseries(\n roi,\n int(start_year.value),\n int(end_year.value),\n start_date,\n end_date,\n cloud.value,\n )\n elif collection.value == \"USDA NAIP Imagery\":\n\n if int(start_year.value) < 2009 and (\n band1_dropdown.value == \"N\"\n or band2_dropdown.value == \"N\"\n or band3_dropdown.value == \"N\"\n ):\n with output:\n output.clear_output()\n print(\"4-band NAIP imagery not available before 2009.\")\n return\n\n ee_object = naip_timeseries(roi, start_year.value, end_year.value)\n\n m.add_time_slider(\n ee_object,\n region=roi,\n vis_params=vis_params,\n labels=layer_labels,\n time_interval=speed.value,\n )\n\n output.clear_output()\n\n if m.colorbar_ctrl is not None:\n m.remove_control(m.colorbar_ctrl)\n m.colorbar_ctrl = None\n\n apply_btn.on_click(submit_clicked)\n\n reset_btn = widgets.Button(\n description=\"Reset\",\n button_style=\"primary\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def reset_btn_click(change):\n output.clear_output()\n collection.value = col_options[0]\n region.value = \"User-drawn ROI\"\n vis.value = \"\"\n labels.value = \"1, 2, 3\"\n speed.value = 1\n\n if m.colorbar_ctrl is not None:\n m.remove_control(m.colorbar_ctrl)\n m.colorbar_ctrl = None\n\n reset_btn.on_click(reset_btn_click)\n\n close_btn = widgets.Button(\n description=\"Close\",\n button_style=\"primary\",\n style=style,\n layout=widgets.Layout(padding=\"0px\", width=button_width),\n )\n\n def close_click(change):\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n\n if m.colorbar_ctrl is not None:\n 
m.remove_control(m.colorbar_ctrl)\n m.colorbar_ctrl = None\n toolbar_widget.close()\n\n close_btn.on_click(close_click)\n\n def collection_changed(change):\n\n if change[\"new\"]:\n selected = change[\"owner\"].value\n if selected in m.ee_layer_dict:\n prebuilt_options.children = []\n labels.value = \"\"\n region.value = None\n\n ee_object = m.ee_layer_dict[selected][\"ee_object\"]\n vis_params = m.ee_layer_dict[selected][\"vis_params\"]\n if isinstance(ee_object, ee.Image):\n palette_vbox.children = [\n widgets.HBox([classes, colormap]),\n widgets.HBox(\n [palette, color_picker, add_color, del_color, reset_color]\n ),\n ]\n bands_hbox.children = []\n\n elif isinstance(ee_object, ee.ImageCollection):\n\n first = ee.Image(ee_object.first())\n band_names = first.bandNames().getInfo()\n band_count = len(band_names)\n\n if band_count > 2:\n band1_dropdown.options = band_names\n band2_dropdown.options = band_names\n band3_dropdown.options = band_names\n\n band1_dropdown.value = band_names[2]\n band2_dropdown.value = band_names[1]\n band3_dropdown.value = band_names[0]\n\n palette_vbox.children = []\n bands_hbox.children = [\n bands_label,\n band1_dropdown,\n band2_dropdown,\n band3_dropdown,\n ]\n\n else:\n palette_vbox.children = [\n widgets.HBox([classes, colormap]),\n widgets.HBox(\n [\n palette,\n color_picker,\n add_color,\n del_color,\n reset_color,\n ]\n ),\n ]\n bands_hbox.children = []\n\n if \"min\" in vis_params:\n vis_min.value = str(vis_params[\"min\"])\n if \"max\" in vis_params:\n vis_max.value = str(vis_params[\"max\"])\n if \"opacity\" in vis_params:\n opacity.value = str(vis_params[\"opacity\"])\n if \"gamma\" in vis_params:\n if isinstance(vis_params[\"gamma\"], list):\n gamma.value = str(vis_params[\"gamma\"][0])\n else:\n gamma.value = str(vis_params[\"gamma\"])\n if \"palette\" in vis_params:\n palette.value = \", \".join(vis_params[\"palette\"])\n\n else:\n prebuilt_options.children = [\n widgets.HBox(\n [start_year, start_year_label, end_year, end_year_label]\n ),\n widgets.HBox(\n [start_month, start_month_label, end_month, end_month_label]\n ),\n cloud,\n ]\n\n if selected == \"MOD13A2.006 Terra Vegetation Indices\":\n palette_vbox.children = [\n widgets.HBox([classes, colormap]),\n widgets.HBox(\n [\n palette,\n color_picker,\n add_color,\n del_color,\n reset_color,\n ]\n ),\n ]\n bands_hbox.children = []\n\n palette.value = \", \".join(col_options_dict[selected][\"palette\"])\n modis_labels = []\n for i in range(int(start_year.value), int(end_year.value) + 1):\n for j in range(1, 13):\n modis_labels.append(str(i) + \"-\" + str(j).zfill(2))\n labels.value = \", \".join(modis_labels)\n\n else:\n bands_hbox.children = [\n bands_label,\n band1_dropdown,\n band2_dropdown,\n band3_dropdown,\n ]\n\n bandnames = col_options_dict[selected][\"bandnames\"]\n band1_dropdown.options = bandnames\n band2_dropdown.options = bandnames\n band3_dropdown.options = bandnames\n\n if (\n selected == \"Landsat TM-ETM-OLI Surface Reflectance\"\n or selected == \"Sentinel-2 Surface Relectance\"\n ):\n band1_dropdown.value = bandnames[2]\n band2_dropdown.value = bandnames[1]\n band3_dropdown.value = bandnames[0]\n palette_vbox.children = []\n elif selected == \"USDA NAIP Imagery\":\n band1_dropdown.value = bandnames[0]\n band2_dropdown.value = bandnames[1]\n band3_dropdown.value = bandnames[2]\n palette_vbox.children = []\n\n labels.value = \", \".join(\n str(i)\n for i in range(int(start_year.value), int(end_year.value) + 1)\n )\n\n start_year.min = 
col_options_dict[selected][\"start_year\"]\n start_year.max = col_options_dict[selected][\"end_year\"]\n start_year.value = start_year.min\n end_year.min = col_options_dict[selected][\"start_year\"]\n end_year.max = col_options_dict[selected][\"end_year\"]\n end_year.value = end_year.max\n vis_min.value = str(col_options_dict[selected][\"min\"])\n vis_max.value = str(col_options_dict[selected][\"max\"])\n\n if selected == \"MOD13A2.006 Terra Vegetation Indices\":\n start_year.value = \"2001\"\n end_year.value = \"2020\"\n elif selected == \"USDA NAIP Imagery\":\n start_year.value = \"2009\"\n end_year.value = \"2019\"\n\n collection.observe(collection_changed, \"value\")\n\n output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_header = widgets.HBox()\n toolbar_header.children = [close_button, toolbar_button]\n toolbar_footer = widgets.VBox()\n toolbar_footer.children = [\n collection,\n region,\n bands_hbox,\n widgets.HBox([vis_min, vis_max]),\n widgets.HBox([opacity, opacity_label, gamma, gamma_label]),\n palette_vbox,\n widgets.HBox([labels, speed, speed_label]),\n prebuilt_options,\n widgets.HBox([apply_btn, reset_btn, close_btn]),\n output,\n ]\n\n toolbar_event = ipyevents.Event(\n source=toolbar_widget, watched_events=[\"mouseenter\", \"mouseleave\"]\n )\n\n def handle_toolbar_event(event):\n\n if event[\"type\"] == \"mouseenter\":\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n elif event[\"type\"] == \"mouseleave\":\n if not toolbar_button.value:\n toolbar_widget.children = [toolbar_button]\n toolbar_button.value = False\n close_button.value = False\n\n toolbar_event.on_dom_event(handle_toolbar_event)\n\n def toolbar_btn_click(change):\n if change[\"new\"]:\n close_button.value = False\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n else:\n if not close_button.value:\n toolbar_widget.children = [toolbar_button]\n\n toolbar_button.observe(toolbar_btn_click, \"value\")\n\n def close_btn_click(change):\n if change[\"new\"]:\n toolbar_button.value = False\n if m is not None:\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n m.toolbar_reset()\n toolbar_widget.close()\n\n if m.colorbar_ctrl is not None:\n m.remove_control(m.colorbar_ctrl)\n m.colorbar_ctrl = None\n\n close_button.observe(close_btn_click, \"value\")\n\n toolbar_button.value = True\n if m is not None:\n toolbar_control = ipyleaflet.WidgetControl(\n widget=toolbar_widget, position=\"topright\"\n )\n\n if toolbar_control not in m.controls:\n m.add_control(toolbar_control)\n m.tool_control = toolbar_control\n else:\n return toolbar_widget\n\n\ndef plot_transect(m=None):\n\n from bqplot import pyplot as plt\n\n widget_width = \"250px\"\n padding = \"0px 0px 0px 5px\" # upper, right, bottom, left\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Show or hide the toolbar\",\n icon=\"line-chart\",\n layout=widgets.Layout(width=\"28px\", height=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n layer = widgets.Dropdown(\n options=[\"Option 1\", \"Option 2\", \"Option 3\"],\n value=None,\n description=\"Image:\",\n layout=widgets.Layout(width=widget_width, 
padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n band = widgets.Dropdown(\n options=[\"Option 1\", \"Option 2\", \"Option 3\"],\n value=None,\n description=\"Band:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n reducer = widgets.Dropdown(\n options=[\"mean\", \"median\", \"min\", \"max\", \"mode\", \"sum\", \"stdDev\", \"variance\"],\n value=\"mean\",\n description=\"Stats:\",\n layout=widgets.Layout(width=\"120px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n segments = widgets.IntText(\n value=\"100\",\n description=\"Segments:\",\n placeholder=\"Number of segments\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=\"126px\", padding=padding),\n )\n\n dist_interval = widgets.Text(\n value=\"\",\n description=\"Distance interval (m):\",\n placeholder=\"Optional\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n title = widgets.Text(\n value=\"\",\n description=\"Plot title:\",\n placeholder=\"Plot title\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n xlabel = widgets.Text(\n value=\"\",\n description=\"xlabel:\",\n placeholder=\"x-axis\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=\"123px\", padding=padding),\n )\n\n ylabel = widgets.Text(\n value=\"\",\n description=\"ylabel:\",\n placeholder=\"y-axis\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=\"123px\", padding=padding),\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Plot\", \"Reset\", \"Close\"],\n tooltips=[\"Plot transect\", \"Reset\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"80px\"\n\n output = widgets.Output(\n layout=widgets.Layout(max_width=\"500px\", max_height=\"265px\", padding=padding)\n )\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_header = widgets.HBox()\n toolbar_header.children = [close_button, toolbar_button]\n toolbar_footer = widgets.VBox()\n toolbar_footer.children = [\n layer,\n band,\n widgets.HBox([reducer, segments]),\n dist_interval,\n title,\n widgets.HBox([xlabel, ylabel]),\n buttons,\n ]\n\n toolbar_event = ipyevents.Event(\n source=toolbar_widget, watched_events=[\"mouseenter\", \"mouseleave\"]\n )\n\n if m is not None:\n layer.options = m.ee_raster_layer_names\n if len(layer.options) > 0:\n image = m.ee_layer_dict[layer.value][\"ee_object\"]\n if isinstance(image, ee.ImageCollection):\n image = image.toBands()\n band.options = image.bandNames().getInfo()\n\n transect_control = ipyleaflet.WidgetControl(\n widget=output, position=\"bottomright\"\n )\n m.add_control(transect_control)\n m.transect_control = transect_control\n\n def layer_changed(change):\n if change[\"new\"]:\n if m is not None:\n image = m.ee_layer_dict[layer.value][\"ee_object\"]\n if isinstance(image, ee.ImageCollection):\n image = image.toBands()\n band.options = image.bandNames().getInfo()\n\n layer.observe(layer_changed, \"value\")\n\n def handle_toolbar_event(event):\n\n if event[\"type\"] == \"mouseenter\":\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n elif event[\"type\"] == \"mouseleave\":\n if not toolbar_button.value:\n toolbar_widget.children = [toolbar_button]\n toolbar_button.value = False\n close_button.value = False\n\n 
toolbar_event.on_dom_event(handle_toolbar_event)\n\n def toolbar_btn_click(change):\n if change[\"new\"]:\n close_button.value = False\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n else:\n if not close_button.value:\n toolbar_widget.children = [toolbar_button]\n\n toolbar_button.observe(toolbar_btn_click, \"value\")\n\n def close_btn_click(change):\n if change[\"new\"]:\n toolbar_button.value = False\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n if m.transect_control is not None and m.transect_control in m.controls:\n m.remove_control(m.transect_control)\n m.transect_control = None\n toolbar_widget.close()\n\n close_button.observe(close_btn_click, \"value\")\n\n def button_clicked(change):\n if change[\"new\"] == \"Plot\":\n with output:\n output.clear_output()\n if m is not None:\n if m.user_roi is not None:\n line = m.user_roi\n geom_type = line.type().getInfo()\n if geom_type != \"LineString\":\n print(\"Use drawing tool to draw a line\")\n else:\n image = m.ee_layer_dict[layer.value][\"ee_object\"]\n if isinstance(image, ee.ImageCollection):\n image = image.toBands()\n image = image.select([band.value])\n if dist_interval.value == \"\":\n dist = None\n else:\n dist = float(dist_interval.value)\n\n print(\"Computing ...\")\n df = extract_transect(\n image,\n line,\n reducer.value,\n int(segments.value),\n dist,\n to_pandas=True,\n )\n output.clear_output()\n fig = plt.figure(title=title.value)\n fig.layout.width = output.layout.max_width\n fig.layout.height = output.layout.max_height\n plt.plot(df[\"distance\"], df[reducer.value])\n plt.xlabel(xlabel.value)\n plt.ylabel(ylabel.value)\n plt.show()\n else:\n print(\"Use drawing tool to draw a line\")\n elif change[\"new\"] == \"Reset\":\n output.clear_output()\n elif change[\"new\"] == \"Close\":\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n if m.transect_control is not None and m.transect_control in m.controls:\n m.remove_control(m.transect_control)\n m.transect_control = None\n toolbar_widget.close()\n\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n toolbar_button.value = True\n if m is not None:\n toolbar_control = ipyleaflet.WidgetControl(\n widget=toolbar_widget, position=\"topright\"\n )\n\n if toolbar_control not in m.controls:\n m.add_control(toolbar_control)\n m.tool_control = toolbar_control\n else:\n return toolbar_widget\n\n\ndef sankee_gui(m=None):\n\n import sankee\n\n widget_width = \"250px\"\n padding = \"0px 0px 0px 5px\" # upper, right, bottom, left\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Toolbar\",\n icon=\"random\",\n layout=widgets.Layout(width=\"28px\", height=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n region = widgets.Dropdown(\n options=[\"User-drawn ROI\"],\n value=\"User-drawn ROI\",\n description=\"Region:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n def region_changed(change):\n if change[\"new\"] == \"Las Vegas\":\n if m is not None:\n las_vegas = ee.Geometry.Polygon(\n [\n [\n [-115.01184401606046, 
36.24170785506492],\n [-114.98849806879484, 36.29928186470082],\n [-115.25628981684171, 36.35238941394592],\n [-115.34692702387296, 36.310348922031565],\n [-115.37988600824796, 36.160811202271944],\n [-115.30298171137296, 36.03653336474891],\n [-115.25628981684171, 36.05207884201088],\n [-115.26590285395109, 36.226199908103695],\n [-115.19174513910734, 36.25499793268206],\n ]\n ]\n )\n m.addLayer(las_vegas, {}, \"Las Vegas\")\n m.centerObject(las_vegas, 10)\n\n region.observe(region_changed, \"value\")\n\n dataset = widgets.Dropdown(\n options=[\n \"NLCD - National Land Cover Database\",\n \"MCD12Q1 - MODIS Global Land Cover\",\n \"CGLS - Copernicus Global Land Cover\",\n \"LCMS - Land Change Monitoring System\",\n ],\n value=\"NLCD - National Land Cover Database\",\n description=\"Dataset:\",\n layout=widgets.Layout(width=widget_width, padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n NLCD_options = [\"2001\", \"2004\", \"2006\", \"2008\", \"2011\", \"2013\", \"2016\"]\n MODIS_options = [str(y) for y in range(2001, 2020)]\n CGLS_options = [str(y) for y in range(2015, 2020)]\n LCMS_options = [str(y) for y in range(1985, 2021)]\n\n before = widgets.Dropdown(\n options=NLCD_options,\n value=\"2001\",\n description=\"Before:\",\n layout=widgets.Layout(width=\"123px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n after = widgets.Dropdown(\n options=NLCD_options,\n value=\"2016\",\n description=\"After:\",\n layout=widgets.Layout(width=\"123px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n def dataset_changed(change):\n if change[\"new\"] == \"NLCD - National Land Cover Database\":\n before.options = NLCD_options\n after.options = NLCD_options\n before.value = NLCD_options[0]\n after.value = NLCD_options[-1]\n elif change[\"new\"] == \"MCD12Q1 - MODIS Global Land Cover\":\n before.options = MODIS_options\n after.options = MODIS_options\n before.value = MODIS_options[0]\n after.value = MODIS_options[-1]\n elif change[\"new\"] == \"CGLS - Copernicus Global Land Cover\":\n before.options = CGLS_options\n after.options = CGLS_options\n before.value = CGLS_options[0]\n after.value = CGLS_options[-1]\n elif change[\"new\"] == \"LCMS - Land Change Monitoring System\":\n before.options = LCMS_options\n after.options = LCMS_options\n before.value = LCMS_options[0]\n after.value = LCMS_options[-1]\n\n dataset.observe(dataset_changed, \"value\")\n\n dataset_template = {\n \"NLCD - National Land Cover Database\": sankee.datasets.NLCD2016,\n \"MCD12Q1 - MODIS Global Land Cover\": sankee.datasets.MODIS_LC_TYPE1,\n \"CGLS - Copernicus Global Land Cover\": sankee.datasets.CGLS_LC100,\n \"LCMS - Land Change Monitoring System\": sankee.datasets.LCMS_LC,\n }\n\n band_name = {\n \"NLCD - National Land Cover Database\": \"landcover\",\n \"MCD12Q1 - MODIS Global Land Cover\": \"LC_Type1\",\n \"CGLS - Copernicus Global Land Cover\": \"discrete_classification\",\n \"LCMS - Land Change Monitoring System\": \"Land_Cover\",\n }\n\n samples = widgets.IntText(\n value=1000,\n description=\"Samples:\",\n placeholder=\"The number of samples points to randomly generate for characterizing all images\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=\"133px\", padding=padding),\n )\n\n classes = widgets.IntText(\n value=6,\n description=\"Classes:\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=\"113px\", padding=padding),\n )\n\n title = widgets.Text(\n value=\"Land Cover Change\",\n 
description=\"Title:\",\n style={\"description_width\": \"initial\"},\n layout=widgets.Layout(width=widget_width, padding=padding),\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Apply\", \"Reset\", \"Close\"],\n tooltips=[\"Apply\", \"Reset\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"80px\"\n\n output = widgets.Output(layout=widgets.Layout(padding=padding))\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_header = widgets.HBox()\n toolbar_header.children = [close_button, toolbar_button]\n toolbar_footer = widgets.VBox()\n toolbar_footer.children = [\n region,\n dataset,\n widgets.HBox([before, after]),\n widgets.HBox([samples, classes]),\n title,\n buttons,\n output,\n ]\n\n toolbar_event = ipyevents.Event(\n source=toolbar_widget, watched_events=[\"mouseenter\", \"mouseleave\"]\n )\n\n if m is not None:\n if \"Las Vegas\" not in m.ee_vector_layer_names:\n region.options = [\"User-drawn ROI\", \"Las Vegas\"] + m.ee_vector_layer_names\n else:\n region.options = [\"User-drawn ROI\"] + m.ee_vector_layer_names\n\n plot_close_btn = widgets.Button(\n tooltip=\"Close the plot\",\n icon=\"times\",\n layout=widgets.Layout(\n height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 0px\"\n ),\n )\n\n def plot_close_btn_clicked(b):\n plot_widget.children = []\n\n plot_close_btn.on_click(plot_close_btn_clicked)\n\n plot_reset_btn = widgets.Button(\n tooltip=\"Reset the plot\",\n icon=\"home\",\n layout=widgets.Layout(\n height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 0px\"\n ),\n )\n\n def plot_reset_btn_clicked(b):\n\n m.sankee_plot.update_layout(\n width=600,\n height=250,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n plot_reset_btn.on_click(plot_reset_btn_clicked)\n\n plot_fullscreen_btn = widgets.Button(\n tooltip=\"Fullscreen the plot\",\n icon=\"arrows-alt\",\n layout=widgets.Layout(\n height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 0px\"\n ),\n )\n\n def plot_fullscreen_btn_clicked(b):\n\n m.sankee_plot.update_layout(\n width=1030,\n height=int(m.layout.height[:-2]) - 60,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n plot_fullscreen_btn.on_click(plot_fullscreen_btn_clicked)\n\n width_btn = widgets.Button(\n tooltip=\"Change plot width\",\n icon=\"arrows-h\",\n layout=widgets.Layout(\n height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 0px\"\n ),\n )\n\n def width_btn_clicked(b):\n m.sankee_plot.update_layout(\n width=1030,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n width_btn.on_click(width_btn_clicked)\n\n height_btn = widgets.Button(\n tooltip=\"Change plot height\",\n icon=\"arrows-v\",\n layout=widgets.Layout(\n height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 0px\"\n ),\n )\n\n def height_btn_clicked(b):\n m.sankee_plot.update_layout(\n height=int(m.layout.height[:-2]) - 60,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n height_btn.on_click(height_btn_clicked)\n\n width_slider = widgets.IntSlider(\n value=600,\n min=400,\n max=1030,\n step=10,\n description=\"\",\n readout=False,\n continuous_update=False,\n layout=widgets.Layout(width=\"100px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n width_slider_label = 
widgets.Label(\n layout=widgets.Layout(padding=\"0px 10px 0px 0px\")\n )\n widgets.jslink((width_slider, \"value\"), (width_slider_label, \"value\"))\n\n def width_changed(change):\n if change[\"new\"]:\n\n m.sankee_plot.update_layout(\n width=width_slider.value,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n width_slider.observe(width_changed, \"value\")\n\n height_slider = widgets.IntSlider(\n value=250,\n min=200,\n max=int(m.layout.height[:-2]) - 60,\n step=10,\n description=\"\",\n readout=False,\n continuous_update=False,\n layout=widgets.Layout(width=\"100px\", padding=padding),\n style={\"description_width\": \"initial\"},\n )\n\n height_slider_label = widgets.Label()\n widgets.jslink((height_slider, \"value\"), (height_slider_label, \"value\"))\n\n def height_changed(change):\n if change[\"new\"]:\n\n m.sankee_plot.update_layout(\n height=height_slider.value,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n with plot_output:\n plot_output.clear_output()\n display(m.sankee_plot)\n\n height_slider.observe(height_changed, \"value\")\n\n plot_output = widgets.Output()\n\n plot_widget = widgets.VBox([plot_output])\n\n sankee_control = ipyleaflet.WidgetControl(\n widget=plot_widget, position=\"bottomright\"\n )\n m.add_control(sankee_control)\n m.sankee_control = sankee_control\n\n def handle_toolbar_event(event):\n\n if event[\"type\"] == \"mouseenter\":\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n elif event[\"type\"] == \"mouseleave\":\n if not toolbar_button.value:\n toolbar_widget.children = [toolbar_button]\n toolbar_button.value = False\n close_button.value = False\n\n toolbar_event.on_dom_event(handle_toolbar_event)\n\n def toolbar_btn_click(change):\n if change[\"new\"]:\n close_button.value = False\n toolbar_widget.children = [toolbar_header, toolbar_footer]\n else:\n if not close_button.value:\n toolbar_widget.children = [toolbar_button]\n\n toolbar_button.observe(toolbar_btn_click, \"value\")\n\n def close_btn_click(change):\n if change[\"new\"]:\n toolbar_button.value = False\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n if m.sankee_control is not None and m.sankee_control in m.controls:\n m.remove_control(m.sankee_control)\n m.sankee_control = None\n toolbar_widget.close()\n\n close_button.observe(close_btn_click, \"value\")\n\n def button_clicked(change):\n if change[\"new\"] == \"Apply\":\n with output:\n output.clear_output()\n plot_output.clear_output()\n print(\"Running ...\")\n\n if m is not None:\n exclude_classes = []\n\n if \"NLCD\" in dataset.value:\n before_img = ee.Image(f\"USGS/NLCD/NLCD{before.value}\")\n after_img = ee.Image(f\"USGS/NLCD/NLCD{after.value}\")\n vis_params = {}\n elif \"MODIS\" in dataset.value:\n before_img = ee.Image(f\"MODIS/006/MCD12Q1/{before.value}_01_01\")\n after_img = ee.Image(f\"MODIS/006/MCD12Q1/{after.value}_01_01\")\n vis_params = {\n \"min\": 1.0,\n \"max\": 17.0,\n \"palette\": [\n \"05450a\",\n \"086a10\",\n \"54a708\",\n \"78d203\",\n \"009900\",\n \"c6b044\",\n \"dcd159\",\n \"dade48\",\n \"fbff13\",\n \"b6ff05\",\n \"27ff87\",\n \"c24f44\",\n \"a5a5a5\",\n \"ff6d4c\",\n \"69fff8\",\n \"f9ffa4\",\n \"1c0dff\",\n ],\n }\n elif \"CGLS\" in dataset.value:\n before_img = ee.Image(\n f\"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{before.value}\"\n )\n after_img = ee.Image(\n 
f\"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{after.value}\"\n )\n vis_params = {}\n elif \"LCMS\" in dataset.value:\n before_img = ee.Image(\n f\"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{before.value}\"\n )\n after_img = ee.Image(\n f\"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{after.value}\"\n )\n vis_params = {}\n # LCMS Land Cover class 15 is a no data mask and should be excluded\n exclude_classes.append(15)\n\n img_list = [before_img, after_img]\n label_list = [before.value, after.value]\n\n image1 = before_img.select(band_name[dataset.value])\n image2 = after_img.select(band_name[dataset.value])\n\n if region.value != \"User-drawn ROI\" or (\n region.value == \"User-drawn ROI\" and m.user_roi is not None\n ):\n\n if region.value == \"User-drawn ROI\":\n geom = m.user_roi\n image1 = image1.clip(geom)\n image2 = image2.clip(geom)\n else:\n roi_object = m.ee_layer_dict[region.value][\"ee_object\"]\n if region.value == \"Las Vegas\":\n m.centerObject(roi_object, 10)\n if isinstance(roi_object, ee.Geometry):\n geom = roi_object\n image1 = image1.clip(geom)\n image2 = image2.clip(geom)\n else:\n roi_object = ee.FeatureCollection(roi_object)\n image1 = image1.clipToCollection(roi_object)\n image2 = image2.clipToCollection(roi_object)\n geom = roi_object.geometry()\n\n if len(title.value) > 0:\n plot_title = title.value\n else:\n plot_title = None\n m.default_style = {\"cursor\": \"wait\"}\n plot = sankee.sankify(\n img_list,\n geom,\n label_list,\n dataset_template[dataset.value],\n max_classes=classes.value,\n n=int(samples.value),\n title=plot_title,\n exclude=exclude_classes,\n )\n\n output.clear_output()\n plot_output.clear_output()\n with plot_output:\n plot.update_layout(\n width=600,\n height=250,\n margin=dict(l=10, r=10, b=10, t=50, pad=5),\n )\n plot_widget.children = [\n widgets.HBox(\n [\n plot_close_btn,\n plot_reset_btn,\n plot_fullscreen_btn,\n width_btn,\n width_slider,\n width_slider_label,\n height_btn,\n height_slider,\n height_slider_label,\n ]\n ),\n plot_output,\n ]\n display(plot)\n\n m.sankee_plot = plot\n m.addLayer(image1, vis_params, before.value)\n m.addLayer(image2, vis_params, after.value)\n m.default_style = {\"cursor\": \"default\"}\n\n else:\n with output:\n output.clear_output()\n print(\"Draw a polygon on the map.\")\n\n elif change[\"new\"] == \"Reset\":\n output.clear_output()\n plot_output.clear_output()\n plot_widget.children = []\n\n elif change[\"new\"] == \"Close\":\n if m is not None:\n m.toolbar_reset()\n if m.tool_control is not None and m.tool_control in m.controls:\n m.remove_control(m.tool_control)\n m.tool_control = None\n if m.sankee_control is not None and m.sankee_control in m.controls:\n m.remove_control(m.sankee_control)\n m.sankee_control = None\n toolbar_widget.close()\n\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n toolbar_button.value = True\n if m is not None:\n toolbar_control = ipyleaflet.WidgetControl(\n widget=toolbar_widget, position=\"topright\"\n )\n\n if toolbar_control not in m.controls:\n m.add_control(toolbar_control)\n m.tool_control = toolbar_control\n else:\n return toolbar_widget\n\n\ndef split_basemaps(\n m, layers_dict=None, left_name=None, right_name=None, width=\"120px\", **kwargs\n):\n\n from .basemaps import basemap_tiles\n\n controls = m.controls\n layers = m.layers\n m.layers = [m.layers[0]]\n m.clear_controls()\n\n add_zoom = True\n add_fullscreen = True\n\n if layers_dict is None:\n layers_dict = {}\n keys = dict(basemap_tiles).keys()\n for key in keys:\n if 
isinstance(basemap_tiles[key], ipyleaflet.WMSLayer):\n pass\n else:\n layers_dict[key] = basemap_tiles[key]\n\n keys = list(layers_dict.keys())\n if left_name is None:\n left_name = keys[0]\n if right_name is None:\n right_name = keys[-1]\n\n left_layer = layers_dict[left_name]\n right_layer = layers_dict[right_name]\n\n control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer)\n m.add_control(control)\n\n left_dropdown = widgets.Dropdown(\n options=keys, value=left_name, layout=widgets.Layout(width=width)\n )\n\n left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position=\"topleft\")\n m.add_control(left_control)\n\n right_dropdown = widgets.Dropdown(\n options=keys, value=right_name, layout=widgets.Layout(width=width)\n )\n\n right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position=\"topright\")\n m.add_control(right_control)\n\n close_button = widgets.ToggleButton(\n value=False,\n tooltip=\"Close the tool\",\n icon=\"times\",\n # button_style=\"primary\",\n layout=widgets.Layout(height=\"28px\", width=\"28px\", padding=\"0px 0px 0px 4px\"),\n )\n\n def close_btn_click(change):\n if change[\"new\"]:\n m.controls = controls\n m.layers = layers\n\n close_button.observe(close_btn_click, \"value\")\n close_control = ipyleaflet.WidgetControl(\n widget=close_button, position=\"bottomright\"\n )\n m.add_control(close_control)\n\n if add_zoom:\n m.add_control(ipyleaflet.ZoomControl())\n if add_fullscreen:\n m.add_control(ipyleaflet.FullScreenControl())\n m.add_control(ipyleaflet.ScaleControl(position=\"bottomleft\"))\n\n split_control = None\n for ctrl in m.controls:\n if isinstance(ctrl, ipyleaflet.SplitMapControl):\n split_control = ctrl\n break\n\n def left_change(change):\n split_control.left_layer.url = layers_dict[left_dropdown.value].url\n\n left_dropdown.observe(left_change, \"value\")\n\n def right_change(change):\n split_control.right_layer.url = layers_dict[right_dropdown.value].url\n\n right_dropdown.observe(right_change, \"value\")\n" ]
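The GUI builders in the file above (the time-slider tool, plot_transect, and sankee_gui) all repeat one collapsible-toolbar idiom: a small ToggleButton placeholder, an ipyevents.Event that expands or collapses the widget tree on mouseenter/mouseleave, and an ipyleaflet.WidgetControl that pins the result to the map. Below is a minimal sketch of just that idiom, stripped of the Earth Engine logic; the function name and the placeholder body are illustrative, not taken from the source.

import ipyevents
import ipyleaflet
import ipywidgets as widgets

def collapsible_toolbar(m, icon="gear", position="topright"):
    """Minimal expand-on-hover toolbar, mirroring the pattern used above."""
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon=icon,
        layout=widgets.Layout(width="28px", height="28px"),
    )
    body = widgets.VBox([widgets.Label("toolbar content goes here")])
    container = widgets.VBox([toolbar_button])

    def handle_event(event):
        # Expand on hover; collapse on leave unless the button pins it open.
        if event["type"] == "mouseenter":
            container.children = [toolbar_button, body]
        elif event["type"] == "mouseleave" and not toolbar_button.value:
            container.children = [toolbar_button]

    ipyevents.Event(
        source=container, watched_events=["mouseenter", "mouseleave"]
    ).on_dom_event(handle_event)

    def on_toggle(change):
        # Clicking the button keeps the toolbar open (or closes it explicitly).
        if change["new"]:
            container.children = [toolbar_button, body]
        else:
            container.children = [toolbar_button]

    toolbar_button.observe(on_toggle, "value")

    control = ipyleaflet.WidgetControl(widget=container, position=position)
    m.add_control(control)
    return control

The same skeleton explains why each tool ends with the "if m is not None: ... else: return toolbar_widget" tail: with a map, the widget is mounted as a control; without one, it is returned for standalone display.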
[ [ "matplotlib.colorbar.ColorbarBase", "pandas.read_csv", "matplotlib.colors.Normalize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
susumOyaji/chaquopy-matplotlib-master
[ "dda4a8da1391f968023bdd9d4b9c05e63b499390", "dda4a8da1391f968023bdd9d4b9c05e63b499390", "dda4a8da1391f968023bdd9d4b9c05e63b499390" ]
[ "app/src/main/python/main.py", "app/src/main/python/KinoCode.py", "app/src/main/python/kabu_pre10.py" ]
[ "import glob\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntorch.manual_seed(1)\n\nfuture_num = 1 #何日先を予測するか\nfeature_num = 7 #'始値', '高値','安値','終値','5日平均','25日平均','75日平均'の7項目\nbatch_size = 128\n\ntime_steps = 30 #lstmのtimesteps\nmoving_average_num = 30 #移動平均を取る日数\nn_epocs = 50\n\nlstm_hidden_dim = 16\ntarget_dim = 1\n\n\n\npath = \"./data/nikkei_heikin.csv\"\n\nmodel_name = \"./models/nikkei.mdl\"\n\n#data load\nflist = glob.glob(path)\nfor file in flist:\n df = pd.read_csv(file, header=0, encoding='cp932')\n dt = pd.read_csv(file, header=0, encoding='cp932')\n\n\n#データをtrain, testに分割するIndex\nval_idx_from = 3500\ntest_idx_from = 4000\n\nfuture_price = df.iloc[future_num:]['終値'].values\ncurr_price = df.iloc[:-future_num]['終値'].values\n\n#future_num日後との比較した価格を正解ラベルとして扱う\ny_data_tmp = future_price / curr_price\n#正解ラベル用のlistを用意\ny_data = np.zeros_like(y_data_tmp)\n\n#予測するfuture_num日後が前日以上なら正解\n\nfor i in range(len(y_data_tmp)):\n if y_data_tmp[i] > 1.0:\n y_data[i] = 1\n\n#価格の正規化をした際にmoving_average_num分空白のデータができるためその部分を削っておく\ny_data = y_data[moving_average_num:]\n\n#価格の正規化\n#カラム名の取得\ncols = ['始値', '高値','安値','終値','5日平均','25日平均','75日平均']\n#出来高のデータに缺損があったため抜いた\n\nfor col in cols:\n dt[col] = df[col].rolling(window=25, min_periods=25).mean()\n df[col] = df[col] / dt[col] - 1\n\n\nX_data = df.iloc[moving_average_num:-future_num][cols].values\n\n#データの分割、TorchのTensorに変換\n#学習用データ\nX_train = torch.tensor(X_data[:val_idx_from], dtype=torch.float, device=device)\ny_train = torch.tensor(y_data[:val_idx_from], dtype=torch.float, device=device)\n#評価用データ\nX_val = torch.tensor(X_data[val_idx_from:test_idx_from], dtype=torch.float, device=device)\ny_val = y_data[val_idx_from:test_idx_from]\n#テスト用データ\nX_test = torch.tensor(X_data[test_idx_from:], dtype=torch.float, device=device)\ny_test = y_data[test_idx_from:]\n\n\n\n\n\n\nclass LSTMClassifier(nn.Module):\n def __init__(self, lstm_input_dim, lstm_hidden_dim, target_dim):\n super(LSTMClassifier, self).__init__()\n self.input_dim = lstm_input_dim\n self.hidden_dim = lstm_hidden_dim\n self.lstm = nn.LSTM(input_size=lstm_input_dim, \n hidden_size=lstm_hidden_dim,\n num_layers=1, #default\n #dropout=0.2,\n batch_first=True\n )\n self.dense = nn.Linear(lstm_hidden_dim, target_dim)\n\n def forward(self, X_input):\n _, lstm_out = self.lstm(X_input)\n # LSTMの最終出力のみを利用する。\n linear_out = self.dense(lstm_out[0].view(X_input.size(0), -1))\n return torch.sigmoid(linear_out)\n\n\ndef prepare_data(batch_idx, time_steps, X_data, feature_num, device):\n feats = torch.zeros((len(batch_idx), time_steps, feature_num), dtype=torch.float, device=device)\n for b_i, b_idx in enumerate(batch_idx):\n # 過去の30日分をtime stepのデータとして格納する。\n b_slc = slice(b_idx + 1 - time_steps ,b_idx + 1)\n feats[b_i, :, :] = X_data[b_slc, :]\n\n return feats\n\n\n\n#学習\nmodel = LSTMClassifier(feature_num, lstm_hidden_dim, target_dim).to(device)\nloss_function = nn.BCELoss()\noptimizer= optim.Adam(model.parameters(), lr=1e-4)\n\n\ntrain_size = X_train.size(0)\nbest_acc_score = 0\n\nfor epoch in range(n_epocs):\n # trainデータのindexをランダムに入れ替える。最初のtime_steps分は使わない。\n perm_idx = np.random.permutation(np.arange(time_steps, train_size))\n for t_i in range(0, len(perm_idx), batch_size):\n batch_idx = perm_idx[t_i:(t_i + batch_size)]\n # 
LSTM入力用の時系列データの準備\n feats = prepare_data(batch_idx, time_steps, X_train, feature_num, device)\n y_target = y_train[batch_idx]\n model.zero_grad()\n train_scores = model(feats) # batch size x time steps x feature_num\n loss = loss_function(train_scores, y_target.view(-1, 1))\n loss.backward()\n optimizer.step()\n\n # validationデータの評価\n print('EPOCH: ', str(epoch), ' loss :', loss.item())\n with torch.no_grad():\n feats_val = prepare_data(np.arange(time_steps, X_val.size(0)), time_steps, X_val, feature_num, device)\n val_scores = model(feats_val)\n tmp_scores = val_scores.view(-1).to('cpu').numpy()\n bi_scores = np.round(tmp_scores)\n acc_score = accuracy_score(y_val[time_steps:], bi_scores)\n roc_score = roc_auc_score(y_val[time_steps:], tmp_scores)\n f1_scores = f1_score(y_val[time_steps:], bi_scores)\n print('Val ACC Score :', acc_score, ' ROC AUC Score :', roc_score, 'f1 Score :', f1_scores)\n\n # validationの評価が良ければモデルを保存\n if acc_score > best_acc_score:\n best_acc_score = acc_score\n torch.save(model.state_dict(),model_name)\n print('best score updated, Pytorch model was saved!!', )\n\n# bestモデルで予測する。\nmodel.load_state_dict(torch.load(model_name))\n\nwith torch.no_grad():\n feats_test = prepare_data(np.arange(time_steps, X_test.size(0)), time_steps, X_test, feature_num, device)\n val_scores = model(feats_test)\n tmp_scores = val_scores.view(-1).to('cpu').numpy() \n bi_scores = np.round(tmp_scores)\n acc_score = accuracy_score(y_test[time_steps:], bi_scores)\n roc_score = roc_auc_score(y_test[time_steps:], tmp_scores)\n f1_scores = f1_score(y_test[time_steps:], bi_scores)\n print('Test ACC Score :', acc_score, ' ROC AUC Score :', roc_score, 'f1 Score :', f1_scores)\n\n\n \n ", "from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom keras.layers import Dropout\nfrom keras.layers import Dense, LSTM\nfrom keras.models import Sequential\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom matplotlib import pyplot as plt\nfrom datetime import timedelta\nfrom datetime import datetime\nimport pandas as pd\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom pandas_datareader import data as pdr\nfrom sklearn.metrics import r2_score, mean_squared_error\n# pandasのインポート\n\n# データの読み込み\n#df = pd.read_csv('finance_dataset.csv')\n# データフレームの表示\n#df\n\n\ncode = '6976' # '6976'#6758\n#2021年から今日までの1年間のデータを取得しましょう。期日を決めて行きます。\n# (2021, 1, 1) # 教師データ(今までのデータ)\n#start_train = datetime.date(2022, 1, 1) # 教師データ(今までのデータ)\nstart_train=datetime.date.today() + relativedelta(days=-700)\n#dowstart_train = datetime.date(2022, 1, 5)#start_train + relativedelta(days=+3)\n# 昨日分(today-1日)まで取得できる(当日分は変動しているため)\nend_train = datetime.date.today() + relativedelta(days=-1)\n\ndata = pdr.get_data_yahoo(f'{code}.T', start_train, end_train) # 教師データを読み込む。\nDow_df = pdr.get_data_yahoo('^DJI', start_train, end_train) # 試験データのcsvファイルを読み込む。\nNikkei_df = pdr.get_data_yahoo('^N225', start_train, end_train) # 試験データのcsvファイルを読み込む。\n\n\n\n#データの前処理\n#欠損データがあるので、欠損値NaNを除外する\n#df_NikkeiAll_drop = df_NikkeiAll.dropna()\n\n#df_NikkeiAll_drop.head() # 先頭の5行を表形式で表示\nprint(data.head())\n'''\npng\nインデックスが0から13966までの連番で、カラムに\n日付('Date')、最高値('High')、最安値('Low')、始値('Open')、終値('Close')が設定されたデータフレームである事が確認できます。\n日付('Date)は1965年1月5日から2021年10月21日までとなっています。\n\n後に詳しく説明を行いますが、予測モデル作成に対して、目的変数の追加や、週ごとにデータを纏める必要があります。\nそのために、曜日情報や初めの週を基準として何週目となるか等の情報と、今回の目的変数である木曜日の終値から翌日金曜日の始値が上がるかどうかの’Up’(上がる場合は'1', 
同じ又は下がる場合は'0')を追加していきます。\n\n次に、infoメソッドを用いて、欠損値の有無やカラムのデータ型の確認を行います。\n'''\n\n# 各カラムの詳細確認\ndata.info()\n\n\n'''\npng\n各カラム欠損値なしである事がわかります。\n日付('Date')が’object'型となっています。今回の様な時系列データを用いる際には、'datetime64'型を用いる方が利便性が高い為、pandasの'to_datetime'メソッドを用いてデータ型の変換を行います。\n'''\n\n# 日付インデックスをりセット\ndata.reset_index(drop=False,inplace=True)\nDow_df.reset_index(drop=False,inplace=True)\nNikkei_df.reset_index(drop=False, inplace=True)\n\n# Dateのデータ型をを'datetime'型へ変更\ndata['Date'] = pd.to_datetime(data['Date'])\nDow_df['Date'] = pd.to_datetime(Dow_df['Date'])\nNikkei_df['Date'] = pd.to_datetime(Nikkei_df['Date'])\ndata.info()\n\n\n'''\npng\n'Date'のカラムが'object'型から'datetime64'型へ代わっていることが確認できます。\n次に曜日情報のカラムを追加します。'datetime64'型は'dt.weekday'メソッドを用いて、曜日情報を取得する事ができます。月曜日を0として連番の数字を設定されます。実行結果をdfに'weekday'カラムを追加して入力し、実行結果を確認します。\n'''\n\n\n# 曜日情報を追加(月曜:0, 火曜:1, 水曜:2, 木曜:3, 金曜:4、土曜:5、日曜:6)\ndata['weekday'] = data['Date'].dt.weekday\n#data['Dowweekday'] = Dow_df['Date'].dt.weekday\n#data['DowDate'] = Dow_df['Date']\n#data['Nikkeiweekday'] = Nikkei_df['Date'].dt.weekday\nprint(data)\n\n\n'''\npng\n'weekday'のカラムが追加され0から4の数字が入力されている事がわかります。\nまた、株取引の行われない土曜日: 5と日曜日: 6のデータは存在していない事もわかります。\n\n次に、1965年1月5日の週を基準に何周目となるのかの情報を追加します。\n1965年1月5日が火曜日である事がわかるので、その週の頭の月曜日となる1965年1月4日を基準として、何日目となるのかの情報を追加します。\ndatetimeのライブラリからdatetimeとtimedeltaをインポートします。\n基準となる日の1965年1月4日をdatetime関数を使って、変数startに代入します。\ndfの'Date'カラムから基準のstartと引き算をすることで、何日目となるのかを計算します。これをtimedelta関数を用いて1週間となる7日周期で割ることで何週目かを計算する事ができます。\ntimedelta(weeks=1)と設定することで1週間となります。\nこの計算結果を'weeks'というカラムをdfに追加します。実行することで初めの週は0から始まり最後の2021年10月18日の週は2963となっている事が分かります。\n'''\n\n# 初めの月曜日となる1965/1/4を基準に日数を追加\nstart = start_train+relativedelta(days=-2) # datetime(1965, 1, 4)\nstart = pd.to_datetime(start)\n#data['weeks'] = (data['Date'] - start) // timedelta(weeks=1)\n#data['Dowweeks'] = (Dow_df['Date'] - start) // timedelta(weeks=1)\n#data['Nikkiweeks'] = (Nikkei_df['Date'] - start) // timedelta(weeks=1)\n#print(data)\n#data.to_csv('data/stocks_price_data/KinoCode_data.csv') # csv書き出し\n'''\npng\n日付の情報の'Date', 'weekday', 'weeks'のカラムが分かれて表示されているので、見栄えを整理する目的で、一旦カラムの並び替えを行います。\n先頭に日付の情報をまとめます。\n並び替えたい順序でカラムを記述しdfを置き換えます。\n実行する事で、並び替えられている事がわかります。\n'''\n\n\n\n# Closeの列のデータのみを取り出し\ndata['NikkiClose'] = Nikkei_df['Close'].values\n\n\n\n\n\n# カラムの並べ替え\ndf = data[['Date', 'weekday','High', 'Low', 'Open', 'Close', 'NikkiClose']]\n#df_dow = Dow_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]\n#df_nikkei = Nikkei_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]\nprint(df)\ndf.to_csv('data/stocks_price_data/KinoCode_data.csv') # csv書き出し\n'''\npng\n今回のような時系列データを処理する際には、set_indexメソッドを使ってindexを日付に設定します。念のためにsort_valuesメソッドを使って日付順に並び替えを行います。実行する事で、日付の'Date'がindexに設定されている事がわかります。\n'''\n\n\n# データの並び替え\ndf.sort_values(by='Date', ascending=True, inplace=True)\n\n# 日付をインデックスにセット\ndf.set_index(keys='Date', inplace=True)\nprint(df)\n\n'''\npng\n次に今回予測したい翌日の終値が本日の終値よりも上がるのかどうかの情報を追加します。shiftメソッドを用いてカラムの情報をずらすdfを作成する事ができるので、それを用いて計算を行います。\nshift(-1)とする事で、カラムの情報を1行上にずらしたデータフレームを作成する事ができます。\ndfを1行分上にずらしたものをdf_shiftとして作成します。実行する事でカラムの情報が1行分上にシフトしている事がわかります。一番下のカラムは欠損値となります。\n'''\n\n\n#カラム情報を1行上にずらしたデータフレームを作成する\ndf_shift = df.shift(-1)\ndf_shift\n\n#png\n#このdf_shiftを用いて、翌日の終値と本日の終値を引き算し、その結果を'delta_Close'というカラムを追加しdfに入力します。\n\n#翌日の始値と本日の終値の差分を追加する\ndf['delta_Close'] = df_shift['Close'] - df['Close']\ndf\n\n'''\npng\nこの'delta_Close'が上がる場合1、それ以外を0として目的変数となる'Up'のカラムを追加します。同時に'delta_Close'カラムの削除も行います。\n'''\n\n#目的変数Upを追加する(翌日の終値が上がる場合1、それ以外は0とする)、'delta_Close'カラムの削除\ndf['Up'] = 
0\ndf['Up'][df['delta_Close'] > 0] = 1\ndf = df.drop('delta_Close', axis=1)\ndf\n\n'''\npng\nここまでで、下準備となる週番号、曜日、目的変数の追加が終わりました。\n\nデータの全体像をつかむ\n時系列データをグラフで表示する事で、株価変動の大まかなイメージを掴みます。\n'Open', 'High', 'Low', 'Close'を抜き出しdf_newを作成後に、pyplotを用いてグラフ化行います。\nmatplotlibのライブラリからpyplotをpltという名前でインポートします。\ndf_newにplotメソッドを用いて、引数'kind=line'とする事で折れ線グラフが作成されます。pyplotのshowメソッドでグラフを表示します。\n\n初めの1965年から1990年頃までは、上昇傾向となっています。その後は下がる傾向となり、2010頃より再度上昇傾向である事がわかります。\n'''\n# 'Open', 'High', 'Low', 'Close'グラフ化のためにカラム抽出\ndf_new = df[['Open', 'High', 'Low', 'Close']]\n\n# matplotlibのインポート\n\n# 時系列折れ線グラフの作成\ndf_new.plot(kind='line')\nplt.show()\n\n'''\npng\n特徴量を追加する\n予測を正しく行えるようにする為の情報量(特徴量)を追加します。現在dfに入っている始値、終値、最高値、最安値の情報だけを用いて予測する事も可能ですが、株価の変動に影響すると言われている一般的な情報を追加していきます。\n終値の前日比率と、始値と終値の差分カラムに追加します。\n\nまず終値の前日比率ですが、本日の終値が前日から何%変動したのかを表す値となります。\n(今日の終値 - 前日の終値) ÷ 前日の終値\nで計算します。\nshiftメソッドを用いて、今度は1行したにずらしたデータフレームを作成し、終値の前日比率'Close_ratio'を計算しdfにカラムを追加します。\n'''\n\n\n# 終値の前日比の追加\ndf_shift = df.shift(1)\n\ndf['Close_ratio'] = (df['Close'] - df_shift['Close']) / df_shift['Close']\ndf\n\n#png\n#次に、始値と終値の差分'Body'をdfに追加します。\n\n# 始値と終値の差分を追加\ndf['Body'] = df['Open'] - df['Close']\ndf\n\n'''\npng\n特徴量の追加は以上になります。次に、不要なデータの削除を行います。今回、月曜日から木曜日までの情報を用いて、金曜日の始値が上がるか下がるのかを予測するモデルを作成するために、各週で月曜日から金曜日までのデータが揃っている週だけ使用します。祝日や年末年始など株取引が行われていない日はデータがない為、5日分のデータが揃っていない週が存在しています。\n各週毎に何日分のデータが存在しているのかを調べて、5日分揃っている週のデータを持ってきます。\n手順としては、週番号'weeks'のリストを作成します。その後リストから取り出した同じ週番号のデータ数をカウントして行き結果をdfに格納し、5日揃っている週だけ残す処理をします。\n週番号は0から2963まで連番で有ると考えられ、0から順番に処理すれば良いと考えられますが、万が一抜けている週が存在して居ても処理が行えるように、あえて週番号を抜き出したリスト(list_weeks)を作成します。\n'''\n\n'''\n# 週番号をリストに格納\nlist_weeks = []\nlist_weeks = df['weeks'].unique()\nlist_weeks\n\n\n#png\n#リストに従い、for文を用いて、週毎の日数をカウントしたカラム'week_days'にカウント数を入力します。\n\n# 各週ごとの日数を入力\ndf['week_days'] = 0\n\nfor i in list_weeks:\n df['week_days'][df['weeks'] == i] = len(df[df['weeks'] == i])\n\ndf\n\n#png\n#5日データの存在する週(week_daysが5)の週のデータを抜き出して、dfに入力します。\n\n# 月曜〜金曜まで5日分データのある週だけデータを取り出す\ndf = df[df['week_days'] == 5]\ndf\n\n#png\n#予測に使用しない金曜日のデータ(weekdayが4)を削除します。\n\n#金曜日のデータを削除する(weekday:4となるデータ)\ndf = df[df['weekday'] != 4]\ndf\n'''\n#png\n#不要カラムの削除と並び替えを行います。\n\n# 不要カラムの削除と並べ替え\ndf = df[['weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body', 'Up']]\ndf\n\n'''\npng\nここまでで、データの準備は完了です。\n\n学習データと検証データに分割する\nさて、ここからは直近の2018年以降のデータを使用します。\n2018年から2020年を学習データ、2021年以降を検証データとして分割します。\ndatetime64型をindexに設定している時系列のデータフレームは、期間を設定してデータを抜き出す事ができます。\n2018年1月1日から2020年12月31日までのデータを抜き出し、df_trainに入力します。\n'''\n\n\n# 学習データを2018-01-01〜2020-12-31の期間としdf_trainに入力する\ndf_train = df['2018-01-01': '2020-12-31']\ndf_train\n\n#png\n#同様に、2021年1月1日以降のデータを抜き出し、df_valに入力します。\n\n# 検証データを2021-01-01以降としてとしてdf_valに入力する\ndf_val = df['2021-01-01':]\ndf_val\n\n'''\npng\n学習データと検証データをそれぞれ、説明変数と目的変数に分けます。\n説明変数のカラムは'weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body'を\n目的変数のカラムは'Up'になります。\n学習データの説明変数をX_train、学習データの目的変数をy_trainとしてカラムを指定して、それぞれを入力します。また、表示することでX_train, y_trainそれぞれに指定した期間内のデータが入力されていることが分かります。\n'''\n\n\n# 学習データを説明変数(X_train)と目的変数(y_train)に分ける\nX_train = df_train[['weekday', 'High', 'Low',\n 'Open', 'Close', 'Close_ratio', 'Body']]\ny_train = df_train['Up']\n\n# 学習データの説明変数と目的変数を確認\nprint(X_train)\nprint(y_train)\n\n\n#png\n#png\n#同様に検証データの説明変数をX_val、目的変数をy_valとしてデータを入力し、確認します。\n\n# 検証データを説明変数(X_val)と目的変数(y_val)に分ける\nX_val = df_val[['weekday', 'High', 'Low',\n 'Open', 'Close', 'Close_ratio', 'Body']]\ny_val = df_val['Up']\n\n# 
検証データの説明変数と目的変数を確認\nprint(X_val)\nprint(y_val)\n\n\n#png\n#png\n#学習データと検証データの時系列グラフを作成し2021年前後でデータが分かれていることを目で確認します。2021年以前が学習データで青いグラフ、2021年以降が検証データでオレンジのグラフで示されている事が分かります。\n\n# 学習データと検証データの終値(Close)の折れ線グラフ作成\nX_train['Close'].plot(kind='line')\nX_val['Close'].plot(kind='line')\n\n# グラフの凡例を設定\nplt.legend(['X_train', 'X_val'])\n\n# グラフの表示\nplt.show()\n\n\n'''\npng\nデータを整える\n予測モデルに学習をさせるために、データを整えます。\n説明変数は各週毎の月曜日から木曜日の4日間をセットとして一つにまとめます。また、目的変数は翌日の金曜日の始値が上がるか下がるかを示す木曜日のデータを抜き出します。機械学習を行うためには説明変数と目的変数の数を揃える必要があります。\npng\n\n説明変数を抜き出す期間により、株価の金額や変動量が違っています。\n例えば、2020年4月頃は株価が16000円程度であったのに対し、12月頃には25000円を超えていたり、同じ週でも株価の変動が大きい事もあります。\nこのように抜き出している期間内において、データの大きさや変動幅が大きく異なっている場合、機械学習では予測が正しく行えない事があります。このような場合に標準化という処理を行うことが有ります。\nこの処理を行うことで、平均が0で±3以内の範囲に収める事が出来るために、機械は計算の処理がし易くなり、また予測精度が向上する事もあります。\npng\n\nこの4日毎にデータを抜き出して、標準化を行うための処理を、sklearnのpreprocessingというライブラリのStandardScalerという関数を使って、for文の繰り返し処理を用いて次のような関数を定義します。\nまた今回、機械学習に使用する予測モデルはLSTMというニューラルネットのモデルを使用します。このモデルではnumpy配列という形式のデータを用います。\n'''\n\n\n\n# 標準化関数(StandardScaler)のインポート\n# numpyのインポート\n# 4日ごとにデータを抜き出して、標準化ととnumpy配列に変換する関数(std_to_np)の定義\n\ndef std_to_np(df):\n df_list = []\n df = np.array(df)\n for i in range(0, len(df) - 3, 4):\n df_s = df[i:i+4]\n scl = StandardScaler()\n df_std = scl.fit_transform(df_s)\n df_list.append(df_std)\n return np.array(df_list)\n\n\n#標準化を行うStandardScalaerをsklearn.preprocessingから、numpyをnpとしてインポートします。\n# 次に4日毎にデータを抜き出し、標準化を行い、numpy配列で出力する関数(std_to_np)を定義します。\n#df_list = [] でまず空のリストを定義します。ここには標準化をおこなった後の、4日毎にまとまったデータを格納して行きます。\n#df = np.array(df) で入力されたデータフレームをまずnumpy配列に変換します。\n#この配列に対して、for文を用いて4日ずつのデータ抜き出して、df_sに入力(df_s=df[i:i+4])した後に、StandardScalerをインスタンス化し(scl= StandardScaler()) 標準化をおこなった結果をdf_stdに入力(df_std=scl.fit_transform(df_s))し、それをはじめに定義したdf_listにappendメソッドを用いて格納(df_list.append(df_std))して行きます。最後の4日分のデータまでこの繰り返し処理を行います。\n#繰り返し処理が終了すると、df_listをnumpy配列で出力(return np.array(df_list))します。\n\n#この関数をX_trainとX_valに適用してデータの型を確認します。\n\n# 学習データと検証データの説明変数に関数(std_to_np)を実行\nX_train_np_array = std_to_np(X_train)\nX_val_np_array = std_to_np(X_val)\n\n# 学習データと検証データの形の確認\nprint(X_train_np_array.shape)\nprint(X_val_np_array.shape)\n\n'''\npng\n出力結果から、480日分あったX_trainが4分の1の120個のデータとなり、132日分あったX_valが4分の1の33個のデータになっている事がわかります。\nそれぞれの数に続く'4'は月曜から木曜の4日分のデータ数を表しており、'7'は説明変数('weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body')のカラム数を表しています。\n続いて、目的変数の木曜日のデータだけ抜き出します。抜き出す前に一旦、学習データと検証データのデータを確認します。\n'''\n\n# 学習データと検証データの目的変数を確認\nprint(y_train)\nprint(y_val)\n\n#png\n#学習データは480個、検証データは132個有ることがわかります。\n#これらのデータに対して、各週の4日目(木曜日)のデータを抜き出して確認します。\n\n# 学習データ、検証データの目的変数の間引き\n# 週の4日目(木曜日)のデータだけ抜き出す\ny_train_new = y_train[3::4]\ny_val_new = y_val[3::4]\n\n# 間引き後の学習データと検証データの目的変数を確認\nprint(y_train_new)\nprint(y_val_new)\n\n#学習データと検証データそれぞれ各週の4日目のデータのみになっており、個数は120個と33個となっており、4日毎にまとめた説明変数のデータ数と同じになっています。\n#png\n#png\n\n#これで、機械学習を行うためのデータは整いました。\n\n'''\n予測モデルの作成\nニューラルネットの1種のLSTMを用いて予測モデルの作成と、検証データを用いた予測精度の検証をします。\nLSTMを使用するためにkerasのライブラリを使えるようにする必要があります。まずこのためにtensorflowをインストールします。個人の環境で、インストール済みの方は不要ですが、google colabolatoryを使用の方は毎回行う必要があります。インストールは次のコマンドで数秒で完了します。\n'''\n\n#!pip install tensorflow\n#続いて、kerasから必要な関数をインポートします。\n\n# keras.modelsからSequentialのインポート\n# keras.layersからDense、LSTMのインポート\n# Dropoutのインポート\n#ニューラルネットの構築や、パラメータのチューニング方法の説明は省略させて頂きますが、\n# 基本的な入力層、中間層と出力層からなるモデルをこのように構築することができます。\n# また、このモデルをlstm_compという関数で定義しましょう。\n\n# LSTM構築とコンパイル関数\n\n\ndef lstm_comp(df):\n # 入力層/中間層/出力層のネットワークを構築\n model = Sequential()\n model.add(LSTM(256, activation='relu', batch_input_shape=(\n None, df.shape[1], df.shape[2])))\n 
model.add(Dropout(0.2))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(1, activation='sigmoid'))\n\n # ネットワークのコンパイル\n model.compile(loss='binary_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n return model\n\n'''\n次に、作成したモデルが本当に予測に使用できるのかを確認する方法として、交差検証をしましょう。正解の分かっている学習データを複数に分割して、交差検証を行うのが有効です。\n交差検証の手法には複数存在しますが、今回の様な時系列のデータで過去のデータを用いて未来を予測する場合は、時系列分割の交差検証を用いるのが一般的です。\n今回は学習データを5分割し、学習データと検証データが図の様なイメージの組み合わせで合計4回の学習、予測と精度検証を繰り返します。これらのスコアの平均値から、モデルが予測に使用できるかの判断を行います。\nこの手法では検証データよりも過去のデータのみを用いて学習を行ないます。\npng\n\nまず、時系列分割交差検証を行うためのTimeSeriesSplitと、予測結果の精度(accuracy)を算出するためにaccuracy_scoreをインポートします。\n\n# 時系列分割のためTimeSeriesSplitのインポート\n# accuracy算出のためaccuracy_scoreのインポート\nつぎに、4回分の交差検証の結果を代入する空のリストを作成します。そして、TimeSeriesSplitのインスタンス化を行い変数(tscv)に代入します。\n'''\nvalid_scores = []\ntscv = TimeSeriesSplit(n_splits=4)\n\n'''\nfor文を用いて、交差検証を4回繰り返します。\n具体的にはこのような検証を実施します。\n\nsplitメソッドを用いて学習データを分割し、交差検証用の学習データと検証データを作成\n先に定義したlstm_comp関数よりLSTMモデルを作成\n交差検証用の学習データより学習\n検証データの説明変数を用いて予測\n予測結果の2値化\n検証データの目的変数(正解データ)を用いて、予測結果の精度算出と表示\n予測精度のスコアをリストに格納\n'''\n\nfor fold, (train_indices, valid_indices) in enumerate(tscv.split(X_train_np_array)):\n X_train, X_valid = X_train_np_array[train_indices], X_train_np_array[valid_indices]\n y_train, y_valid = y_train_new[train_indices], y_train_new[valid_indices]\n\n # LSTM構築とコンパイル関数にX_trainを渡し、変数modelに代入\n model = lstm_comp(X_train)\n\n '''# モデル学習'''\n hist = model.fit(X_train, y_train, epochs=10, batch_size=64)\n\n # loss(訓練データに対する判定結果)、val_loss(テストデータに対する判定結果)をプロットする\n #loss = hist.history['loss']\n #val_loss = hist.history['val_loss']\n #epochs = len(loss)\n ''''''\n\n\n # 予測\n y_valid_pred = model.predict(X_valid)\n\n # 予測結果の2値化\n y_valid_pred = np.where(y_valid_pred < 0.5, 0, 1)\n\n # 予測精度の算出と表示\n score = accuracy_score(y_valid, y_valid_pred)\n print(f'fold {fold} MAE: {score}')\n\n # 予測精度スコアをリストに格納\n valid_scores.append(score)\n\n\n#4回の交差検証が終了したら、予測精度のスコアが格納されたリストの表示し、スコアの平均値の算出と表示もしてみましょう。\n#4回のそれぞれのスコアと、平均値はこのようになりました。\n\nprint(f'valid_scores: {valid_scores}')\ncv_score = np.mean(valid_scores)\nprint(f'CV score: {cv_score}')\n\n'''\npng\n1回目:0.541\n2回目:0.708\n3回目:0.541\n4回目:0.333\n平均:0.531\n\n今回のような上がるか下がるかの2値予測の場合、一般的にはスコアが0.5以上であればある程度使用できるという目安となります。\n算出したスコアと平均値から、このモデルがある程度使用できるものと判断して次に進みましょう。\n\nでは、このモデルに対して、2018年から2020年の学習データを用いて学習をします。\n流れは先ほどの交差検証と似ています。\nまずは標準化した学習データでLSTMモデルを作成します。\n'''\n\n# LSTM構築とコンパイル関数にX_train_np_arrayを渡し、変数modelに代入\nmodel = lstm_comp(X_train_np_array)\n\n#作成したモデルで、学習します。\n#一瞬で学習が終了しました。\n\n# モデルの学習の実行\nresult = model.fit(X_train_np_array, y_train_new, epochs=10, batch_size=64)\n\n#今度は学習したモデルを用いて、検証データについて予測を行い、先頭の10個を表示させてみましょう。\n\n# 作成したモデルより検証データを用いて予測を行う\npred = model.predict(X_val_np_array)\npred[:10]\n\n'''\nこのように予測した結果が表示されます。\npng\n\nこの数値を、上がるか下がるかの0と1に変換します。numpyのwhereメソッドを用いて0.5を超えるものを1、それ以外を0と修正します。そして再度先頭の10個を表示します。\nこれで、上がるか下がるかの01どちらかの予測ができました。\n'''\n# 予測結果を0もしくは1に修正(0.5を境にして、1に近いほど株価が上昇、0に近いほど株価が上昇しない)\npred = np.where(pred < 0.5, 0, 1)\n\n# 修正した予測結果の先頭10件を確認\npred[:10]\n\n'''\npng\n次に、予測モデルの精度確認を行います。この予測結果を実際の値となる検証データの目的変数と比較し、正解率を計算します。sklearnのaccuracy_scoreという関数を使うことで計算が行えます。\nこの結果を表示すると57%の正解率で有ることがわかります。今回の様な株価が上がるか下がるかの2値の予測では、直感的に予測を行う場合50%の正解率となります。機械学習を用いる事でそれを超える正解率となりました。\n'''\n\n# 実際の結果から予測値の正解率を計算する\nprint('accuracy = ', accuracy_score(y_true=y_val_new, y_pred=pred))\n\n'''\n# モデルの精度を評価する\n# 決定係数とRMSEを計算する\n# 決定係数は1.0に、RMSEは0.0に近いほど、モデルの精度は高い\nr2_score = r2_score(y_test, predictions)\nrmse = np.sqrt(mean_squared_error(y_test, 
predictions))\n\nprint(f'r2_score: {r2_score:.4f}')\nprint(f'rmse: {rmse:.4f}')\n'''\n\n\n'''\npng\n最後に、予測結果と正解結果を混同行列を用いて確認します。\n混同行列とは、このように2行2列の表で、真陽性、真陰性、偽陽性、偽陰性の数を表したものです。今回は、予測が0で結果も0、予測が1で結果も1であれば正解です。0と予測して結果が1、1と予測して結果が0なら不正解ということになります。全体の精度だけではなく、0と1それぞれの正解に対する精度を確認することができます。\n\njpg\n\n混同行列を生成するために、sklern.mericsからconfusion_matrixとConfusionMatrixDisplayをインポートします。\nまた、視覚的にわかりやすい様に、ヒートマップで表示しましょう。\nこのように、正しく予測が行えているのは、右下の真陽性(TP)と左上の真陰性(TN)です。予測結果が、0か1のどちらかに極端に偏っている傾向ではなさそうですが、正しく予測できていないものも存在していることがわかります。予測精度を改善することで、偽陽性(FP)と偽陰性(FN)の数を減らすことができます。\n'''\n\n# 混同行列生成のためconfusion_matrixをインポート\n\n# 混同行列を表示\ncm = confusion_matrix(y_val_new, pred)\ncmp = ConfusionMatrixDisplay(cm)\ncmp.plot(cmap=plt.cm.Reds)\n\n# グラフの表示\nplt.show()\n'''\n今回は基本的な特徴量や、機械学習モデルの構築方法で予測を行いました。特徴量を追加することや、学習モデルの改良を行うことで、予測精度を向上させることが可能です。\nとはいえ、データの期間が変わるだけでも精度も変わります。必ずいつも予測がうまくいくわけではありませんのでご注意ください。\n\n'''\n\n'''\nGraphics parameter\n'''\n\n# Closeの列のデータのみを取り出し\nTergetData = data['Close'].values\n# datetimeの列のデータのみを取り出し\ndata = data.reset_index(drop=False)\nTergetDate = data['Date'].values\n\n#リシェイプ\nTergetData = TergetData.reshape(-1, 1) # float64\nTergetDate = TergetDate.reshape(-1, 1) # datetime64[ns]\n\n\n# 読み込んだ日経平均をプロット\nk = 700 # 表示する数\ni = TergetData.shape[0]-k\nj = TergetData.shape[0]\nxdata = TergetDate[i:j]\nydata = TergetData[i:j]\n\n#描画するデータの読み込み\nfig = plt.figure(figsize=(15, 10), dpi=100)\nax = fig.add_subplot(2, 1, 1)\n# 図全体のタイトル\nfig.suptitle(\n \"Long Short-Term Memory (Deep Larning) of Artificial Intelligence[AI]\", fontsize=20)\nplt.title(\"Test Graph\", {\"fontsize\": 20})\n\n\nax1 = plt.subplot(2, 2, 1) # 2x2の1番目\nax1.plot(xdata, ydata) # 1番目に描画\nax1.legend(loc='best')\nax1.grid()\nax1.set_xlabel('Date') # 1番目にxラベルを追加\nax1.set_ylabel(f'{code}') # 1番目にyラベルを追加\n\nax2 = plt.subplot(2, 2, 2) # 2x2の1番目\nax2.plot(range(epochs), loss, marker='.',\n label='loss(training data)') # 1番目に描画\nax2.plot(range(epochs), val_loss, marker='.',\n label='val_loss(evaluation data)') # 1番目に追加描画\nax2.legend(loc='best')\nax2.grid()\nax2.set_xlabel('epoch') # 1番目にxラベルを追加\nax2.set_ylabel('loss') # 1番目にyラベルを追加\n\nax3 = plt.subplot(2, 2, 3) # 2x2の3番目\nax3.plot(datelabel, predicted_N, marker='.', label='predicted') # 1番目に描画\nax3.plot(datelabel, y_test_price_N, marker='.',\n label='y_test_price') # 1番目に追加描画\nax3.legend(loc='best')\nax3.grid()\nax3.set_xlabel('Date')\nax3.set_ylabel(f'{code}')\n\nax4 = plt.subplot(2, 2, 4) # 2x2の4番目\nax4.plot(range(len(predicted_futureN)), predicted_futureN,\n marker='.', label='future predicted') # 1番目に描画\nax4.plot(range(len(predicted_futureN[:10])), predicted_futureN[:10],\n marker='.', label='real data', color=\"0.5\") # 1番目に追加描画\nax4.legend(loc='best')\nax4.grid()\nax4.set_xlabel('Date') # 1番目にxラベルを追加\nax4.set_ylabel(f'{code}') # 1番目にyラベルを追加\n\n\n# グラフを表示する\nplt.show()\n", "#artificial intelligence\n#artificial_i\n\n\n#Google colaboratoryでLinuxコマンドを実行する場合、コマンドの先頭に「!」を付ける (!pipや!wgetなど)\n#事前処理\n#!pip list\n#!pip install -q xlrd\n#!pip install pandas_datareader\n#!pip install --upgrade yfinance\n\n###############################################\n# Jupyter_notebook's Shortcut\n# Ctrl + \\ :すべてのランタイムをリセット[←ショートカットを任意に割り振り]\n# Ctrl + Enter :セルを実行\n###############################################\n\n#from google.colab import files\n#from google.colab import drive\n'''\nimport google.colab\nimport googleapiclient.discovery00\nimport googleapiclient.http\n'''\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier # 
決定木(分類)\n\n#drive.mount('/content/gdrive')\n\n#import pandas\nfrom pandas_datareader import data as pdr\n#import yfinance as yfin\n#yfin.pdr_override()\n\nstart = datetime.date(2021, 1, 1)\nend = datetime.date.today()\ncode = '6758' # SONY\nstock = []\n\n#https://finance.yahoo.com/quote/6758.T/history?period1=1609473600&period2=1646798399&interval=1d&frequency=1d&filter=history\n#df = pdr.get_data_yahoo('1514.T',start ='2021-06-07', end='2021-07-07')\n#df = web.DataReader(f'{code}.T', 'yahoo', start, end)\nadjclosed = pdr.get_data_yahoo(f'{code}.T',start, end)[\"Adj Close\"] \n#closed = pdr.get_data_yahoo(f'{code}.T', start, end)[\"Close\"] # 株価データの取得\nall_data = pdr.get_data_yahoo(f'{code}.T', start, end) # 株価データの取得\nadjclosed.to_csv('data/stocks_price_data/kabu_pre10_data.csv') # csv書き出し\nprint(all_data)\n\n\n\n'''学習'''\n'''教師データの数値の配列(train_X) と結果の配列 (train_y) を学習させ、テストデータの数値の配列 (test_X) を与えると予測結果 (test_y) が帰ってくるというそれだけです。'''\n'''###教師データをつくる'''\n# 過去の株価と上がり下がり(結果)を学習する\n# まずは一番面倒な株価の調整後終値(Adj Clouse)から教師データを作るまでのコードを用意します。\n# これは終値のリストを渡すと train_X と train_y が返るようにすれば良いでしょう。\n\n\ndef train_data(adjclosed): # arr = test_X\n train_X = [] # 教師データ\n train_y = [] # 上げ下げの結果の配列\n\n # 30 日間のデータを学習、 1 日ずつ後ろ(today方向)にずらしていく\n for i in np.arange(-30, -15):\n s = i + 14 # 14 日間の変化を素性にする\n feature = adjclosed.iloc[i:s] # i~s行目を取り出す\n if feature[-1] < adjclosed[s]: # その翌日、株価は上がったか?\n train_y.append(1) # YES なら 1 を\n else:\n train_y.append(0) # NO なら 0 を\n train_X.append(feature.values)\n\n # 教師データ(train_X)と上げ下げの結果(train_y)のセットを返す\n return np.array(train_X), np.array(train_y)\n\n\n#%%\n# \n# \n# main()\n#これで train_X (教師データの配列=学習データ) と train_y (それに対する 1 か 0 かのラベル=結果) が返ってきます。\nlearning = train_data(adjclosed) # adjclosed = test_X\n\n\n\n'''###決定木のインスタンスを生成'''\nclf = DecisionTreeClassifier(max_depth=2, random_state=0)\n\n'''###学習させる'''\n# train_X(教師データの配列=学習データ) と train_y(それに対する 1 か 0 かのラベル=結果) \nclf.fit(learning[0], learning[1])\n#clf.fit(train_X, train_y)\n#これであとは clf.predict() 関数にテストデータを渡すことで予測結果が返ってくるようになります。\n\n'''実際に予想する'''\n\n# 過去 30 日間のデータでテストデータを作成する\n#for i in np.arange(-30, -15):\ni=-15\ns = i + 14\n\ntest_X = adjclosed.iloc[i:s].values # '''テストデータの数値の配列 (test_X)'''i~s\nX = np.array(test_X).reshape(-1, 14)\n\nprint(\"test_X= \",test_X)\n\n#print(\"test_X:テストデータの数値=\", X)\n\n#clf.predict() 関数にテストデータXを渡すことで予測結果が返ってくる\nresults = clf.predict(X) # 予測結果 (test_y)\nprint(\"test_y:予測結果=\", clf.predict(X))\n\nif clf.predict(X) < 1: # その翌日、株価は上がったか?\n res = \"Decline=下落\"\nelse:\n res = \"Soaring=高騰\"\n\nprint(\"予測結果:\",res)\n\n'''\nライブラリ\t用途\npandas\t データの格納と前処理(クレンジング、統合、変換など)\nsklearn\t 様々な機械学習アルゴリズムを使用した学習と予測\nmatplotlib\tグラフ描画によるデータ可視化\njupyter\t ブラウザ上で対話形式のプログラミング\n\n\nモデルの学習と評価\nモデルの学習をしていきますが、今回は以下の分類アルゴリズムを検証してみたいと思います。\n\nアルゴリズム\t概要\nロジスティック回帰\t 0〜1の確率で返される2択の予測結果を分類に使用する手法\nサポートベクターマシン\tクラスを最大に分ける境界線を引いて、分類する手法\nK近傍法\t 予測対象データの近傍にあるデータ群の多数決で分類する手法\nランダムフォレスト\t 決定木(Yes/Noの分岐条件)を複数作って多数決で分類する手法\n\n以下の4種類のデータに分割しています。\n変数名\tデータ種別\t用途\nX_train\t説明変数\t学習データ\nX_test\t説明変数\t評価データ\ny_train\t目的変数\t学習データ\ny_test\t目的変数\t評価データ\n\n\n\n'''" ]
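Both stock scripts in this row reduce prediction to a binary up/down target: main.py compares the close future_num days ahead with today's, and KinoCode.py shifts the frame one row up and tests whether the next close is higher. Here is that labeling step condensed into a standalone sketch with invented sample data; it also sidesteps the chained assignment df['Up'][...] = 1 used in the original, which triggers pandas' SettingWithCopy warning.

import pandas as pd

def add_up_label(df: pd.DataFrame) -> pd.DataFrame:
    """Label each day 1 if the next day's Close is higher, else 0."""
    df_shift = df.shift(-1)                    # next day's row aligned to today
    delta_close = df_shift["Close"] - df["Close"]
    out = df.copy()
    out["Up"] = (delta_close > 0).astype(int)  # last row compares against NaN -> 0
    return out

prices = pd.DataFrame({"Close": [100.0, 101.5, 101.0, 103.2]})
print(add_up_label(prices))
# Close 100.0 -> Up 1, 101.5 -> 0, 101.0 -> 1, and the final row gets 0
# because it has no next day, matching the original scripts' behavior.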
[ [ "sklearn.metrics.roc_auc_score", "torch.sigmoid", "pandas.read_csv", "torch.load", "torch.nn.LSTM", "torch.manual_seed", "numpy.arange", "torch.nn.BCELoss", "torch.tensor", "numpy.round", "torch.nn.Linear", "numpy.zeros_like", "torch.no_grad", "torch.cuda.is_available", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ], [ "matplotlib.pyplot.legend", "pandas.to_datetime", "matplotlib.pyplot.title", "sklearn.metrics.accuracy_score", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.TimeSeriesSplit", "matplotlib.pyplot.subplot", "numpy.mean", "sklearn.preprocessing.StandardScaler", "numpy.array", "matplotlib.pyplot.show", "numpy.where", "sklearn.metrics.ConfusionMatrixDisplay", "matplotlib.pyplot.figure" ], [ "numpy.arange", "sklearn.tree.DecisionTreeClassifier", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hsy-Intel/fedlearner
[ "d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d", "d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d", "d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d" ]
[ "example/sparse_model/follower.py", "fedlearner/trainer/sparse_estimator.py", "test/data_join/test_data_block_visitor.py" ]
[ "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n# pylint: disable=no-else-return, inconsistent-return-statements\n\nimport tensorflow.compat.v1 as tf\nimport fedlearner.trainer as flt\n\nROLE = 'follower'\nparser = flt.trainer_worker.create_argument_parser()\nparser.add_argument('--batch-size', type=int, default=8,\n help='Training batch size.')\nparser.add_argument('--fid_version', type=int, default=1,\n help=\"the version of fid\")\nargs = parser.parse_args()\n\ndef input_fn(bridge, trainer_master=None):\n dataset = flt.data.DataBlockLoader(args.batch_size, ROLE,\n bridge, trainer_master).make_dataset()\n def parse_fn(example):\n feature_map = dict()\n feature_map = {\"fids\": tf.VarLenFeature(tf.int64)}\n feature_map[\"example_id\"] = tf.FixedLenFeature([], tf.string)\n features = tf.parse_example(example, features=feature_map)\n return features, dict(y=tf.constant(0))\n dataset = dataset.map(map_func=parse_fn,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\ndef serving_input_receiver_fn():\n features = {}\n features['fids_indices'] = tf.placeholder(dtype=tf.int64, shape=[None],\n name='fids_indices')\n features['fids_values'] = tf.placeholder(dtype=tf.int64, shape=[None],\n name='fids_values')\n features['fids_dense_shape'] = tf.placeholder(dtype=tf.int64, shape=[None],\n name='fids_dense_shape')\n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)()\n\ndef model_fn(model, features, labels, mode):\n global_step = tf.train.get_or_create_global_step()\n\n flt.feature.FeatureSlot.set_default_bias_initializer(\n tf.zeros_initializer())\n flt.feature.FeatureSlot.set_default_vec_initializer(\n tf.random_uniform_initializer(-0.0078125, 0.0078125))\n flt.feature.FeatureSlot.set_default_bias_optimizer(\n tf.train.FtrlOptimizer(learning_rate=0.01))\n flt.feature.FeatureSlot.set_default_vec_optimizer(\n tf.train.AdagradOptimizer(learning_rate=0.01))\n\n if args.fid_version == 1:\n slots = [512, 1023]\n else:\n model.set_use_fid_v2(True)\n slots = [512, 1023, 32767]\n hash_size = 101\n embed_size = 16\n for slot_id in slots:\n fs = model.add_feature_slot(slot_id, hash_size)\n fc = model.add_feature_column(fs)\n fc.add_vector(embed_size)\n\n model.freeze_slots(features)\n\n embed_output = model.get_vec()\n\n output_size = len(slots) * embed_size\n fc1_size = 64\n w1f = tf.get_variable(\n 'w1f', shape=[output_size, fc1_size], dtype=tf.float32,\n initializer=tf.random_uniform_initializer(-0.01, 0.01))\n b1f = tf.get_variable(\n 'b1f', shape=[fc1_size], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n\n act1_f = tf.nn.relu(tf.nn.bias_add(tf.matmul(embed_output, w1f), b1f))\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n gact1_f = model.send('act1_f', act1_f, require_grad=True)\n optimizer = tf.train.GradientDescentOptimizer(0.1)\n train_op = model.minimize(\n optimizer, act1_f, grad_loss=gact1_f, global_step=global_step)\n return model.make_spec(mode, 
loss=tf.math.reduce_mean(act1_f),\n train_op=train_op,)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n return model.make_spec(mode, predictions={'act1_f': act1_f})\n\n\nif __name__ == '__main__':\n flt.trainer_worker.train(\n ROLE, args, input_fn,\n model_fn, serving_input_receiver_fn)\n", "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# coding: utf-8\n# pylint: disable=protected-access\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.contrib import graph_editor as ge\n\nfrom fedlearner.trainer import embedding\nfrom fedlearner.trainer import estimator\nfrom fedlearner.trainer import feature\nfrom fedlearner.trainer import operator\nfrom fedlearner.trainer import utils\n\n\nclass ConfigRunError(Exception):\n pass\n\n\nclass SparseFLModel(estimator.FLModel):\n def __init__(self, role, bridge, example_ids, exporting=False,\n config_run=True,\n bias_tensor=None, vec_tensor=None,\n bias_embedding=None, vec_embedding=None,\n feature_columns=None):\n super(SparseFLModel, self).__init__(role,\n bridge, example_ids, exporting)\n\n self._config_run = config_run\n self._num_shards = 1\n if config_run:\n self._bias_tensor = tf.placeholder(tf.float32, shape=[None, None])\n self._vec_tensor = tf.placeholder(tf.float32, shape=[None, None])\n else:\n self._bias_tensor = bias_tensor\n self._vec_tensor = vec_tensor\n self._bias_embedding = bias_embedding\n self._vec_embedding = vec_embedding\n self._feature_columns = feature_columns\n\n self._frozen = False\n self._slot_ids = []\n self._feature_slots = {}\n self._feature_column_v1s = {}\n self._use_fid_v2 = False\n self._num_embedding_groups = 3\n\n def add_feature_slot(self, *args, **kwargs):\n assert not self._frozen, \"Cannot modify model after finalization\"\n fs = feature.FeatureSlot(*args, **kwargs)\n if self._use_fid_v2:\n assert 0 <= fs.slot_id < utils.MAX_SLOTS_v2, \\\n \"Invalid slot id %d\"%fs.slot_id\n else:\n assert 0 <= fs.slot_id < utils.MAX_SLOTS, \\\n \"Invalid slot id %d\"%fs.slot_id\n self._slot_ids.append(fs.slot_id)\n self._feature_slots[fs.slot_id] = fs\n return fs\n\n def add_feature_column(self, *args, **kwargs):\n assert not self._frozen, \"Cannot modify model after finalization\"\n fc = feature.FeatureColumnV1(*args, **kwargs)\n slot_id = fc.feature_slot.slot_id\n assert slot_id in self._feature_slots and \\\n self._feature_slots[slot_id] is fc.feature_slot, \\\n \"FeatureSlot with id %d must be added to Model first\"%slot_id\n assert slot_id not in self._feature_column_v1s, \\\n \"Only one FeatureColumnV1 can be created for each slot\"\n self._feature_column_v1s[slot_id] = fc\n return fc\n\n def set_use_fid_v2(self, use_fid_v2):\n self._use_fid_v2 = use_fid_v2\n\n def get_bias(self):\n return self._bias_tensor\n\n def get_vec(self):\n return self._vec_tensor\n\n def _get_bias_slot_configs(self):\n if not self._config_run:\n return self._bias_embedding.config if self._bias_embedding else None\n\n slot_list = []\n fs_map = {}\n for slot_id in self._slot_ids:\n 
fs = self._feature_slots[slot_id]\n key = (id(fs._bias_initializer), id(fs._bias_optimizer))\n fs_map[key] = fs\n slot_list.append((fs.slot_id, 1, fs.hash_table_size, key))\n if not slot_list:\n return None\n\n bias_config = utils._compute_slot_config(slot_list, 1,\n self._use_fid_v2)\n bias_config['name'] = 'bias'\n bias_config['slot_list'] = slot_list\n bias_config['initializers'] = [fs_map[i]._bias_initializer\n for i in bias_config['weight_group_keys']]\n bias_config['optimizers'] = [fs_map[i]._bias_optimizer\n for i in bias_config['weight_group_keys']]\n bias_config['use_fid_v2'] = self._use_fid_v2\n return bias_config\n\n def _get_vec_slot_configs(self):\n if not self._config_run:\n return self._vec_embedding.config if self._vec_embedding else None\n\n slot_list = []\n fs_map = {}\n for slot_id in self._slot_ids:\n if slot_id not in self._feature_column_v1s:\n continue\n fc = self._feature_column_v1s[slot_id]\n fs = fc.feature_slot\n if fc.feature_slot.dim > 1:\n key = (id(fs._vec_initializer), id(fs._vec_optimizer))\n fs_map[key] = fs\n slot_list.append((slot_id, fs.dim - 1, fs.hash_table_size, key))\n if not slot_list:\n return None\n\n vec_config = utils._compute_slot_config(slot_list,\n self._num_embedding_groups,\n self._use_fid_v2)\n vec_config['name'] = 'vec'\n vec_config['slot_list'] = slot_list\n vec_config['initializers'] = [fs_map[i]._vec_initializer\n for i in vec_config['weight_group_keys']]\n vec_config['optimizers'] = [fs_map[i]._vec_optimizer\n for i in vec_config['weight_group_keys']]\n vec_config['use_fid_v2'] = self._use_fid_v2\n return vec_config\n\n def get_feature_columns(self):\n return self._feature_column_v1s\n\n def freeze_slots(self, features):\n assert not self._frozen, \"Already finalized\"\n if self._config_run:\n raise ConfigRunError()\n\n self._sparse_v2opt = {}\n bias_config = self._get_bias_slot_configs()\n if bias_config:\n bias_weights = self._bias_embedding.weights\n for i, opt in enumerate(bias_config['optimizers']):\n for j in range(self._num_shards):\n self._sparse_v2opt[bias_weights[i][j]] = opt\n\n vec_config = self._get_vec_slot_configs()\n if vec_config:\n vec_weights = self._vec_embedding.weights\n for i, opt in enumerate(vec_config['optimizers']):\n for j in range(self._num_shards):\n self._sparse_v2opt[vec_weights[i][j]] = opt\n\n placeholders = []\n dims = []\n for slot_id, _, _, _ in vec_config['slot_list']:\n fc = self._feature_column_v1s[slot_id]\n for sslice in fc.feature_slot.feature_slices:\n dims.append(sslice.len)\n placeholders.append(fc.get_vector(sslice))\n vec_split = tf.split(self._vec_tensor, dims, axis=1)\n ge.swap_ts(vec_split, placeholders)\n\n for slot in self._feature_slots.values():\n slot._frozen = True\n self._frozen = True\n\n\nclass SparseFLEstimator(estimator.FLEstimator):\n def __init__(self,\n cluster_server,\n trainer_master,\n bridge,\n role,\n model_fn,\n is_chief=False):\n super(SparseFLEstimator, self).__init__(\n cluster_server, trainer_master, bridge, role, model_fn, is_chief)\n\n self._bias_slot_configs = None\n self._vec_slot_configs = None\n self._slot_configs = None\n try:\n ps_indices = cluster_server.cluster_spec.task_indices('ps')\n except ValueError:\n ps_indices = None\n finally:\n self._embedding_devices = [None,] if not ps_indices else \\\n ['/job:ps/task:%d'%i for i in ps_indices]\n self._num_shards = len(self._embedding_devices)\n\n def _preprocess_fids(self, fids, configs):\n if fids.indices.shape.rank == 2:\n fids = tf.IndexedSlices(indices=fids.indices[:, 0],\n values=fids.values,\n 
dense_shape=fids.dense_shape)\n features = {}\n for config in configs:\n features.update(operator._multidevice_preprocess_fids(\n fids, config, num_shards=self._num_shards))\n return features\n\n def _set_model_configs(self, mode): #features, labels, mode):\n with tf.Graph().as_default() as g:\n M = SparseFLModel(self._role,\n self._bridge,\n None, #features['example_id'],\n config_run=True)\n try:\n self._model_fn(M, None, None, mode) # features, labels, mode)\n except ConfigRunError as e:\n self._bias_slot_configs = M._get_bias_slot_configs()\n self._vec_slot_configs = M._get_vec_slot_configs()\n self._feature_columns = M.get_feature_columns()\n self._slot_configs = [self._bias_slot_configs,\n self._vec_slot_configs]\n return self._slot_configs\n raise UserWarning(\"Failed to get model config. Did you forget to call \\\n freeze_slots in model_fn?\")\n\n def _get_features_and_labels_from_input_fn(self, input_fn, mode):\n slot_configs = self._set_model_configs(mode) # features, labels, mode)\n def input_fn_wrapper(*args, **kwargs):\n dataset = input_fn(self._bridge, self._trainer_master)\n def mapper(features, *args):\n features.update(self._preprocess_fids(features.pop('fids'),\n slot_configs))\n return (features,) + args if args else features\n dataset = dataset.map(\n mapper, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.prefetch(2)\n return dataset\n\n return super(SparseFLEstimator, self\n )._get_features_and_labels_from_input_fn(input_fn_wrapper, mode)\n\n def _get_model_spec(self, features, labels, mode):\n features = features.copy()\n if mode == tf.estimator.ModeKeys.PREDICT:\n fids = tf.IndexedSlices(\n indices=features.pop('fids_indices'),\n values=features.pop('fids_values'),\n dense_shape=features.pop('fids_dense_shape'))\n features.update(self._preprocess_fids(\n fids, self._slot_configs))\n\n bias_embedding = embedding.Embedding(self._bias_slot_configs,\n devices=self._embedding_devices)\n bias_tensor = bias_embedding.lookup(features)\n if self._vec_slot_configs is not None:\n vec_embedding = embedding.Embedding(self._vec_slot_configs,\n devices=self._embedding_devices)\n vec_tensor = vec_embedding.lookup(features)\n else:\n vec_embedding = None\n vec_tensor = None\n\n model = SparseFLModel(self._role, self._bridge,\n features.get('example_id', None),\n config_run=False,\n bias_tensor=bias_tensor,\n bias_embedding=bias_embedding,\n vec_tensor=vec_tensor,\n vec_embedding=vec_embedding,\n feature_columns=self._feature_columns)\n\n spec = self._model_fn(model, features, labels, mode)\n assert model._frozen, \"Please finalize model in model_fn\"\n return spec, model\n", "# Copyright 2020 The FedLearner Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\nimport unittest\nimport os\nimport random\n\nimport tensorflow.compat.v1 as tf\ntf.enable_eager_execution()\nfrom google.protobuf import text_format\nimport tensorflow_io\nfrom tensorflow.compat.v1 import gfile\n\nfrom fedlearner.common import db_client\nfrom fedlearner.common import common_pb2 as common_pb\nfrom fedlearner.common import data_join_service_pb2 as dj_pb\nfrom fedlearner.data_join import (\n data_block_manager, common,\n data_block_visitor, raw_data_manifest_manager\n)\nfrom fedlearner.data_join.data_block_manager import DataBlockBuilder\nfrom fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem\n\nclass TestDataBlockVisitor(unittest.TestCase):\n def setUp(self):\n data_source = common_pb.DataSource()\n data_source.data_source_meta.name = \"milestone-x\"\n data_source.data_source_meta.partition_num = 4\n data_source.data_source_meta.start_time = 0\n data_source.data_source_meta.end_time = 10000\n data_source.output_base_dir = \"./ds_output\"\n data_source.role = common_pb.FLRole.Follower\n self.data_source = data_source\n self.kvstore = db_client.DBClient('etcd', True)\n common.commit_data_source(self.kvstore, self.data_source)\n if gfile.Exists(data_source.output_base_dir):\n gfile.DeleteRecursively(data_source.output_base_dir)\n self.data_block_matas = []\n self.manifest_manager = raw_data_manifest_manager.RawDataManifestManager(\n self.kvstore, self.data_source)\n partition_num = self.data_source.data_source_meta.partition_num\n for i in range(partition_num):\n self._create_data_block(i)\n\n def _create_data_block(self, partition_id):\n dbm = data_block_manager.DataBlockManager(self.data_source, partition_id)\n self.assertEqual(dbm.get_dumped_data_block_count(), 0)\n self.assertEqual(dbm.get_lastest_data_block_meta(), None)\n\n leader_index = 0\n follower_index = 65536\n for i in range(64):\n builder = DataBlockBuilder(\n common.data_source_data_block_dir(self.data_source),\n self.data_source.data_source_meta.name,\n partition_id, i,\n dj_pb.WriterOptions(output_writer='TF_RECORD'), None\n )\n builder.set_data_block_manager(dbm)\n for j in range(4):\n feat = {}\n example_id = '{}'.format(i * 1024 + j).encode()\n feat['example_id'] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example_id]))\n event_time = random.randint(0, 10)\n feat['event_time'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[event_time]))\n feat['leader_index'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[leader_index]))\n feat['follower_index'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[follower_index]))\n example = tf.train.Example(features=tf.train.Features(feature=feat))\n builder.append_item(TfExampleItem(example.SerializeToString()),\n leader_index, follower_index)\n leader_index += 1\n follower_index += 1\n self.data_block_matas.append(builder.finish_data_block())\n\n def test_data_block_visitor(self):\n self._test_round(10, 2, 4)\n 
self._test_round(63, 1, 7)\n\n def _test_round(self, dumped_index, start_time, end_time):\n partition_num = self.data_source.data_source_meta.partition_num\n for i in range(partition_num):\n self.manifest_manager.forward_peer_dumped_index(i, dumped_index)\n visitor = data_block_visitor.DataBlockVisitor(\n self.data_source.data_source_meta.name, 'etcd', True\n )\n reps = visitor.LoadDataBlockRepByTimeFrame(start_time, end_time)\n metas = [meta for meta in self.data_block_matas if\n (not (meta.end_time > end_time or meta.end_time <= start_time) and\n meta.data_block_index <= dumped_index)]\n self.assertEqual(len(reps), len(metas))\n for meta in metas:\n self.assertTrue(meta.block_id in reps)\n rep = reps[meta.block_id]\n self.assertEqual(meta.block_id, rep.block_id)\n self.assertEqual(meta.start_time, rep.start_time)\n self.assertEqual(meta.end_time, rep.end_time)\n self.assertEqual(meta.partition_id, rep.partition_id)\n self.assertEqual(meta, rep.data_block_meta)\n data_block_fpath = os.path.join(common.data_source_data_block_dir(self.data_source),\n common.partition_repr(meta.partition_id),\n meta.block_id + common.DataBlockSuffix)\n self.assertEqual(data_block_fpath, rep.data_block_fpath)\n\n for i in range(0, 100):\n rep = visitor.LoadDataBlockReqByIndex(random.randint(0, partition_num-1),\n random.randint(0, dumped_index))\n try:\n meta = [meta for meta in self.data_block_matas if \\\n meta.block_id == rep.block_id][0]\n except Exception as e:\n print(e)\n self.assertEqual(meta.block_id, rep.block_id)\n self.assertEqual(meta.start_time, rep.start_time)\n self.assertEqual(meta.end_time, rep.end_time)\n self.assertEqual(meta.partition_id, rep.partition_id)\n self.assertEqual(meta, rep.data_block_meta)\n data_block_fpath = os.path.join(common.data_source_data_block_dir(self.data_source),\n common.partition_repr(meta.partition_id),\n meta.block_id + common.DataBlockSuffix)\n self.assertEqual(data_block_fpath, rep.data_block_fpath)\n self.assertIsNone(visitor.LoadDataBlockReqByIndex(random.randint(0, partition_num-1),\n random.randint(dumped_index, 10000)))\n\n def tearDown(self):\n if gfile.Exists(self.data_source.output_base_dir):\n gfile.DeleteRecursively(self.data_source.output_base_dir)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.compat.v1.train.AdagradOptimizer", "tensorflow.compat.v1.math.reduce_mean", "tensorflow.compat.v1.train.FtrlOptimizer", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.FixedLenFeature", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.random_uniform_initializer", "tensorflow.compat.v1.VarLenFeature", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.train.GradientDescentOptimizer", "tensorflow.compat.v1.estimator.export.build_raw_serving_input_receiver_fn", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.parse_example" ], [ "tensorflow.compat.v1.split", "tensorflow.contrib.graph_editor.swap_ts", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.IndexedSlices", "tensorflow.compat.v1.Graph" ], [ "tensorflow.compat.v1.train.BytesList", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.compat.v1.train.Int64List", "tensorflow.compat.v1.gfile.DeleteRecursively", "tensorflow.compat.v1.train.Features" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lzhangbj/Tracking
[ "1cb3d28f95c6e9bbe27c1eec8b0cb19cc2045ff0" ]
[ "test_net.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet, resnet_rfcn\n\nfrom subprocess import call\nimport torch.cuda as cutorch\n\nimport pdb\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\ndef gpuStats():\n for i in range(cutorch.device_count()):\n sys.stdout.write(\"{}: {:d}/{:d}\\n\".format(cutorch.get_device_name(i), int(cutorch.memory_allocated(i)*1e-6), int(cutorch.max_memory_allocated(i)*1e-6)))\n\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/res101.yml', type=str)\n parser.add_argument('--video', dest='video',\n help='if using video mode or not',\n action='store_true') \n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models\",\n type=str)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--ls', dest='large_scale',\n help='whether use large imag scale',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n args = parser.parse_args()\n return args\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = 
cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == \"pascal_voc\":\n args.imdb_name = \"voc_2007_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdb_name = \"voc_2007_trainval+voc_2012_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet\":\n args.imdb_name = \"imagenet_train\"\n args.imdbval_name = \"imagenet_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"vg\":\n args.imdb_name = \"vg_150-50-50_minitrain\"\n args.imdbval_name = \"vg_150-50-50_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet_vid\":\n args.imdb_name = \"vid_2015_train\"\n args.imdbval_name = \"vid_2015_val\" # unused now\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']\n elif args.dataset == \"imagenet_vid_img\":\n args.imdb_name = \"vid_img_2015_train\"\n args.imdbval_name = \"vid_img_2015_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']\n\n\n args.cfg_file = \"cfgs/{}_ls.yml\".format(args.net) if args.large_scale else \"cfgs/{}.yml\".format(args.net)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n if not args.video:\n # load_name = os.path.join(input_dir,\n # 'rfcn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n load_name = \"rfcn_detect.pth\"\n else:\n load_name = os.path.join(input_dir,\n 'track_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n # initialize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n if not args.video:\n fasterRCNN = resnet_rfcn(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n # note: TrackingCell is not imported in this file; the video path needs that import\n fasterRCNN = TrackingCell(imdb.classes, class_agnostic=args.class_agnostic, pretrained_rfcn=None)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n 
fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n\n print('model loaded successfully!')\n # initialize the tensor holders here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n vis = args.vis\n\n if vis:\n thresh = 0.05\n else:\n thresh = 0.0\n\n if args.video:\n save_name = \"track_10\"\n else:\n save_name = 'rfcn_10'\n num_images = imdb.num_images\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = get_output_dir(imdb, save_name)\n if not args.video:\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \\\n imdb.num_classes, training=False, normalize=False)\n else:\n # note: vidbatchLoader is not imported in this file; the video path needs that import\n dataset = vidbatchLoader(imdb._video_structure, roidb, ratio_list, ratio_index, args.batch_size, \\\n imdb.num_classes, training=False, normalize=False)\n\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=0,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n # det_file = os.path.join(output_dir, '{}_{}_{}_detections.pkl'.format(args.checksession, args.checkepoch, args.checkpoint))\n det_file = \"pretrained_detections.pkl\"\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))\n with torch.no_grad():\n for i in range(num_images):\n # gpuStats()\n data = next(data_iter)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n # gpuStats()\n if not args.video: # fixed: this is module-level code, so args.video, not self.video\n det_tic = time.time()\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n # gpuStats()\n else:\n _, track_cls_loss, track_loc_loss, track_predict_loc_loss, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox = fasterRCNN(im_data,\\\n im_info, \\\n gt_boxes,\\\n num_boxes) # zl forward\n\n\n scores = cls_prob.data # (batch, max_num_rois, n_classes)\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4) # (1, max_num_rois*n_classes, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = 
bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1) # (batch, num_rois, 4)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= im_info.data[0][-1] # divided by scale\n\n scores = scores.squeeze() # suppose batchsize is 1, shape of (max_num_rois, n_classes)\n pred_boxes = pred_boxes.squeeze() # suppose batchsize is 1, shape of (max_num_rois, 4)\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:,j]>thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4] # if not class agnostic, pred shape is (max_num_roid, 4*n_classes)\n \n # cls_boxes_shape (inds, 4)\n # cls_scores shape (indes,)\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in xrange(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(i + 1, num_images, detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n cv2.imwrite('result.png', im2show)\n pdb.set_trace()\n #cv2.imshow('test', im2show)\n #cv2.waitKey(0)\n # gpuStats()\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n\n # with open(det_file, 'rb') as f:\n # all_boxes = pickle.load(f)\n\n\n print('Evaluating detections')\n imdb.evaluate_detections(all_boxes, output_dir)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.FloatTensor", "torch.no_grad", "torch.cuda.is_available", "numpy.where", "torch.autograd.Variable", "numpy.copy", "torch.sort", "torch.nonzero", "torch.LongTensor", "torch.cuda.device_count", "numpy.array", "numpy.random.seed", "numpy.tile", "numpy.sort", "torch.cuda.max_memory_allocated", "torch.cuda.get_device_name", "torch.cuda.memory_allocated" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NitishaS-812k/Mp3-Encoder
[ "318342e62b93e6bc63fdb2735c24c62918e68231", "318342e62b93e6bc63fdb2735c24c62918e68231", "318342e62b93e6bc63fdb2735c24c62918e68231" ]
[ "subband_filtering.py", "psychoacoustic.py", "scaled_fft.py" ]
[ "import numpy as np\n\ndef subband_filtering(x, h):\n \"\"\" \n\n implementing the efficient version of the subband filter\n as specified by the MP3 standard\n\n Arguments:\n x: a new 512-point data buffer, in time-reversed order [x[n],x[n-1],...,x[n-511]].\n h: The prototype filter of the filter bank\n\n Returns:\n s: 32 new output samples\n \"\"\"\n\n r = np.multiply(x,h)\n\n q = np.arange(64) \n\n c = np.sum((-1)**np.arange(8)[:, np.newaxis] * r[q + 64*np.arange(8)[:, np.newaxis]], axis=0)\n\n s = np.sum(np.cos(np.pi / 64. * (2 * np.arange(32)[:, np.newaxis] + 1) * (np.arange(q.shape[0]) - 16))*c, axis=1)\n\n return s\n", "import numpy as np\nimport sys\nfrom parameters import *\nfrom common import add_db\n\nimport scaled_fft\n\ndef smr_bit_allocation(params,smr):\n \"\"\"Calculate bit allocation in subbands from signal-to-mask ratio.\"\"\"\n \n bit_allocation = np.zeros(N_SUBBANDS, dtype='uint8')\n bits_header = 32\n bits_alloc = 4 * N_SUBBANDS * params.nch\n bits_available = (params.nslots + params.padbit) * SLOT_SIZE - (bits_header + bits_alloc)\n bits_available /= params.nch\n \n if bits_available <= 2 * FRAMES_PER_BLOCK + 6:\n sys.exit('Insufficient bits for encoding.')\n\n \n snr = params.table.snr\n mnr = snr[bit_allocation[:]] - smr\n\n while bits_available >= FRAMES_PER_BLOCK:\n subband = np.argmin(mnr)\n\n if bit_allocation[subband] == 15:\n mnr[subband] = INF\n continue\n\n if bit_allocation[subband] == 0:\n bits_needed = 2 * FRAMES_PER_BLOCK + 6\n else:\n bits_needed = FRAMES_PER_BLOCK\n\n if bits_needed > bits_available:\n mnr[subband] = INF\n continue\n\n if bit_allocation[subband] == 0:\n bit_allocation[subband] = 2\n else:\n bit_allocation[subband] += 1\n\n bits_available -= bits_needed\n mnr[subband] = snr[bit_allocation[subband]-1] - smr[subband]\n \n return bit_allocation\n\n\n\n\n\nclass TonalComponents:\n \"\"\"Marking of tonal and non-tonal components in the psychoacoustic model.\"\"\"\n \n def __init__(self, X):\n self.spl = np.copy(X)\n self.flag = np.zeros(X.size, dtype='uint8')\n self.tonecomps = []\n self.noisecomps = []\n\n\n\n\n\ndef model1(samples, params, sfindices):\n \"\"\"Psychoacoustic model as described in ISO/IEC 11172-3, Annex D.1.\"\"\"\n \n table = params.table\n\n X = scaled_fft.scaled_fft_db(samples)\n\n scf = table.scalefactor[sfindices] \n subband_spl = np.zeros(N_SUBBANDS)\n for sb in range(N_SUBBANDS):\n subband_spl[sb] = np.max(X[int(1 + sb * SUB_SIZE): int(1 + sb * SUB_SIZE + SUB_SIZE)])\n subband_spl[sb] = np.maximum(subband_spl[sb], 20 * np.log10(scf[0,sb] * 32768) - 10)\n \n peaks = []\n for i in range(3, FFT_SIZE // 2 - 6):\n if X[i]>=X[i+1] and X[i]>X[i-1]:\n peaks.append(i)\n\n\n #determining tonal and non-tonal components\n tonal = TonalComponents(X)\n tonal.flag[0:3] = IGNORE\n \n for k in peaks:\n is_tonal = True\n if k > 2 and k < 63:\n testj = [-2,2]\n elif k >= 63 and k < 127:\n testj = [-3,-2,2,3]\n else:\n testj = [-6,-5,-4,-3,-2,2,3,4,5,6]\n for j in testj:\n if tonal.spl[k] - tonal.spl[k+j] < 7:\n is_tonal = False\n break\n if is_tonal:\n tonal.spl[k] = add_db(tonal.spl[k-1:k+2])\n tonal.flag[k+np.arange(testj[0], testj[-1] + 1)] = IGNORE\n tonal.flag[k] = TONE\n tonal.tonecomps.append(k)\n \n\n #non-tonal components for each critical band\n for i in range(table.cbnum - 1):\n weight = 0.0\n msum = DBMIN\n for j in range(table.cbound[i], table.cbound[i+1]):\n if tonal.flag[i] == UNSET:\n msum = add_db((tonal.spl[j], msum))\n weight += np.power(10, tonal.spl[j] / 10) * (table.bark[table.map[j]] - i)\n if msum > DBMIN:\n 
index = weight/np.power(10, msum / 10.0)\n            center = table.cbound[i] + int(index * (table.cbound[i+1] - table.cbound[i]))\n            if tonal.flag[center] == TONE:\n                center += 1\n            tonal.flag[center] = NOISE\n            tonal.spl[center] = msum\n            tonal.noisecomps.append(center)\n\n\n    #decimation of tonal and non-tonal components\n    #under the threshold in quiet\n    #(a while loop is used so that removing an element does not skip the next one)\n    i = 0\n    while i < len(tonal.tonecomps):\n        k = tonal.tonecomps[i]\n        if tonal.spl[k] < table.hear[table.map[k]]:\n            tonal.tonecomps.pop(i)\n            tonal.flag[k] = IGNORE\n        else:\n            i += 1\n\n    i = 0\n    while i < len(tonal.noisecomps):\n        k = tonal.noisecomps[i]\n        if tonal.spl[k] < table.hear[table.map[k]]:\n            tonal.noisecomps.pop(i)\n            tonal.flag[k] = IGNORE\n        else:\n            i += 1\n\n\n    #decimation of tonal components closer than 0.5 Bark\n    #(the higher component minus the lower one gives the actual Bark-scale distance)\n    i = 0\n    while i < len(tonal.tonecomps) - 1:\n        this = tonal.tonecomps[i]\n        nxt = tonal.tonecomps[i+1]\n        if table.bark[table.map[nxt]] - table.bark[table.map[this]] < 0.5:\n            # keep only the stronger of the two neighbouring components\n            if tonal.spl[this] > tonal.spl[nxt]:\n                tonal.flag[nxt] = IGNORE\n                tonal.tonecomps.remove(nxt)\n            else:\n                tonal.flag[this] = IGNORE\n                tonal.tonecomps.remove(this)\n        else:\n            i += 1\n\n\n    #individual masking thresholds\n    masking_tonal = []\n    masking_noise = []\n\n    for i in range(table.subsize):\n        masking_tonal.append(())\n        zi = table.bark[i]\n        for j in tonal.tonecomps:\n            zj = table.bark[table.map[j]]\n            dz = zi - zj\n            if dz >= -3 and dz <= 8:\n                avtm = -1.525 - 0.275 * zj - 4.5\n                if dz >= -3 and dz < -1:\n                    vf = 17 * (dz + 1) - (0.4 * X[j] + 6)\n                elif dz >= -1 and dz < 0:\n                    vf = dz * (0.4 * X[j] + 6)\n                elif dz >= 0 and dz < 1:\n                    vf = -17 * dz\n                else:\n                    vf = -(dz - 1) * (17 - 0.15 * X[j]) - 17\n                masking_tonal[i] += (X[j] + vf + avtm,)\n\n    for i in range(table.subsize):\n        masking_noise.append(())\n        zi = table.bark[i]\n        for j in tonal.noisecomps:\n            zj = table.bark[table.map[j]]\n            dz = zi - zj\n            if dz >= -3 and dz <= 8:\n                avnm = -1.525 - 0.175 * zj - 0.5\n                if dz >= -3 and dz < -1:\n                    vf = 17 * (dz + 1) - (0.4 * X[j] + 6)\n                elif dz >= -1 and dz < 0:\n                    vf = dz * (0.4 * X[j] + 6)\n                elif dz >= 0 and dz < 1:\n                    vf = -17 * dz\n                else:\n                    vf = -(dz - 1) * (17 - 0.15 * X[j]) - 17\n                masking_noise[i] += (X[j] + vf + avnm,)\n\n\n    #global masking thresholds\n    masking_global = []\n    for i in range(table.subsize):\n        maskers = (table.hear[i],) + masking_tonal[i] + masking_noise[i]\n        masking_global.append(add_db(maskers))\n\n\n    #minimum masking thresholds\n    mask = np.zeros(N_SUBBANDS)\n    for sb in range(N_SUBBANDS):\n        first = table.map[int(sb * SUB_SIZE)]\n        after_last = table.map[int((sb + 1) * SUB_SIZE - 1)] + 1\n        mask[sb] = np.min(masking_global[first:after_last])\n\n\n    #signal-to-mask ratio for each subband\n    smr = subband_spl - mask\n\n\n    subband_bit_allocation = smr_bit_allocation(params, smr)\n    return subband_bit_allocation\n\n", "import numpy as np\n\ndef scaled_fft_db(x):\n    \"\"\"\n    1) Computes a 512-point Hann window and uses it to weight the input data.\n    2) Computes the DFT of the weighted input, takes the magnitude in dB and\n       normalizes it so that the maximum value is 96 dB.\n    3) Returns the first 257 values of the normalized spectrum.\n\n    Arguments:\n    x: 512-point input buffer.\n\n    Returns:\n    first 257 points of the normalized spectrum, in dB\n    \"\"\"\n\n    # compute the length of the input buffer\n    n = len(x)\n\n    # NumPy has a built-in function that returns a Hann window of length n\n    hanning_window = np.hanning(n)\n\n    # apply the window to the input buffer\n    y = np.multiply(x, hanning_window)\n\n    
# Fourier transform of the windowed buffer; rfft is used because the input is real-valued\n    fft_y = np.fft.rfft(y)\n\n    # take the magnitude\n    abs_fft_y = np.absolute(fft_y)\n\n    # normalize by dividing by the length of the buffer\n    normalized_fft_y = np.divide(abs_fft_y, n)\n\n    # only the first 257 values of the normalized spectrum are needed since the input signal is real\n    weighted = normalized_fft_y[0:257]\n\n    # convert the magnitudes to dB; if a value is 0 it is set to -100 dB\n    # (converted to an array so the scalar offset below broadcasts correctly)\n    weighted_in = np.array([20*(np.log10(m)) if m != 0 else -100 for m in weighted])\n\n    # rescale the output so that its maximum is 96 dB\n    weighted_in = 96 - max(weighted_in) + weighted_in\n\n    return weighted_in[0:257]" ]
[ [ "numpy.arange", "numpy.multiply" ], [ "numpy.power", "numpy.min", "numpy.arange", "numpy.int", "numpy.copy", "numpy.log10", "numpy.argmin", "numpy.zeros" ], [ "numpy.absolute", "numpy.multiply", "numpy.fft.rfft", "numpy.log10", "numpy.hanning", "numpy.divide" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sumanurawat/bert
[ "0f3a4fac1b886ec196868d469ea1fb410bd38e26" ]
[ "optimization.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow as tf\n\n\ndef create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op\n\n\nclass AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs an AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n" ]
[ [ "tensorflow.train.polynomial_decay", "tensorflow.multiply", "tensorflow.constant", "tensorflow.zeros_initializer", "tensorflow.cast", "tensorflow.gradients", "tensorflow.train.get_or_create_global_step", "tensorflow.clip_by_global_norm", "tensorflow.square", "tensorflow.trainable_variables", "tensorflow.sqrt", "tensorflow.group", "tensorflow.compat.v1.tpu.CrossShardOptimizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Chrisyunhua/cantools
[ "c0e3a92a23af997de20b4e3ceef323e1dd0f15ec" ]
[ "cantools/subparsers/plot.py" ]
[ "\n'''\nDecode \"candump\" CAN frames or the output of \"cantools decode\"\nread from standard input and plot them using matplotlib.\nYou can select which signals to plot by specifying them on the command line.\nEach signal is one argument and has the pattern \"[bo.]sg[:fmt]\"\nwhere bo is the name of the message, sg is the name of the signal\nand fmt is the format of the graph.\nThe wildcards * (any number of any character)\nand ? (exactly one arbitrary character)\ncan be used inside of sg and bo.\nIf bo is omitted it defaults to *.\n\nfmt is passed to matplotlib and can be used to specify\nthe line style, markers and color.\nFor example the following values can be combined:\nLine style:\n '-' solid line style,\n '--' dashed line style,\n '-.' dash-dot line style and\n ':' dotted line style.\nMarkers:\n '.' point marker,\n ',' pixel marker,\n 'o' circle marker,\n 's' square marker,\n 'D' diamond marker,\n 'x' x marker\n and many more.\nColors:\n 'b' blue,\n 'g' green,\n 'r' red,\n 'c' cyan,\n 'm' magenta,\n 'y' yellow,\n 'k' black and\n 'w' white.\n 'C0'...'C9' the colors defined by the current style\nhttps://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html\n\nIf the first character of fmt is a '|' stem is used instead of plot.\n\nSignals can be separated by a '-' to show them in different subplots.\n\nSignals can be separated by a ',' to make them refer to different vertical axes in the same subplot.\nI recommend using this with the option --auto-color-ylabels.\n\nAll signals (independent of the subplot and vertical axis) share the same horizontal axis.\n'''\n\nimport sys\nimport re\nimport binascii\nimport struct\nimport datetime\nimport argparse\nfrom argparse_addons import Integer\ntry:\n from matplotlib import pyplot as plt\nexcept ImportError:\n plt = None\n\nfrom .. import database\nfrom .. import errors\n\n\nPYPLOT_BASE_COLORS = \"bgrcmykwC\"\n\n\nclass MatplotlibNotInstalledError(errors.Error):\n\n def __init__(self):\n super().__init__(\"The matplotlib package not installed and is required \"\n \"for producing plots.\")\n\n\nif plt is not None:\n #TODO: I am not allowing \"%H:%M\" as input (for --start or --stop) because it could be misinterpreted as \"%M:%S\". Should this output format be changed?\n # I don't think the ambiguity is a problem for the output because if it is not obvious from the context it can be easily clarified with --xlabel.\n # However, it seems very unintuitive if the same format which is used for output is not allowed for input.\n # If you do change it, remember to uncomment the tests in test_plot_unittests.py.\n plt.rcParams[\"date.autoformatter.hour\"] = \"%H:%M\"\n plt.rcParams[\"date.autoformatter.minute\"] = \"%H:%M\"\n plt.rcParams[\"date.autoformatter.microsecond\"] = \"%H:%M:%S.%f\"\n\n\n# Matches 'candump' output, i.e. \"vcan0 1F0 [8] 00 00 00 00 00 00 1B C1\".\nRE_CANDUMP = re.compile(r'^\\s*(?:\\((?P<time>.*?)\\))?\\s*\\S+\\s+(?P<frameid>[0-9A-F]+)\\s*\\[\\d+\\]\\s*(?P<data>[0-9A-F ]*)(?:\\s*::.*)?$')\n# Matches 'cantools decode' output, i.e. \")\" or \" voltage: 0 V,\".\nRE_DECODE = re.compile(r'\\w+\\(|\\s+\\w+:\\s+[0-9.+-]+(\\s+.*)?,?|\\)')\n# Matches 'candump -l' (or -L) output, i.e. 
\"(1594172461.968006) vcan0 1F0#0000000000001BC1\"\nRE_CANDUMP_LOG = re.compile(r'^\\((?P<time>\\d+\\.\\d+)\\)\\s+\\S+\\s+(?P<frameid>[\\dA-F]+)#(?P<data>[\\dA-F]*)$')\n\n\ndef _mo_unpack(mo):\n '''extract the data from a re match object'''\n timestamp = mo.group('time')\n frame_id = mo.group('frameid')\n frame_id = '0' * (8 - len(frame_id)) + frame_id\n frame_id = binascii.unhexlify(frame_id)\n frame_id = struct.unpack('>I', frame_id)[0]\n data = mo.group('data')\n data = data.replace(' ', '')\n data = binascii.unhexlify(data)\n\n return timestamp, frame_id, data\n\nclass TimestampParser:\n\n '''\n Parses the values for the horizontal axis\n and generates the corresponding axis label.\n Preferably timestamps are used but if none\n are given it falls back to line numbers.\n '''\n\n # candump -ta, -tz and -td have the same timestamp syntax: a floating number in seconds.\n # In case of -td using timestamps does not seem useful and a user should use --line-numbers.\n # The following constant shall distinguish between -ta and -tz.\n # If the first timestamp is bigger than THRESHOLD_ABSOLUTE_SECONDS I am assuming -ta is used\n # and convert timestamps to datetime objects which will print a date.\n # Otherwise I'll assume -tz is used and format them using timedelta objects.\n # I am not using zero to compare against in case the beginning of the log file is stripped.\n THRESHOLD_ABSOLUTE_SECONDS = 60*60*24*7\n\n FORMAT_ABSOLUTE_TIMESTAMP = \"%Y-%m-%d %H:%M:%S.%f\"\n\n def __init__(self, args):\n self.use_timestamp = None\n self.relative = None\n self._parse_timestamp = None\n self.first_timestamp = None\n self.args = args\n\n def init_start_stop(self, x0):\n if self.use_timestamp and self.relative:\n parse = self.parse_user_input_relative_time\n elif self.use_timestamp:\n parse = self.parse_user_input_absolute_time\n else:\n parse = lambda s,x0: int(s)\n\n if self.args.start is not None:\n self.args.start = parse(self.args.start, x0)\n x0 = self.args.start\n self.first_timestamp = x0\n if self.args.stop is not None:\n self.args.stop = parse(self.args.stop, x0)\n\n def parse_user_input_relative_time(self, user_input, first_timestamp):\n try:\n return float(user_input)\n except:\n pass\n\n patterns_hour = ['%H:%M:', '%H:%M:%S', '%H:%M:%S.%f']\n patterns_minute = [':%M:%S', '%M:%S.', '%M:%S.%f']\n patterns_day = ['%d day', '%d days']\n\n day_time_sep = ', '\n for pattern_day in tuple(patterns_day):\n for pattern_time in ['%H:%M']+patterns_hour:\n patterns_day.append(pattern_day+day_time_sep+pattern_time)\n\n for pattern in patterns_minute + patterns_hour + patterns_day:\n t = self.strptimedelta_in_seconds(user_input, pattern)\n if t is not None:\n return t\n\n raise ValueError(\"Failed to parse relative time %r.\\n\\nPlease note that an input like 'xx:xx' is ambiguous. It could be either 'HH:MM' or 'MM:SS'. 
Please specify what you want by adding a leading or trailing colon: 'HH:MM:' or ':MM:SS' (or 'MM:SS.').\" % user_input)\n\n def strptimedelta_in_seconds(self, user_input, pattern):\n '''\n Parse the string representation of a time delta object.\n Return value: int in seconds or None if parsing failed.\n '''\n # I cannot use `datetime.datetime.strptime(user_input, pattern) - datetime.datetime.strptime(\"\", \"\")` because it treats no day as 1 day\n p = pattern\n p = p.replace('%H', '{hour}')\n p = p.replace('%M', '{min}')\n p = p.replace('%S', '{s}')\n p = p.replace('%f', '{ms}')\n p = p.replace('%d', '{day}')\n p = re.escape(p)\n p = p.replace(r'\\{hour\\}', '(?P<hour>[0-9][0-9]?)')\n p = p.replace(r'\\{min\\}', '(?P<min>[0-9][0-9]?)')\n p = p.replace(r'\\{s\\}', '(?P<s>[0-9][0-9]?)')\n p = p.replace(r'\\{ms\\}', '(?P<ms>[0-9]+)')\n p = p.replace(r'\\{day\\}', '(?P<day>[0-9][0-9]?)')\n p += '$'\n m = re.match(p, user_input)\n if m is None:\n return None\n\n d = m.groupdict('0')\n seconds = float(d.pop('s','0') + '.' + d.pop('ms','0'))\n d = {key:int(d[key]) for key in d}\n return ((d.pop('day',0)*24 + d.pop('hour',0))*60 + d.pop('min',0))*60 + seconds\n\n def parse_user_input_absolute_time(self, user_input, first_timestamp):\n patterns_year = ['%Y-%m-%d', '%d.%m.%Y']\n patterns_month = ['%m-%d', '%d.%m.']\n patterns_day = ['%d.']\n patterns_hour = ['%H:%M:', '%H:%M:%S', '%H:%M:%S.%f']\n patterns_minute = [':%M:%S', '%M:%S.', '%M:%S.%f']\n patterns_second = ['%S', '%S.%f']\n\n date_time_sep = ' '\n for patterns in (patterns_year, patterns_month, patterns_day):\n for pattern_date in tuple(patterns):\n for pattern_time in ['%H:%M']+patterns_hour:\n patterns.append(pattern_date+date_time_sep+pattern_time)\n\n patterns_year.append('%Y-%m')\n\n for attrs, patterns in [\n (['year', 'month', 'day', 'hour', 'minute'], patterns_second),\n (['year', 'month', 'day', 'hour'], patterns_minute),\n (['year', 'month', 'day'], patterns_hour),\n (['year', 'month'], patterns_day),\n (['year'], patterns_month),\n ([], patterns_year),\n ]:\n for p in patterns:\n try:\n out = datetime.datetime.strptime(user_input, p)\n except ValueError:\n pass\n else:\n kw = {a:getattr(first_timestamp,a) for a in attrs}\n out = out.replace(**kw)\n return out\n\n raise ValueError(\"Failed to parse absolute time %r.\\n\\nPlease note that an input like 'xx:xx' is ambiguous. It could be either 'HH:MM' or 'MM:SS'. 
Please specify what you want by adding a leading or trailing colon: 'HH:MM:' or ':MM:SS' (or 'MM:SS.').\" % user_input)\n\n def first_parse_timestamp(self, timestamp, linenumber):\n if timestamp is None:\n self.use_timestamp = False\n return linenumber\n\n try:\n out = self.parse_absolute_timestamp(timestamp)\n self.use_timestamp = True\n self.relative = False\n self.first_timestamp = out\n self._parse_timestamp = self.parse_absolute_timestamp\n return out\n except ValueError:\n pass\n\n try:\n if float(timestamp) > self.THRESHOLD_ABSOLUTE_SECONDS:\n out = self.parse_absolute_seconds(timestamp)\n self.relative = False\n self.first_timestamp = out\n self._parse_timestamp = self.parse_absolute_seconds\n else:\n out = self.parse_seconds(timestamp)\n self.relative = True\n self._parse_timestamp = self.parse_seconds\n\n self.use_timestamp = True\n return out\n except ValueError:\n pass\n\n self.use_timestamp = False\n return linenumber\n\n def parse_timestamp(self, timestamp, linenumber):\n if self.use_timestamp is None:\n x = self.first_parse_timestamp(timestamp, linenumber)\n self.init_start_stop(x)\n return x\n\n if self.use_timestamp:\n return self._parse_timestamp(timestamp)\n else:\n return linenumber\n\n def parse_absolute_timestamp(self, timestamp):\n return datetime.datetime.strptime(timestamp, self.FORMAT_ABSOLUTE_TIMESTAMP)\n\n @staticmethod\n def parse_absolute_seconds(timestamp):\n return datetime.datetime.fromtimestamp(float(timestamp))\n\n @staticmethod\n def parse_seconds(timestamp):\n return float(timestamp)\n\n def get_label(self):\n if self.use_timestamp:\n if self.relative:\n label = \"relative time\"\n else:\n label = \"absolute time\"\n else:\n label = \"line number\"\n\n if isinstance(self.first_timestamp, datetime.datetime):\n label += self.first_timestamp.strftime(\" (start: %d.%m.%Y)\")\n\n return label\n\ndef _do_decode(args):\n '''\n The entry point of the program.\n It iterates over all input lines, parses them\n and passes the data to a Plotter object.\n '''\n if plt is None:\n raise MatplotlibNotInstalledError()\n\n if args.list_styles:\n print(\"available matplotlib styles:\")\n for style in plt.style.available:\n print(\"- %s\" % style)\n return\n\n if args.show_errors:\n args.show_invalid_syntax = True\n args.show_unknown_frames = True\n args.show_invalid_data = True\n if args.quiet:\n args.ignore_invalid_syntax = True\n args.ignore_unknown_frames = True\n args.ignore_invalid_data = True\n\n dbase = database.load_file(args.database,\n encoding=args.encoding,\n frame_id_mask=args.frame_id_mask,\n strict=not args.no_strict)\n re_format = None\n timestamp_parser = TimestampParser(args)\n if args.show_invalid_syntax:\n # we cannot use a timestamp if we have failed to parse the line\n timestamp_parser.use_timestamp = False\n if args.line_numbers:\n timestamp_parser.use_timestamp = False\n\n if args.style is not None:\n plt.style.use(args.style)\n\n plotter = Plotter(dbase, args)\n\n line_number = 1\n while True:\n line = sys.stdin.readline()\n\n # Break at EOF.\n if not line:\n break\n\n line = line.strip('\\r\\n')\n if not line:\n continue\n\n # Auto-detect on first valid line.\n if re_format is None:\n mo = RE_CANDUMP.match(line)\n\n if mo:\n re_format = RE_CANDUMP\n else:\n mo = RE_CANDUMP_LOG.match(line)\n\n if mo:\n re_format = RE_CANDUMP_LOG\n else:\n mo = re_format.match(line)\n\n if mo:\n timestamp, frame_id, data = _mo_unpack(mo)\n timestamp = timestamp_parser.parse_timestamp(timestamp, line_number)\n if args.start is not None and timestamp < args.start:\n 
line_number += 1\n continue\n elif args.stop is not None and timestamp > args.stop:\n break\n plotter.add_msg(timestamp, frame_id, data)\n elif RE_DECODE.match(line):\n continue\n else:\n plotter.failed_to_parse_line(line_number, line)\n\n line_number += 1\n\n plotter.plot(timestamp_parser.get_label())\n\n\nclass Plotter:\n\n '''\n Decodes the data received from _do_decode further\n and stores them in a Signals object.\n Shows or exports the data plotted by Signals.\n '''\n\n # ------- initialization -------\n\n def __init__(self, dbase, args):\n self.dbase = dbase\n self.decode_choices = not args.no_decode_choices\n self.show_invalid_syntax = args.show_invalid_syntax\n self.show_unknown_frames = args.show_unknown_frames\n self.show_invalid_data = args.show_invalid_data\n self.ignore_invalid_syntax = args.ignore_invalid_syntax\n self.ignore_unknown_frames = args.ignore_unknown_frames\n self.ignore_invalid_data = args.ignore_invalid_data\n self.output_filename = args.output_file\n self.signals = Signals(args.signals, args.case_sensitive, args.break_time, args, args.auto_color_ylabels)\n\n self.x_invalid_syntax = []\n self.x_unknown_frames = []\n self.x_invalid_data = []\n\n # ------- while reading data -------\n\n def add_msg(self, timestamp, frame_id, data):\n try:\n message = self.dbase.get_message_by_frame_id(frame_id)\n except KeyError:\n if self.show_unknown_frames:\n self.x_unknown_frames.append(timestamp)\n if not self.ignore_unknown_frames:\n print('Unknown frame id {0} (0x{0:x})'.format(frame_id))\n return\n\n try:\n decoded_signals = message.decode(data, self.decode_choices)\n except Exception as e:\n if self.show_invalid_data:\n self.x_invalid_data.append(timestamp)\n if not self.ignore_invalid_data:\n print('Failed to parse data of frame id {0} (0x{0:x}): {1}'.format(frame_id, e))\n return\n\n for signal in decoded_signals:\n x = timestamp\n y = decoded_signals[signal]\n signal = message.name + '.' 
+ signal\n self.signals.add_value(signal, x, y)\n\n def failed_to_parse_line(self, timestamp, line):\n if self.show_invalid_syntax:\n self.x_invalid_syntax.append(timestamp)\n if not self.ignore_invalid_syntax:\n print(\"Failed to parse line: %r\" % line)\n\n # ------- at end -------\n\n def plot(self, xlabel):\n self.signals.plot(xlabel, self.x_invalid_syntax, self.x_unknown_frames, self.x_invalid_data)\n if self.output_filename:\n plt.savefig(self.output_filename)\n print(\"Result written to %s\" % self.output_filename)\n else:\n plt.show()\n\nclass Signals:\n\n '''\n Parses the command line options which signals should be plotted\n and saves the corresponding values in Graph objects.\n Automatically inserts None values as specified by break_time.\n Plots the values using matplotlib.pyplot.\n '''\n\n # added between signal names used as default ylabel\n YLABEL_SEP = ', '\n\n # before re.escape\n SEP_SUBPLOT = '-'\n SEP_AXES = ','\n\n SEP_FMT = ':'\n FMT_STEM = '|'\n\n # after re.escape\n SEP_SG = re.escape('.')\n\n WILDCARD_MANY = re.escape('*')\n WILDCARD_ONE = re.escape('?')\n\n COLOR_INVALID_SYNTAX = '#ff0000'\n COLOR_UNKNOWN_FRAMES = '#ffab00'\n COLOR_INVALID_DATA = '#ff00ff'\n ERROR_LINEWIDTH = 1\n\n FIRST_SUBPLOT = 1\n FIRST_AXIS = 0\n\n # ------- initialization -------\n\n def __init__(self, signals, case_sensitive, break_time, global_subplot_args, auto_color_ylabels):\n self.args = signals\n self.global_subplot_args = global_subplot_args\n self.signals = []\n self.values = {}\n self.re_flags = 0 if case_sensitive else re.I\n self.break_time = break_time\n self.break_time_uninit = True\n self.subplot = self.FIRST_SUBPLOT\n self.subplot_axis = self.FIRST_AXIS\n self.subplot_args = dict()\n self.subplot_argparser = argparse.ArgumentParser()\n self.subplot_argparser.add_argument('signals', nargs='*')\n add_subplot_options(self.subplot_argparser)\n\n i0 = 0\n while True:\n try:\n i1 = signals.index(self.SEP_SUBPLOT, i0)\n except ValueError:\n i1 = None\n\n try:\n i12 = signals.index(self.SEP_AXES, i0)\n except ValueError:\n i12 = None\n if i1 is None or i12 is not None and i12 < i1:\n i1 = i12\n\n subplot_signals = signals[i0:i1]\n subplot_args = self.subplot_argparser.parse_args(subplot_signals)\n if auto_color_ylabels and subplot_args.color is None:\n subplot_args.color = \"C%s\" % self.subplot_axis\n self.subplot_args[(self.subplot, self.subplot_axis)] = subplot_args\n self._ylabel = \"\"\n for sg in subplot_args.signals:\n self.add_signal(sg)\n if subplot_args.ylabel is None and self._ylabel:\n subplot_args.ylabel = self._ylabel\n\n if i1 is None:\n break\n\n if signals[i1] == self.SEP_SUBPLOT:\n self.subplot += 1\n self.subplot_axis = self.FIRST_AXIS\n else:\n self.subplot_axis += 1\n i0 = i1 + 1\n\n if not self.signals:\n self.add_signal('*')\n\n self.compile_reo()\n\n def init_break_time(self, datatype):\n if self.break_time <= 0:\n self.break_time = None\n elif datatype == datetime.datetime:\n self.half_break_time = datetime.timedelta(seconds=self.break_time/2)\n self.break_time = datetime.timedelta(seconds=self.break_time)\n else:\n self.half_break_time = self.break_time / 2\n self.break_time_uninit = False\n\n def add_signal(self, signal):\n if self.SEP_FMT in signal:\n signal, fmt = signal.split(self.SEP_FMT, 1)\n if fmt.startswith(self.FMT_STEM):\n fmt = fmt[len(self.FMT_STEM):]\n plt_func = 'stem'\n else:\n plt_func = 'plot'\n else:\n fmt = ''\n plt_func = 'plot'\n\n if self._ylabel:\n self._ylabel += self.YLABEL_SEP\n self._ylabel += signal\n\n signal = 
re.escape(signal)\n if self.SEP_SG not in signal:\n signal = self.WILDCARD_MANY + self.SEP_SG + signal\n signal = signal.replace(self.WILDCARD_MANY, '.*')\n signal = signal.replace(self.WILDCARD_ONE, '.')\n signal += '$'\n reo = re.compile(signal, self.re_flags)\n\n sgo = Signal(reo, self.subplot, self.subplot_axis, plt_func, fmt)\n self.signals.append(sgo)\n\n def compile_reo(self):\n self.reo = re.compile('|'.join(sg.reo.pattern for sg in self.signals), re.I)\n\n # ------- while reading data -------\n\n def add_value(self, signal, x, y):\n if not self.is_displayed_signal(signal):\n return\n\n if signal not in self.values:\n graph = Graph()\n self.values[signal] = graph\n else:\n graph = self.values[signal]\n last_x = graph.x[-1]\n if self.break_time_uninit:\n self.init_break_time(type(x))\n if self.break_time and last_x + self.break_time < x:\n x_break = last_x + self.half_break_time\n graph.x.append(x_break)\n graph.y.append(None)\n graph.x.append(x)\n graph.y.append(y)\n\n def is_displayed_signal(self, signal):\n return self.reo.match(signal)\n\n # ------- at end -------\n\n SUBPLOT_DIRECT_NAMES = ('title', 'ylabel')\n def plot(self, xlabel, x_invalid_syntax, x_unknown_frames, x_invalid_data):\n self.default_xlabel = xlabel\n splot = None\n last_subplot = self.FIRST_SUBPLOT - 1\n last_axis = None\n axis_format_uninitialized = True\n sorted_signal_names = sorted(self.values.keys())\n self.legend_handles = []\n self.legend_labels = []\n for sgo in self.signals:\n if sgo.subplot > last_subplot:\n if splot is None:\n axes = None\n else:\n axes = splot.axes\n self.finish_subplot(splot, self.subplot_args[(last_subplot, last_axis)])\n\n splot = plt.subplot(self.subplot, 1, sgo.subplot, sharex=axes)\n\n last_subplot = sgo.subplot\n last_axis = sgo.axis\n elif sgo.axis > last_axis:\n self.finish_axis(splot, self.subplot_args[(last_subplot, last_axis)])\n splot = splot.twinx()\n last_axis = sgo.axis\n\n plotted = False\n for signal_name in sorted_signal_names:\n graph = self.values[signal_name]\n if not sgo.match(signal_name):\n continue\n if graph.plotted_signal:\n if not self.is_replotting_desired(sgo, graph.plotted_signal):\n continue\n else:\n graph.plotted_signal = sgo\n\n x = graph.x\n y = graph.y\n if axis_format_uninitialized and x:\n if isinstance(x[0], float):\n splot.axes.xaxis.set_major_formatter(lambda x,pos: str(datetime.timedelta(seconds=x)))\n axis_format_uninitialized = False\n l = getattr(splot, sgo.plt_func)(x, y, sgo.fmt, label=signal_name)\n color = self.subplot_args[(sgo.subplot, sgo.axis)].color\n if color is not None and self.contains_no_color(sgo.fmt):\n for p in l:\n p.set_color(color)\n plotted = True\n\n if not plotted:\n print(\"WARNING: signal %r with format %r was not plotted.\" % (sgo.reo.pattern, sgo.fmt))\n\n self.plot_error(splot, x_invalid_syntax, 'invalid syntax', self.COLOR_INVALID_SYNTAX)\n self.plot_error(splot, x_unknown_frames, 'unknown frames', self.COLOR_UNKNOWN_FRAMES)\n self.plot_error(splot, x_invalid_data, 'invalid data', self.COLOR_INVALID_DATA)\n self.finish_subplot(splot, self.subplot_args[(last_subplot, last_axis)])\n\n def finish_axis(self, splot, subplot_args):\n kw = {key:val for key,val in vars(subplot_args).items() if val is not None and key in self.SUBPLOT_DIRECT_NAMES}\n for key in self.SUBPLOT_DIRECT_NAMES:\n if key not in kw:\n val = getattr(self.global_subplot_args, key)\n if val is not None:\n kw[key] = val\n if kw:\n splot.set(**kw)\n\n if subplot_args.xlabel is not None:\n xlabel = subplot_args.xlabel\n elif 
self.global_subplot_args.xlabel is not None:\n xlabel = self.global_subplot_args.xlabel\n else:\n xlabel = self.default_xlabel\n splot.set_xlabel(xlabel)\n\n if subplot_args.ymin is None:\n subplot_args.ymin = self.global_subplot_args.ymin\n if subplot_args.ymax is None:\n subplot_args.ymax = self.global_subplot_args.ymax\n if subplot_args.ymin is not None or subplot_args.ymax is not None:\n splot.axes.set_ylim(subplot_args.ymin, subplot_args.ymax)\n\n if subplot_args.color is not None:\n splot.yaxis.label.set_color(subplot_args.color)\n splot.tick_params(axis='y', which='both', colors=subplot_args.color)\n\n handles, labels = splot.get_legend_handles_labels()\n self.legend_handles.extend(handles)\n self.legend_labels.extend(labels)\n\n def finish_subplot(self, splot, subplot_args):\n self.finish_axis(splot, subplot_args)\n splot.legend(self.legend_handles, self.legend_labels)\n self.legend_handles = list()\n self.legend_labels = list()\n\n def contains_no_color(self, fmt):\n for c in fmt:\n if c in PYPLOT_BASE_COLORS:\n return False\n return True\n\n def plot_error(self, splot, xs, label, color):\n if xs:\n label += \" (%s)\" % len(xs)\n xs = iter(xs)\n splot.axvline(next(xs), color=color, linewidth=self.ERROR_LINEWIDTH, label=label)\n for x in xs:\n splot.axvline(x, color=color, linewidth=self.ERROR_LINEWIDTH)\n\n def is_replotting_desired(self, current_signal, previously_plotted_signal):\n if current_signal.reo.pattern == previously_plotted_signal.reo.pattern:\n # if the user bothers to type out the same regex twice\n # it is probably intended to be plotted twice\n return True\n if '.' not in current_signal.reo.pattern:\n # if the user bothers to type out a complete signal name without wildcards\n # he/she probably means to plot this signal even if it has been plotted already\n return True\n\n return False\n\n\nclass Signal:\n\n '''\n Stores meta information about signals to be plotted:\n - a regex matching all signals it refers to\n - the format how it should be plotted\n - the subplot in which to display the signal\n\n It does *not* store the values to be plotted.\n They are stored in Graph.\n Signal and Graph have a one-to-many-relationship.\n '''\n\n # ------- initialization -------\n\n def __init__(self, reo, subplot, axis, plt_func, fmt):\n self.reo = reo\n self.subplot = subplot\n self.axis = axis\n self.plt_func = plt_func\n self.fmt = fmt\n\n # ------- while reading data -------\n\n def match(self, signal):\n return self.reo.match(signal)\n\nclass Graph:\n\n '''\n A container for the values to be plotted.\n The corresponding signal names are the keys in Signals.values.\n The format how to plot this data is stored in Signals.signals (a list of Signal objects).\n\n plotted_signal stores a Signal object with which this graph has been plotted already\n to avoid undesired replotting of the same data in case the user gives two regex\n matching the same signal, one more specific to match a certain signal with a special format\n and one more generic matching the rest with another format.\n '''\n\n __slots__ = ('x', 'y', 'plotted_signal')\n\n def __init__(self):\n self.x = []\n self.y = []\n self.plotted_signal = None\n\n\nclass RawDescriptionArgumentDefaultsHelpFormatter(\n argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):\n pass\n\n\ndef add_subparser(subparsers):\n '''\n Is called from ../__init__.py.\n It adds the options for this subprogram to the argparse parser.\n It sets the entry point for this subprogram by setting a default values for func.\n 
'''\n decode_parser = subparsers.add_parser(\n 'plot',\n description=__doc__,\n formatter_class=RawDescriptionArgumentDefaultsHelpFormatter)\n decode_parser.add_argument(\n '-c', '--no-decode-choices',\n action='store_true',\n help='Do not convert scaled values to choice strings.')\n decode_parser.add_argument(\n '-e', '--encoding',\n help='File encoding of dbc file.')\n decode_parser.add_argument(\n '--no-strict',\n action='store_true',\n help='Skip database consistency checks.')\n decode_parser.add_argument(\n '-m', '--frame-id-mask',\n type=Integer(0),\n help=('Only compare selected frame id bits to find the message in the '\n 'database. By default the candump and database frame ids must '\n 'be equal for a match.'))\n decode_parser.add_argument(\n '-I', '--case-sensitive',\n action='store_true',\n help='Match the signal names case sensitive.')\n decode_parser.add_argument(\n '-l', '--line-numbers',\n action='store_true',\n help='Use line numbers instead of time stamps on the horizontal axis (useful with `candump -td`).')\n decode_parser.add_argument(\n '-t', '--break-time',\n default=100,\n type=float,\n help=('If the time distance between two consecutive signals is longer than this value '\n 'the line in the plot will be interrupted. The value is given in seconds '\n '(if timestamps are used) or input lines (if line numbers are used). '\n '-1 means infinite. '))\n\n decode_parser.add_argument(\n '--show-invalid-syntax',\n action='store_true',\n help='Show a marker for lines which could not be parsed. This implies -l.')\n decode_parser.add_argument(\n '--show-unknown-frames',\n action='store_true',\n help='Show a marker for messages which are not contained in the database file.')\n decode_parser.add_argument(\n '--show-invalid-data',\n action='store_true',\n help='Show a marker for messages with data which could not be parsed.')\n decode_parser.add_argument(\n '-s', '--show-errors',\n action='store_true',\n help='Show all error messages in the plot. This is an abbreviation for all --show-* options. This implies -l.')\n\n decode_parser.add_argument(\n '--ignore-invalid-syntax',\n action='store_true',\n help='Don\\'t print an error message for lines which could not be parsed.')\n decode_parser.add_argument(\n '--ignore-unknown-frames',\n action='store_true',\n help='Don\\'t print an error message for messages which are not contained in the database file.')\n decode_parser.add_argument(\n '--ignore-invalid-data',\n action='store_true',\n help='Don\\'t print an error message for messages with data which could not be parsed.')\n decode_parser.add_argument(\n '-q', '--quiet',\n action='store_true',\n help='Don\\'t print any error messages. This is an abbreviation for all --ignore-* options.')\n\n decode_parser.add_argument(\n '-o', '--output-file',\n help='A file to write the plot to instead of displaying it in a window.')\n\n decode_parser.add_argument(\n '-ss', '--start',\n help='A start time or line number. Everything before is ignored. '\n 'This filters the lines/messages to be processed. It does *not* set the minimum value of the x-axis.')\n decode_parser.add_argument(\n '-to', '--stop',\n help='An end time or line number. Everything after is ignored. '\n 'This filters the lines/messages to be processed. 
It does *not* set the maximum value of the x-axis.')\n\n decode_parser.add_argument(\n '--style',\n help='The matplotlib style to be used.')\n decode_parser.add_argument(\n '--list-styles',\n action='store_true',\n help='Print all available matplotlib styles without drawing a plot.')\n decode_parser.add_argument(\n '-ac', '--auto-color-ylabels',\n action='store_true',\n help='This is equivalent to applying --color C0 to the first y-axis, --color C1 to the second and so on.')\n\n decode_parser.add_argument(\n 'database',\n help='Database file.')\n decode_parser.add_argument(\n 'signals',\n nargs='*',\n help='The signals to be plotted.')\n decode_parser.set_defaults(func=_do_decode)\n\n subplot_arggroup = decode_parser.add_argument_group('subplot arguments',\n '''\\\nThe following options can be used to configure the subplots/axes.\nIf they shall apply to a specific subplot/axis they must be placed among the signals for that subplot/axis and a -- must mark the end of the global optional arguments.\nOtherwise they are used as default value for each subplot/axis.\n''')\n add_subplot_options(subplot_arggroup)\n\ndef add_subplot_options(arg_group):\n arg_group.add_argument('--title')\n arg_group.add_argument('--color',\n help='The color to be used for the y-label and the signals (unless a different color is given for the signal). '\n 'All string formats explained in the following link are allowed: https://matplotlib.org/tutorials/colors/colors.html')\n arg_group.add_argument('--xlabel')\n arg_group.add_argument('--ylabel')\n arg_group.add_argument('--ymin', type=float)\n arg_group.add_argument('--ymax', type=float)\n return arg_group\n" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
juancanete86/Nuclei-Competition
[ "d66df6f79ca55f63b99ccd870886718450bc5403" ]
[ "Nuclei_keras.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 25 11:42:23 2018\n\n@author: jcanete\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom itertools import chain\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\nfrom skimage.transform import resize\nfrom skimage.morphology import label\n\nfrom keras.models import Model, load_model\nfrom keras.layers import Input\nfrom keras.layers.core import Lambda\nfrom keras.layers.convolutional import Conv2D\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras import backend as K\n\nfrom ConvModel import ConvModel\n\nimport tensorflow as tf\n#from tensorflow.python.client import device_lib\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\n#sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\n#print (device_lib.list_local_devices())\n\ntf.test.gpu_device_name()\n\n# Set some parameters\nIMG_WIDTH = 256\nIMG_HEIGHT = 256\nIMG_CHANNELS = 3\ncurrent_path = os.getcwd()\ndsb_data_dir = os.path.join(current_path, \"Resources\")\nTRAIN_PATH = os.path.join(dsb_data_dir, 'stage1_train')\nTEST_PATH = os.path.join(dsb_data_dir, 'stage1_test')\n\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\nseed = 42\nrandom.seed = seed\nnp.random.seed = seed\n\n# Get train and test IDs\ntrain_ids = next(os.walk(TRAIN_PATH))[1]\ntest_ids = next(os.walk(TEST_PATH))[1]\n\n# Get and resize train images and masks\nX_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nY_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\nprint('Getting and resizing train images and masks ... ')\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n # path = TRAIN_PATH + id_\n path = os.path.join(TRAIN_PATH, id_)\n \n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n X_train[n] = img\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n for mask_file in next(os.walk(path + '/masks/'))[2]:\n mask_ = imread(path + '/masks/' + mask_file)\n mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', \n preserve_range=True), axis=-1)\n mask = np.maximum(mask, mask_)\n Y_train[n] = mask\n\n# Get and resize test images\nX_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nsizes_test = []\nprint('Getting and resizing test images ... 
')\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\n # path = TEST_PATH + id_\n path = os.path.join(TEST_PATH, id_)\n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n sizes_test.append([img.shape[0], img.shape[1]])\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n X_test[n] = img\n\nprint('Done!')\n\n# Check if training data looks all right\nix = random.randint(0, len(train_ids))\nimshow(X_train[ix])\nplt.show()\nimshow(np.squeeze(Y_train[ix]))\nplt.show()\n\n# Define IoU metric\ndef mean_iou(y_true, y_pred):\n prec = []\n for t in np.arange(0.5, 0.95, 0.05):\n y_pred_ = tf.to_int32(y_pred > t)\n score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2, y_true)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([up_opt]):\n score = tf.identity(score)\n prec.append(score)\n return K.mean(K.stack(prec), axis=0)\n\n# Build U-Net model\ninputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))\ns = Lambda(lambda x: x / 255) (inputs)\n\nconv_model = ConvModel.ConvModel(inputs, s)\n\n# Ready the model\nconv_model.u_net()\noutputs = Conv2D(1, (1, 1), activation='sigmoid') (conv_model.model)\n\nmodel = Model(inputs=[inputs], outputs=[outputs])\nmodel.compile(optimizer='nadam', loss='binary_crossentropy', metrics=[mean_iou])\nmodel.summary()\n\n\n# Fit model\nearlystopper = EarlyStopping(patience=7, verbose=1)\ncheckpointer = ModelCheckpoint('model-dsbowl2018.h5', verbose=1, save_best_only=True)\nresults = model.fit(X_train, Y_train, validation_split=0.1, batch_size=32, epochs=150, \n callbacks=[earlystopper, checkpointer])\n\n# Predict on train, val and test\nmodel = load_model('model-dsbowl2018.h5', custom_objects={'mean_iou': mean_iou})\npreds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)\npreds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)\npreds_test = model.predict(X_test, verbose=1)\n\n# Threshold predictions\npreds_train_t = (preds_train > 0.5).astype(np.uint8)\npreds_val_t = (preds_val > 0.5).astype(np.uint8)\npreds_test_t = (preds_test > 0.5).astype(np.uint8)\n\n# Create list of upsampled test masks\npreds_test_upsampled = []\nfor i in range(len(preds_test)):\n preds_test_upsampled.append(resize(np.squeeze(preds_test[i]), \n (sizes_test[i][0], sizes_test[i][1]), \n mode='constant', preserve_range=True))\n\n# Perform a sanity check on some random training samples\nix = random.randint(0, len(preds_train_t))\nimshow(X_train[ix])\nplt.show()\nimshow(np.squeeze(Y_train[ix]))\nplt.show()\nimshow(np.squeeze(preds_train_t[ix]))\nplt.show()\n\n# Perform a sanity check on some random validation samples\nix = random.randint(0, len(preds_val_t))\nimshow(X_train[int(X_train.shape[0]*0.9):][ix])\nplt.show()\nimshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.9):][ix]))\nplt.show()\nimshow(np.squeeze(preds_val_t[ix]))\nplt.show()\n\n\n# Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python\ndef rle_encoding(x):\n dots = np.where(x.T.flatten() == 1)[0]\n run_lengths = []\n prev = -2\n for b in dots:\n if (b>prev+1): run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths\n\ndef prob_to_rles(x, cutoff=0.5):\n lab_img = label(x > cutoff)\n for i in range(1, lab_img.max() + 1):\n yield rle_encoding(lab_img == i)\n\n\nnew_test_ids = []\nrles = []\nfor n, id_ in enumerate(test_ids):\n rle = list(prob_to_rles(preds_test_upsampled[n]))\n rles.extend(rle)\n new_test_ids.extend([id_] 
* len(rle))\n\n\n# Create submission DataFrame\nsub = pd.DataFrame()\nsub['ImageId'] = new_test_ids\nsub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))\nsub.to_csv('sub-dsbowl2018-1.csv', index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.metrics.mean_iou", "numpy.maximum", "pandas.Series", "tensorflow.test.gpu_device_name", "tensorflow.local_variables_initializer", "tensorflow.control_dependencies", "numpy.arange", "numpy.squeeze", "tensorflow.identity", "pandas.DataFrame", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.to_int32", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
arthus701/algopy
[ "1e2430f803289bbaed6bbdff6c28f98d7767835c", "1e2430f803289bbaed6bbdff6c28f98d7767835c", "1e2430f803289bbaed6bbdff6c28f98d7767835c", "1e2430f803289bbaed6bbdff6c28f98d7767835c" ]
[ "experimental/tests/scalar_reverse.py", "experimental/tests/trash/unit_test_with_sympy.py", "algopy/globalfuncs.py", "algopy/__init__.py" ]
[ "#!/usr/bin/env python\n\ntry:\n\tfrom reverse_mode import *\nexcept:\n\timport sys\n\tsys.path = ['..'] + sys.path\n\tfrom reverse_mode import *\n\n#############################################################\n# TESTING CLASS TC\n#############################################################\n# TESTING ALL FUNCTIONS FOR BASIC FUNCTIONALITY\n\n\n# testing the __init_function\ndef test_constructor_single_direction_list_as_input():\n\tinputlist = [3.]\n\ta = Tc(inputlist)\n\tassert a.t0 == 3.\n\tassert numpy.prod(a.tc[:] == numpy.array([inputlist[1:]]).T)\n\t\n\tinputlist = [3.,1.,2.]\n\ta = Tc(inputlist)\n\tassert a.t0 == 3.\n\tassert numpy.prod(a.tc[:] == numpy.array([inputlist[1:]]).T)\n\ndef test_constructor_single_direction_array_as_input():\n\tinputarray = numpy.array([3.,1.,2.])\n\ta = Tc(inputarray)\n\tassert a.t0 == 3.\n\tassert numpy.prod(a.tc[:] == array([inputarray[1:]]).T)\n\ndef test_constructor_single_direction_variable_input_length():\n\ta = Tc(3.)\n\tassert a.t0 == 3.\n\tassert numpy.prod(a.tc[:] == numpy.array([[]]).T)\n\n\t# Todo: variable inputlength!\n\tassert True\n\n\n# incremental operators\ndef test_incremental_addition_single_direction_Tc_Tc_same_order():\n\tD = 4\n\tinputarray1 = numpy.array([[1.* i for i in range(D)]]).T\n\tinputarray2 = numpy.array([[1. + i for i in range(D)]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta += b\n\tassert a.t0 == 1.\n\tassert numpy.prod(a.tc == (inputarray3[1:] + inputarray2[1:]))\n\ndef test_incremental_addition_multiple_directions_with_constant():\n\tD = 4\n\tt0 = 2.\n\ttc = array([[3,5,7],[23,43,45]])\n\ta = Tc(t0,tc)\n\ta += 2.\n\tassert a.t0 == 4.\n\tassert numpy.prod(a.tc == tc)\n\t\ndef test_incremental_addition_single_direction_Tc_Tc_different_order():\n\tD = 4\n\tE = 7\n\tG = min(D,E)\n\tinputarray1 = numpy.array([[1.* i for i in range(D)]]).T\n\tinputarray2 = numpy.array([[1. + i for i in range(E)]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta += b\n\tassert a.t0 == 1.\n\tassert numpy.prod(a.tc[:G-1] == (inputarray3[1:G] + inputarray2[1:G]))\n\tassert numpy.prod(a.tc[G-1:] == inputarray2[G:])\n\n\ndef test_incremental_addition_multiple_directions_Tc_Tc_same_order():\n\tD = 4\n\tNdir = 3\n\tinputarray1 = numpy.array([[1.* i + D*j for i in range(D)] for j in range(Ndir) ]).T\n\tinputarray2 = numpy.array([[1. + i + D*j for i in range(D)] for j in range(Ndir)]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(3.,inputarray1)\n\tb = Tc(7.,inputarray2)\n\n\ta += b\n\n\tprint('inputarray3=\\n',inputarray3)\n\tprint('inputarray2=\\n',inputarray2)\n\tprint('inputarray3[1:] + inputarray2[1:]=\\n',inputarray3[:] + inputarray2[:])\n\tassert a.t0 == 10.\n\tassert numpy.prod(a.tc == (inputarray3[:] + inputarray2[:]))\n\ndef test_incremental_substraction_multiple_directions_Tc_Tc_different_order():\n\tD = 4\n\tE = 7\n\tG = min(D,E)\n\tNdir = 3\n\tinputarray1 = numpy.array([[1.* i + D*j for i in range(D)] for j in range(Ndir) ]).T\n\tinputarray2 = numpy.array([[1. 
+ i + D*j for i in range(E)] for j in range(Ndir)]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(3.,inputarray1)\n\tb = Tc(7.,inputarray2)\n\n\ta -= b\n\n\tprint('inputarray3=\\n',inputarray3)\n\tprint('inputarray2=\\n',inputarray2)\n\tprint('inputarray3[:G] - inputarray2[:G]=\\n',inputarray3[:G] - inputarray2[:G])\n\tprint('a.tc[G:]=\\n',a.tc[G:])\n\tprint('-inputarray2[G:]=\\n',-inputarray2[G:])\n\tassert a.t0 == -4.\n\tassert numpy.prod(a.tc[:G] == (inputarray3[:G] - inputarray2[:G]))\n\tassert numpy.prod(a.tc[G:] == -inputarray2[G:])\n\n\ndef test_incremental_multiplication_single_direction_Tc_Tc_same_order():\n\tinputarray1 = numpy.array([[0.,1.,2.]]).T\n\tinputarray2 = numpy.array([[7.,11.,13.]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta *= b\n\tassert a.t0 == 0.\n\tassert a.tc[0] == ( inputarray3[0,0] * inputarray2[1,0] + inputarray3[1,0] * inputarray2[0,0] )\n\tassert a.tc[1] == ( inputarray3[0,0] * inputarray2[2,0]\n\t + inputarray3[1,0] * inputarray2[1,0]\n\t\t\t\t\t + inputarray3[2,0] * inputarray2[0,0] )\n\n\t\t\t\t\t \ndef test_incremental_multiplication_single_direction_Tc_Tc_different_order():\n\tinputarray1 = numpy.array([[0.,1.,2.]]).T\n\tinputarray2 = numpy.array([[7.,11.]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta *= b\n\tassert a.t0 == 0.\n\tassert a.tc[0] == ( inputarray3[0,0] * inputarray2[1,0] + inputarray3[1,0] * inputarray2[0,0] )\n\tassert a.tc[1] == ( inputarray3[1,0] * inputarray2[1,0]\n\t\t\t\t\t + inputarray3[2,0] * inputarray2[0,0] )\n\ndef test_incremental_multiplication_multiple_directions_Tc_Tc_same_order():\n\tinputarray1 = numpy.array([[0.,1.,2.],[3.,4.,5.],[2345.,12.,34.]])\n\tinputarray2 = numpy.array([[7.,11.,13.],[1.,2.,4.],[32.,4.,13.]])\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(3.,inputarray1)\n\tb = Tc(7.,inputarray2)\n\n\ta *= b\n\n\tprint('inputarray3=\\n',inputarray3)\n\tprint('inputarray2=\\n',inputarray2)\n\tprint('3. * inputarray2[0,:] + inputarray3[0,:] * 7.=\\n',3. * inputarray2[0,:] + inputarray3[0,:] * 7.)\n\tprint('a.tc[0,:]=\\n', a.tc[0,:])\n\t\n\tassert a.t0 == 21.\n\tassert numpy.prod(a.tc[0,:] == ( 3. * inputarray2[0,:] + inputarray3[0,:] * 7. ))\n\tassert numpy.prod(a.tc[1,:] == ( 3. * inputarray2[1,:]\n\t + inputarray3[0,:] * inputarray2[0,:]\n\t\t\t\t\t + inputarray3[1,:] * 7. )\n\t\t\t\t\t )\n\tassert numpy.prod(a.tc[2,:] == (\n\t 3. * inputarray2[2,:]\n\t + inputarray3[0,:] * inputarray2[1,:]\n\t\t\t\t\t + inputarray3[1,:] * inputarray2[0,:]\n\t\t\t\t\t + inputarray3[2,:] * 7. 
)\n\t\t\t\t\t )\n\ndef test_incremental_multiplication_multiple_directions_Tc_scalar():\n\tt0 = 2.\n\ttc = array([[1.,2,3],[4,5,6]])\n\ta = Tc(t0,tc.copy())\n\ta *= 2.\n\tprint('a=',a)\n\tprint('a.tc=',a.tc)\n\tprint('2*tc=',2.*tc)\n\tassert a == 4.\n\tassert prod(a.tc == 2.*tc)\n\n\n\t\t\t\t\t \n\ndef test_incremental_division_single_direction_Tc_Tc_same_order():\n\tinputarray1 = numpy.array([[1.,1.,2.]]).T\n\tinputarray2 = numpy.array([[7.,11.,13.]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta /= b\n\tprint('a.tc=\\n',a.tc)\n\tprint('a.tc true=\\n', '[',( 1./inputarray2[0,0] *( inputarray3[1,0] - a.t0 * inputarray2[1,0] )),',',( 1./inputarray2[0,0] *( inputarray3[2,0] - a.t0 * inputarray2[2,0] - a.tc[0] * inputarray2[1,0] )), ']')\n\tassert a.t0 == inputarray3[0,0]/inputarray2[0,0]\n\tassert abs(a.tc[0] - ( 1./inputarray2[0,0] *( inputarray3[1,0] - a.t0 * inputarray2[1,0] )))<10**(-8)\n\tassert abs(a.tc[1] - ( 1./inputarray2[0,0] *( inputarray3[2,0] - a.t0 * inputarray2[2,0] - a.tc[0] * inputarray2[1,0] )))<10**(-8)\n\ndef test_incremental_division_single_direction_Tc_Tc_different_order():\n\tinputarray1 = numpy.array([[1.,1.,2.]]).T\n\tinputarray2 = numpy.array([[7.,11.]]).T\n\tinputarray3 = inputarray1.copy() #need to copy since Tc constructor does not copy the memory\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\ta /= b\n\tprint('a.tc=\\n',a.tc)\n\tprint('a.tc true=\\n', '[',( 1./inputarray2[0,0] *( inputarray3[1,0] - a.t0 * inputarray2[1,0] )),',',( 1./inputarray2[0,0] *( inputarray3[2,0] - a.tc[0] * inputarray2[1,0] )), ']')\n\tassert a.t0 == inputarray3[0,0]/inputarray2[0,0]\n\tassert abs(a.tc[0] - ( 1./inputarray2[0,0] *( inputarray3[1,0] - a.t0 * inputarray2[1,0] )))<10**(-8)\n\tassert abs(a.tc[1] - ( 1./inputarray2[0,0] *( inputarray3[2,0] - a.tc[0] * inputarray2[1,0] )))<10**(-8)\n\n\n\n\n\n\n\n\n\n# binary operators\ndef test_operators_single_direction_Tc_Tc_same_order():\n\tD = 4\n\tinputarray1 = numpy.array([1.* i+12 for i in range(D)])\n\tinputarray2 = numpy.array([1. + i for i in range(D)])\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\t# functional test\n\tc = a+b\n\tc = a-b\n\tc = a*b\n\tc = a/b\n\n\t# identity test\n\tc = a-a\n\tassert c.t0 == 0\n\tassert numpy.prod(c.tc == 0)\n\n\tc = a/a\n\tassert c.t0 == 1\n\tassert numpy.prod(c.tc == 0)\n\n\n\ndef test_addition_single_direction_Tc_Tc_different_order():\n\tD = 4\n\tE = 7\n\tG = min(D,E) - 1 # first element is t0, thefefore subtract 1\n\tinputarray1 = numpy.array([[1.* i for i in range(D)]]).T\n\tinputarray2 = numpy.array([[1. 
+ i for i in range(E)]]).T\n\n\ta = Tc(inputarray1)\n\tb = Tc(inputarray2)\n\n\tc = a+b\n\n\tprint('a.tc=',a.tc)\n\tprint('b.tc=',b.tc)\n\tprint('c.tc=',c.tc)\n\tprint('a.tc[:G]=',a.tc[:G])\n\tprint('b.tc[:G]=',b.tc[:G])\n\n\tassert c.t0 == a.t0 + b.t0\n\tassert numpy.prod(c.tc[:G] == (a.tc[:G] + b.tc[:G]))\n\tassert numpy.prod(c.tc[G:] == (b.tc[G:]))\n\ndef test_addition_multiple_directions_Tc_float():\n\tt0 = 2.\n\ttc = array([[1.,2,3],[23,43,51]])\n\ta = Tc(t0,tc)\n\tc = a+2\n\td = 2+a\n\n\tprint('c.t0=',c.t0)\n\tprint('t0=',t0+2)\n\tprint('c.tc=',c.tc)\n\tprint('tc=',tc)\n\tassert c.t0 == 4.\n\tassert prod(c.tc == tc)\n\n\tassert d.t0 == 4.\n\tassert prod(d.tc == tc)\n\n\t\n\ndef test_division_single_direction_Tc_Tc_different_order():\n\ta = Tc(1,[[0.]])\n\tb = Tc(3.,[[5.],[7.]])\n\n\tc = a/b\n\n\tprint(' c.tc[0,0]=', c.tc[0,0])\n\tprint('-(a.t0/b.t0**2)*b.tc[0,0]=',-(a.t0/b.t0**2)*b.tc[0,0])\n\tprint('c.tc[1,0]=',c.tc[1,0])\n\tprint('( - (a.t0/b.t0**2)*b.tc[1,0] + 2*(a.t0/b.t0**3)*b.tc[1,0]**2 )=',( - (a.t0/b.t0**2)*b.tc[1,0] + (a.t0/b.t0**3)*b.tc[0,0]**2 ))\n\t\n\tassert c.t0 == a.t0 / b.t0\n\tassert abs(c.tc[0,0] + (a.t0/b.t0**2)*b.tc[0,0]) < 10**-6\n\tassert abs(c.tc[1,0] -( - (a.t0/b.t0**2)*b.tc[1,0] + (a.t0/b.t0**3)*b.tc[0,0]**2 )) < 10**-6\n\n# unary operators\n\ndef test_sqrt():\n\ta = Tc(2.25)\n\tb = sqrt(a)\n\tprint(a,b)\n\tassert sqrt(a.t0) == b.t0\n\n\ta = Tc([2.25,1.,0.])\n\tb = sqrt(a)\n\tprint(a,b)\n\n\tprint(0.5*a.t0**(-0.5))\n\tprint(-0.25*a.t0**(-1.5)/2.)\n\t\n\tassert sqrt(a.t0) == b.t0\n\tassert 0.5*a.t0**(-0.5) == b.tc[0,0]\n\tassert abs(-0.25*a.t0**(-1.5) - 2*b.tc[1,0])<10**-3\n\ndef test_integer_power():\n\ta = Tc([2.25,1.,0.])\n\tb = a**3\n\tc = a*a*a\n\tassert b.t0 == c.t0\n\tassert prod(b.tc[:] == c.tc[:])\n\n\ndef test_exponential():\n\ta = Tc([2.25,1.,0.])\n\tb = exp(a)\n\tprint(b)\n\tassert b.t0 == exp(a.t0)\n\tassert b.tc[0,0] == exp(a.t0)\n\tassert 2*b.tc[1,0] == exp(a.t0)\n\ndef test_logarithm():\n\ta = Tc([23.,1.,0.])\n\tb = log(a)\n\n\tprint(b)\n\tassert b.t0 == log(a.t0)\n\tassert b.tc[0,0] == 1./a.t0\n\tassert 2*b.tc[1,0] == -1./(a.t0*a.t0)\n\ndef test_sin_and_cos():\n\ta = Tc([23.,1.,0.])\n\ts = sin(a)\n\tc = cos(a)\n\n\tassert s.t0 == sin(a.t0)\n\tassert c.t0 == cos(a.t0)\n\n\tassert s.tc[0,0] == cos(a.t0)\n\tassert c.tc[0,0] == -sin(a.t0)\n\n\tassert 2*s.tc[1,0] == -sin(a.t0)\n\tassert 2*c.tc[1,0] == -cos(a.t0)\n\n\n# conditional operators\ndef test_lt_conditional():\n\ta = Tc(1,[[1,2,3]])\n\tb = Tc(1,[[1,2,3]])\n\tc = Tc(2,[[1,2,3]])\n\td = Tc(-1,[[1,2,3]])\n\n\t# < operator\n\tassert not (a<b)\n\tassert not (b<a)\n\tassert not (a<d)\n\tassert (a<c)\n\n\t# <= operator\n\tassert (a<=b)\n\tassert (b<=a)\n\tassert (a<=c)\n\tassert not (a<=d)\n\n\t# == operator\n\tassert (a==b)\n\tassert not (a==c)\n\tassert not (a==d)\n\n\t# != operator\n\tassert not (a!=b)\n\tassert (a!=c)\n\tassert (a!=d)\n\n\t# >= operator\n\tassert (a>=b)\n\tassert (b>=a)\n\tassert (a>=d)\n\tassert not (a>=c)\n\n\t# > operator\n\tassert not (a>b)\n\tassert not (b>a)\n\tassert (a>d)\n\tassert not (a>c)\n\n#############################################################\n# TESTING CLASS Function AND CGraph\n#############################################################\n\n\ndef test_graph_addition_with_constant():\n\tcg = CGraph()\n\tt0 = 2\n\ttc = array([[0,1],[1,0]])\n\ta = Function(Tc(t0,tc))\n\tb = a + 2\n\tc = 2 + a\n\n\tassert b.x.t0 == 4\n\tassert c.x.t0 == 4\n\n\tassert prod(b.x.tc == tc)\n\tassert prod(c.x.tc == tc)\n\t\n\ndef test_plotting_simple_cgraph():\n\tcg = 
CGraph()\n\tx = Function(Tc([11.,1.]))\n\ty = Function(Tc([13.,1.]))\n\tz = Function(Tc([13.,1.]))\n\tf = (x * y) + z*(x+y*(x*z))\n\tcg.independentFunctionList = [x,y]\n\tcg.dependentFunctionList = [z]\n\tcg.plot('trash/cg_example.png',method='circo')\n\t# no assert, this is only a functionality test,\n\ndef test_forward_mode():\n\t# first compute correct result by Taylor propagation\n\tx = Tc([11.,1.])\n\ty = Tc([13.,2.])\n\tz = Tc([13.,3.])\n\tf_tc = (x * y) + z*(x+y*(x*z))\n\n\tcg = CGraph()\n\tx = Function(Tc([11.]))\n\ty = Function(Tc([13.,1.]))\n\tz = Function(Tc([13.,1.,3,5,23]))\n\tf = (x * y) + z*(x+y*(x*z))\n\tcg.independentFunctionList = [x,y,z]\n\tcg.dependentFunctionList = [f]\n\tcg.forward([Tc([11.,1.]), Tc([13.,2.]), Tc([13.,3.])])\n\n\tprint(f)\n\tprint(f_tc)\n\tassert f.x.t0 == f_tc.t0\n\tassert f.x.tc[0] == f_tc.tc[0]\n\ndef test_reverse_mode_first_order():\n\timport sympy\n\tx,y,z = sympy.symbols('x','y','z')\n\tfs = (x * y) + z*(x+y*(x*z))\n\tgsx = fs.diff(x)\n\tgsy = fs.diff(y)\n\tgsz = fs.diff(z)\n\tdfdx = lambda x,y,z: eval(gsx.__str__())\n\tdfdy = lambda x,y,z: eval(gsy.__str__())\n\tdfdz = lambda x,y,z: eval(gsz.__str__())\n\n\tcg = CGraph()\n\tx = Function(Tc([11.]))\n\ty = Function(Tc([13.]))\n\tz = Function(Tc([17.]))\n\tf = (x * y) + z*(x+y*(x*z))\n\tcg.independentFunctionList = [x,y,z]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc([1.])])\n\t#print f\n\t#print x\n\t#print y\n\t#print z\n\n\tprint('x.xbar.t0=',x.xbar.t0)\n\tprint('dfdx(11.,13.,17.)', dfdx(11.,13.,17.))\n\tassert x.xbar.t0 == dfdx(11.,13.,17.)\n\tassert y.xbar.t0 == dfdy(11.,13.,17.)\n\tassert z.xbar.t0 == dfdz(11.,13.,17.)\n\n\ndef test_reverse_mode_second_order():\n\t\"\"\"computing first column of the Hessian\"\"\"\n\timport sympy\n\tx = sympy.symbols('x')\n\tfs = x*x\n\tgsxx = fs.diff(x).diff(x)\n\t\n\td2fdxdx = lambda x,y,z: eval(gsxx.__str__())\n\n\tcg = CGraph()\n\tx = Function(Tc([11.,1.]))\n\tf = x*x\n\tcg.independentFunctionList = [x]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc(1.)])\n\n\tprint(cg)\n\n\n\tprint('x.xbar.tc[0,0]=',x.xbar.tc[0,0])\n\tprint('d2fdxdx(11.,13.,17.)=',d2fdxdx(11.,13.,17.))\n\n\tassert x.xbar.tc[0,0] == d2fdxdx(11.,13.,17.)\n\n\n\ndef test_reverse_mode_second_order_two_variables():\n\t\"\"\"computing first column of the Hessian\"\"\"\n\timport sympy\n\tx,y = sympy.symbols('x','y')\n\tfs = y*(y*x)\n\tgsxx = fs.diff(x).diff(x)\n\tgsxy = fs.diff(x).diff(y)\n\t\n\td2fdxdx = lambda x,y: eval(gsxx.__str__())\n\td2fdxdy = lambda x,y: eval(gsxy.__str__())\n\n\n\tcg = CGraph()\n\tx = Function(Tc([11.,1.]))\n\ty = Function(Tc([13.]))\n\tf = y*x*y\n\tcg.independentFunctionList = [x,y]\n\tcg.dependentFunctionList = [f]\n\n\tprint(cg)\n\tcg.reverse([Tc(1.)])\n\n\tprint(cg)\n\n\n\tprint('x.xbar.tc[0,0]=',x.xbar.tc[0,0])\n\tprint('d2fdxdx(11.,13.,17.)=',d2fdxdx(11.,13.))\n\n\tprint('y.xbar.tc[0,0]=',y.xbar.tc[0,0])\n\tprint('d2fdxdy(11.,13.,17.)',d2fdxdy(11.,13.))\n\n\tassert x.xbar.tc[0,0] == d2fdxdx(11.,13.)\n\tassert y.xbar.tc[0,0] == d2fdxdy(11.,13.)\n\n\ndef test_reverse_mode_second_order_three_variables():\n\t\"\"\"computing first column of the Hessian\"\"\"\n\timport sympy\n\tx,y,z = sympy.symbols('x','y','z')\n\tfs = (x * y) + z*(x+y*(x*z))\n\tgsxx = fs.diff(x).diff(x)\n\tgsxy = fs.diff(x).diff(y)\n\tgsxz = fs.diff(x).diff(z)\n\t\n\td2fdxdx = lambda x,y,z: eval(gsxx.__str__())\n\td2fdxdy = lambda x,y,z: eval(gsxy.__str__())\n\td2fdxdz = lambda x,y,z: eval(gsxz.__str__())\n\n\tcg = CGraph()\n\tx = Function(Tc([11.,1.]))\n\ty = Function(Tc([13.]))\n\tz = 
Function(Tc([17.]))\n\tf = (x * y) + z*(x+y*(x*z))\n\tcg.independentFunctionList = [x,y,z]\n\tcg.dependentFunctionList = [f]\n\n\tprint(cg)\n\tcg.reverse([Tc(1.)])\n\tprint(cg)\n\n\n\tprint('x.xbar.tc[0,0]=',x.xbar.tc[0,0])\n\tprint('d2fdxdx(11.,13.,17.)=',d2fdxdx(11.,13.,17.))\n\n\tprint('y.xbar.tc[0,0]=',y.xbar.tc[0,0])\n\tprint('d2fdxdy(11.,13.,17.)',d2fdxdy(11.,13.,17.))\n\n\tprint('z.xbar.tc[0,0]=',z.xbar.tc[0,0])\n\tprint('d2fdxdz(11.,13.,17.)',d2fdxdz(11.,13.,17.))\n\n\tassert x.xbar.tc[0,0] == d2fdxdx(11.,13.,17.)\n\tassert y.xbar.tc[0,0] == d2fdxdy(11.,13.,17.)\n\tassert z.xbar.tc[0,0] == d2fdxdz(11.,13.,17.)\n\ndef test_inner_product_gradient():\n\timport numpy\n\tA = numpy.array([[11., 3.],[3.,17.]])\n\tdef fun(x):\n\t\treturn 0.5* numpy.dot(x, numpy.dot(A,x))\n\tcg = CGraph()\n\tx = numpy.array([Function(Tc(2.)), Function(Tc(7.))])\n\tf = fun(x)\n\tcg.independentFunctionList = x\n\tcg.dependentFunctionList = [f]\n\n\tcg.reverse([Tc(1.)])\n\n\ty = numpy.dot(A,[2.,7.])\n\n\tprint('x[0].xbar=',x[0].xbar)\n\tprint('x[1].xbar=',x[1].xbar)\n\tprint('y[0]=',y[0])\n\tprint('y[1]=',y[1])\n\n\tassert x[0].xbar.t0 == y[0]\n\tassert x[1].xbar.t0 == y[1]\n\t\ndef test_vector_forward_inner_product_hessian():\n\timport numpy\n\tA = numpy.array([[11., 3.],[3.,17.]])\n\tdef fun(x):\n\t\treturn 0.5* numpy.dot(x, numpy.dot(A,x))\n\tcg = CGraph()\n\tx = numpy.array([Function(Tc(2.,[[1.],[0.]])), Function(Tc(7.,[[0.],[1.]]))])\n\tf = fun(x)\n\tcg.independentFunctionList = x\n\tcg.dependentFunctionList = [f]\n\n\tcg.reverse([Tc(1.)])\n\n\tprint('x[0].xbar.tc=',x[0].xbar.tc)\n\tprint('x[1].xbar.tc=',x[1].xbar.tc)\n\tprint('A=',A)\n\n\tassert numpy.prod(x[0].xbar.tc[:,0] == A[:,0])\n\tassert numpy.prod(x[1].xbar.tc[:,0] == A[:,1])\n\n\tcg.plot('trash/inner_product.png',method='circo')\n\ndef test_conditionals():\n\tdef ge(a,b):\n\t\tif a>=b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\n\tdef gt(a,b):\n\t\tif a>b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\n\tdef le(a,b):\n\t\tif a<=b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\n\tdef lt(a,b):\n\t\tif a<b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\t\t\n\tdef eq(a,b):\n\t\tif a==b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\n\tdef ne(a,b):\n\t\tif a!=b:\n\t\t\treturn a*b\n\t\telse:\n\t\t\treturn a/b\n\t\n\t\t\n\tcg = CGraph()\n\ta = Function(Tc([1.,2.,3.]))\n\tb = Function(Tc([34.,2.]))\n\tc = Function(Tc([34.,3.]))\n\n\tc = ge(a,b)\n\td = ge(b,a)\n\tassert c.x.t0 == 1./34\n\tassert d.x.t0 == 34\n\n\tc = gt(a,b)\n\td = gt(b,a)\n\tassert c.x.t0 == 1./34\n\tassert d.x.t0 == 34\n\n\n\tc = le(b,a)\n\td = le(a,b)\n\tassert c.x.t0 == 34\n\tassert d.x.t0 == 34\n\n\tc = lt(b,a)\n\td = lt(a,b)\n\tassert c.x.t0 == 34\n\tassert d.x.t0 == 34\n\n\tc = eq(a,b)\n\td = eq(b,c)\n\tassert c.x.t0 == 1./34\n\tassert d.x.t0 == 34**2\n\n\tc = ne(a,b)\n\td = ne(b,c)\n\tassert c.x.t0 == 34\n\tassert d.x.t0 == 1\n\t\n\n\n\ndef test_graph__sqrt():\n\tcg = CGraph()\n\tx = Function(Tc([121.,1.,0.]))\n\tf = sqrt(x)\n\t#f = exp(cos(sin(x)+y)+x)\n\tcg.independentFunctionList = [x]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc(1)])\n\n\tprint('x.x=\\n',x.x)\n\tprint('x.xbar=\\n',x.xbar)\n\tprint('x.bar.tc[0,0]=',x.xbar.tc[0,0])\n\tprint('0.25 * x.x.t0**(-1.5)=',0.25 * x.x.t0**(-1.5))\n\n\tprint('2* x.xbar.tc[1,0]=',2* x.xbar.tc[1,0])\n\tprint(' 3./8 * x.x.t0**-2.5=', 3./8 * x.x.t0**-2.5)\n\n\tprint(cg)\n\t\n\tassert x.xbar.t0 == 0.5 / sqrt(x.x.t0)\n\tassert abs(x.xbar.tc[0,0] + 0.25 * x.x.t0**(-1.5)) < 10**-6\n\tassert abs( 2* x.xbar.tc[1,0] - 3./8 
* x.x.t0**-2.5) < 10**-6\n\n\ndef test_graph_sin():\n\tcg = CGraph()\n\tx = Function(Tc([1.,1.,0.]))\n\tf = sin(x)\n\t#f = exp(cos(sin(x)+y)+x)\n\tcg.independentFunctionList = [x]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc(1.)])\n\n\tprint('x.xbar.t0=',x.xbar.t0)\n\tprint('cos(x.x.t0)=',cos(x.x.t0))\n\n\tprint(' x.xbar.tc[0,0]=', x.xbar.tc[0,0])\n\tprint('-sin(x.x.t0)=',-sin(x.x.t0))\n\n\tassert x.xbar.t0 == cos(x.x.t0)\n\tassert x.xbar.tc[0,0] == -sin(x.x.t0)\n\n\tassert 2.*x.xbar.tc[1,0] == -cos(x.x.t0)\n\t\n\t\ndef test_graph_exp():\n\tcg = CGraph()\n\tx = Function(Tc([1.,1.,0.]))\n\tf = exp(x)\n\tcg.independentFunctionList = [x]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc(1.)])\n\n\tassert x.xbar.t0 == exp(x.x.t0)\n\tassert x.xbar.tc[0,0] == exp(x.x.t0)\n\tassert 2*x.xbar.tc[1,0] == exp(x.x.t0)\n\n\ndef test_graph_plotting_all_implemented_functions():\n\tA = array([[11., 3.],[3.,17.]])\n\tdef fun(x):\n\t\treturn 0.5* dot(x, dot(A,x))\n\t\n\tcg = CGraph()\n\tx = Function(Tc([1.,1.,0.]))\n\ty = Function(Tc([5.,1.,0.]))\n\tg = fun([x,y])\n\tf = sqrt(exp(cos(sin(x)/y)-x))\n\tf = f*g\n\tcg.independentFunctionList = [x]\n\tcg.dependentFunctionList = [f]\n\tcg.reverse([Tc(1)])\n\tcg.plot('trash/cgraph_all_implemented_functions.png',method='dot')\n\tcg.plot('trash/cgraph_all_implemented_functions.svg',method='dot')\n\n\n#############################################################\n# TESTING HIGH LEVEL FUNCTIONS\n#############################################################\n\ndef test_gradient_by_taping_then_gradient_from_graph():\n\n\t# defining the function\n\tA = array([[11., 3.],[3.,17.]])\n\tdef fun(x):\n\t\treturn 0.5* dot(x, dot(A,x))\n\n\t# tape the function\n\tx = array([1.,2.])\n\tcg = tape(fun,x)\n\n\t# compute gradient\n\tg = gradient_from_graph(cg)\n\n\tg_true = dot(A,x)\n\tassert prod(g == g_true)\n\ndef test_hessian():\n\t# defining the function\n\tA = array([[11., 3.],[3.,17.]])\n\tdef fun(x):\n\t\treturn 0.5* dot(x, dot(A,x))\n\n\t# compute the Hessian\n\tx = array([3.,7.])\n\tH = hessian(fun,x)\n\tprint(H)\n\n\tassert prod(H == A)\n\t\n", "#!/usr/bin/env python\n#######################################################\n# This is a Unit Test t that makes use of the Python #\n# Module Sympy. 
#\n#######################################################\n\nimport sympy as sym\nfrom numpy import array, zeros, ones, shape\nfrom numpy.random import random\nfrom numpy.linalg import norm\nfrom forward_mode import *\n\nN = 4\nD = 3\nM = N + 3\n\nxs = array([[sym.Symbol('x%d%d'%(n,d)) for d in range(D)] for n in range(N)])\n# computing the function f: R^(NxD) -> R symbolically\nfs = 0\nfor n in range(1,N):\n\tfor m in range(n):\n\t\ttmp = 0\n\t\tfor d in range(D):\n\t\t\ttmp += (xs[n,d] - xs[m,d])**2\n\t\ttmp = sym.sqrt(tmp)\n\t\tfs += tmp\n\n# computing the gradient symbolically\ndfs = array([[sym.diff(fs, xs[n,d]) for d in range(D)] for n in range(N)])\n\n# computing the Hessian symbolically\nddfs = array([[[[ sym.diff(dfs[m,e], xs[n,d]) for d in range(D)] for n in range(N)] for e in range(D) ] for m in range(N)])\n\n\n# function f\ndef f(x):\n\tretval = 0.\n\tfor n in range(1,N):\n\t\tfor m in range(n):\n\t\t\tretval += 1./ norm(x[n,:] - x[m,:])\n\treturn retval\n\ndef df(x):\n\tg = zeros(shape(x),dtype=float)\n\tfor n in range(N):\n\t\tfor d in range(D):\n\t\t\tfor m in range(N):\n\t\t\t\tif n != m:\n\t\t\t\t\tg[n,d] += (x[n,d] - x[m,d])/norm(x[n,:]-x[m,:])\n\treturn g\n\ndef ddf(x):\n\tN,D = shape(x)\n\tH = zeros((N,D,N,D),dtype=float)\n\tfor n in range(N):\n\t\tfor d in range(D):\n\t\t\tfor m in range(N):\n\t\t\t\tfor e in range(D):\n\t\t\t\t\tfor l in range(N):\n\t\t\t\t\t\tif l==n:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tH[n,d,m,e] -= (( (m==n) * (d==e) - (m==l)*(d==e) ) - 3* (x[n,d] - x[l,d])/norm(x[n,:]-x[l,:])**2 * ( (n==m) - (m==l))*( x[n,e] - x[l,e]))/norm(x[n,:] - x[l,:])**3\n\treturn H\n\ndef sym_df(x):\n\tsymdict = dict()\n\tfor n in range(N):\n\t\tfor d in range(D):\n\t\t\tsymdict[xs[n,d]] = x[n,d]\n\treturn array([[dfs[n,d].subs_dict(symdict).evalf() for d in range(D)] for n in range(N)])\n\ndef sym_ddf(x):\n\tsymdict = dict()\n\tfor n in range(N):\n\t\tfor d in range(D):\n\t\t\tsymdict[xs[n,d]] = x[n,d]\n\treturn array([[[[ ddfs[m,e,n,d].subs_dict(symdict).evalf() for d in range(D)] for n in range(N)] for e in range(D)] for m in range(N)],dtype=float)\n\ndef ad_df(x):\n\treturn gradient(f,x)\n\ndef ad_ddf(x):\n\treturn hessian(f,x)\n\n\n# point at which the derivatives should be evaluated\nx = random((N,D))\nprint('\\n\\n')\nprint('Sympy vs Hand Derived Gradient check (should be almost zero)')\nprint(df(x) - sym_df(x))\nprint('Sympy vs Ad Derive Gradient check (should be almost zero)')\nprint(ad_df(x) - sym_df(x))\n\n#print '\\n\\n'\n#print 'Hessian check (should be almost zero)'\n#print ddf(x) - sym_ddf(x)\n\n\n\n\n\n", "import math\nimport numpy\n\nimport string\nfrom . 
import utils\nfrom algopy import UTPM\nfrom algopy import Function\n\n# override numpy definitions\nnumpy_function_names = [\n 'exp', 'expm1', 'log', 'log1p', 'sqrt', 'pow',\n 'sin', 'cos', 'tan',\n 'arcsin', 'arccos', 'arctan',\n 'sinh', 'cosh', 'tanh',\n 'sign', 'absolute', 'square', 'negative', 'reciprocal',\n 'minimum', 'maximum',\n #'clip',\n 'trace', 'diag',\n 'triu', 'tril', 'reshape',\n 'tile',\n 'conjugate']\n\n\nfunction_template = string.Template('''\ndef $function_name(*args, **kwargs):\n \"\"\"\n generic implementation of $function_name\n\n this function calls, depending on the input arguments,\n either\n\n * numpy.$function_name\n * numpy.linalg.$function_name\n * args[i].__class__\n\n \"\"\"\n case,arg = 0,0\n for na,a in enumerate(args):\n if hasattr(a.__class__, '$function_name'):\n case = 1\n arg = na\n break\n\n if case==1:\n return getattr(args[arg].__class__, '$function_name')(*args, **kwargs)\n\n elif case==0:\n return $namespace.__getattribute__('$function_name')(*args, **kwargs)\n\n else:\n return $namespace.__getattribute__('$function_name')(*args, **kwargs)\n''')\n\nfor function_name in numpy_function_names:\n exec(function_template.substitute(function_name=function_name,\n namespace='numpy'))\n\n\ndef sum(x, axis=None, dtype=None, out=None):\n \"\"\" generic sum function\n calls either numpy.sum or Function.sum resp. UTPM.sum depending on\n the input\n \"\"\"\n\n if isinstance(x, numpy.ndarray) or numpy.isscalar(x):\n return numpy.sum(x, axis=axis, dtype=dtype, out = out)\n\n elif isinstance(x, UTPM) or isinstance(x, Function):\n return x.sum(axis = axis, dtype = dtype, out = out)\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\nsum.__doc__ += numpy.sum.__doc__\n\n\ndef real(x):\n \"\"\"\n algopy equivalent to numpy.real\n \"\"\"\n\n if isinstance(x, numpy.ndarray) or numpy.isscalar(x):\n return numpy.real(x)\n\n elif isinstance(x, UTPM) or isinstance(x, Function):\n return x.__class__.real(x)\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\nreal.__doc__ += numpy.real.__doc__\n\n\ndef imag(x):\n \"\"\"\n algopy equivalent to numpy.imag\n \"\"\"\n\n if isinstance(x, numpy.ndarray) or numpy.isscalar(x):\n return numpy.imag(x)\n\n elif isinstance(x, UTPM) or isinstance(x, Function):\n return x.__class__.imag(x)\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\nimag.__doc__ += numpy.imag.__doc__\n\n\n\ndef prod(x):\n \"\"\" generic sum function\n calls either numpy.sum or Function.sum resp. 
UTPM.sum depending on\n the input\n \"\"\"\n\n if isinstance(x, numpy.ndarray) or numpy.isscalar(x):\n return numpy.prod(x)\n\n elif isinstance(x, UTPM) or isinstance(x, Function):\n return x.prod()\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\nsum.__doc__ += numpy.sum.__doc__\n\n\ndef logdet(x):\n \"\"\"\n computes log(det(x))\n \"\"\"\n\n if isinstance(x, numpy.ndarray) or numpy.isscalar(x):\n return numpy.linalg.slogdet(x)[1]\n\n elif isinstance(x, UTPM) or isinstance(x, Function):\n return x.__class__.logdet(x)\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\n\n\n\n\ndef coeff_op(x, sl, shp):\n return x.coeff_op(sl, shp)\n\n\ndef init_UTPM_jacobian(x):\n # print 'type(x)=', type(x)\n if isinstance(x, Function):\n return x.init_UTPM_jacobian()\n\n elif isinstance(x, numpy.ndarray):\n return UTPM.init_jacobian(x)\n\n elif isinstance(x, UTPM):\n # print x.data.shape\n return UTPM.init_UTPM_jacobian(x.data[0,0])\n\n else:\n raise ValueError('don\\'t know what to do with this input!')\n\n\ndef extract_UTPM_jacobian(x):\n if isinstance(x, Function):\n return x.extract_UTPM_jacobian()\n\n elif isinstance(x, UTPM):\n return UTPM.extract_UTPM_jacobian(x)\n else:\n raise ValueError('don\\'t know what to do with this input!')\n\n\ndef zeros( shape, dtype=float, order = 'C'):\n \"\"\"\n generic implementation of numpy.zeros\n \"\"\"\n\n if numpy.isscalar(shape):\n shape = (shape,)\n\n if isinstance(dtype,type):\n return numpy.zeros(shape, dtype=dtype,order=order)\n\n elif isinstance(dtype, numpy.ndarray):\n return numpy.zeros(shape,dtype=dtype.dtype, order=order)\n\n elif isinstance(dtype, UTPM):\n D,P = dtype.data.shape[:2]\n tmp = numpy.zeros((D,P) + shape ,dtype = dtype.data.dtype)\n tmp*= dtype.data.flatten()[0]\n return dtype.__class__(tmp)\n\n elif isinstance(dtype, Function):\n return dtype.pushforward(zeros, [shape, dtype, order])\n\n else:\n return numpy.zeros(shape,dtype=type(dtype), order=order)\nzeros.__doc__ += numpy.zeros.__doc__\n\n\ndef ones( shape, dtype=float, order = 'C'):\n \"\"\"\n generic implementation of numpy.ones\n \"\"\"\n\n if numpy.isscalar(shape):\n shape = (shape,)\n\n if isinstance(dtype,type):\n return numpy.ones(shape, dtype=dtype,order=order)\n\n\n elif isinstance(dtype, numpy.ndarray):\n return numpy.ones(shape,dtype=dtype.dtype, order=order)\n\n elif isinstance(dtype, UTPM):\n D,P = dtype.data.shape[:2]\n tmp = numpy.zeros((D,P) + shape ,dtype = dtype.data.dtype)\n tmp[0,...] 
= 1.\n return UTPM(tmp)\n\n elif isinstance(dtype, Function):\n return dtype.pushforward(ones, [shape, dtype, order])\n\n else:\n return numpy.ones(shape,dtype=type(dtype), order=order)\nones.__doc__ += numpy.ones.__doc__\n\n\ndef zeros_like(a, dtype=None, order = 'C'):\n \"\"\"\n generic implementation of numpy.zeros_like\n \"\"\"\n if dtype is None:\n dtype = a\n return zeros( a.shape, dtype=dtype, order = order)\nzeros_like.__doc__ += numpy.zeros_like.__doc__\n\n\ndef ones_like(a, dtype=None, order = 'C'):\n \"\"\"\n generic implementation of numpy.ones_like\n \"\"\"\n if dtype is None:\n dtype = a\n return ones( a.shape, dtype=dtype, order = order)\nones_like.__doc__ += numpy.ones_like.__doc__\n\n\ndef dot(a,b):\n \"\"\"\n Same as NumPy dot but in UTP arithmetic\n \"\"\"\n if isinstance(a,Function) or isinstance(b,Function):\n return Function.dot(a,b)\n\n elif isinstance(a,UTPM) or isinstance(b,UTPM):\n return UTPM.dot(a,b)\n\n else:\n return numpy.dot(a,b)\ndot.__doc__ += numpy.dot.__doc__\n\n\ndef outer(a,b):\n \"\"\"\n Same as NumPy outer but in UTP arithmetic\n \"\"\"\n if isinstance(a,Function) or isinstance(b,Function):\n return Function.outer(a,b)\n\n elif isinstance(a,UTPM) or isinstance(b,UTPM):\n return UTPM.outer(a,b)\n\n else:\n return numpy.outer(a,b)\nouter.__doc__ += numpy.outer.__doc__\n\n\ndef symvec(A, UPLO='F'):\n if isinstance(A, UTPM):\n return UTPM.symvec(A, UPLO=UPLO)\n\n elif isinstance(A, Function):\n return Function.symvec(A, UPLO=UPLO)\n\n elif isinstance(A, numpy.ndarray):\n return utils.symvec(A, UPLO=UPLO)\n\n else:\n raise NotImplementedError('don\'t know what to do with this instance')\nsymvec.__doc__ = utils.symvec.__doc__\n\n\ndef vecsym(v):\n if isinstance(v, UTPM):\n return UTPM.vecsym(v)\n\n elif isinstance(v, Function):\n return Function.vecsym(v)\n\n elif isinstance(v, numpy.ndarray):\n return utils.vecsym(v)\n\n else:\n raise NotImplementedError('don\'t know what to do with this instance')\nvecsym.__doc__ = utils.vecsym.__doc__\n\n", "\"\"\"\n=============================================================\nAlgoPy, a library for Automatic Differentiation (AD) in Python\n=============================================================\n\nDescription:\n\n AlgoPy allows you to differentiate functions implemented as computer programs\n by using Algorithmic Differentiation (AD) techniques in the forward and\n reverse mode.\n\n The forward mode propagates univariate Taylor polynomials of arbitrary order.\n Hence it is also possible to use AlgoPy to evaluate higher-order derivative tensors.\n\n A speciality of AlgoPy is the ability to differentiate functions that contain\n matrix operations such as +,-,*,/, dot, solve, qr, eigh, cholesky.\n\n\nRationale:\n\n Many programs for scientific computing make use of numerical linear algebra.\n The de facto standard for array manipulations in Python is NumPy.\n AlgoPy allows you to write code that can be evaluated either by NumPy or by\n AlgoPy, with little or no modification.\n\n Note that this does not mean that any code you wrote can be differentiated with AlgoPy,\n but rather that you can write code that can be evaluated with or without AlgoPy.\n\n\nHow to cite AlgoPy::\n\n @article{Walter2011,\n title = \"Algorithmic differentiation in Python with AlgoPy\",\n journal = \"Journal of Computational Science\",\n volume = \"\",\n number = \"0\",\n pages = \" - \",\n year = \"2011\",\n note = \"\",\n issn = \"1877-7503\",\n doi = \"10.1016/j.jocs.2011.10.007\",\n url = 
\"http://www.sciencedirect.com/science/article/pii/S1877750311001013\",\n author = \"Sebastian F. Walter and Lutz Lehmann\",\n keywords = \"Automatic differentiation\",\n keywords = \"Cholesky decomposition\",\n keywords = \"Hierarchical approach\",\n keywords = \"Higher-order derivatives\",\n keywords = \"Numerical linear algebra\",\n keywords = \"NumPy\",\n keywords = \"Taylor arithmetic\"\n }\n\n\"\"\"\n\nimport os\n__install_path__ = os.path.realpath(__file__)\n\n\n# check that dependencies are satisfied\n\nfrom ._npversion import NumpyVersion\n\n_min_numpy_version = '1.5.0'\n_preferred_numpy_version = '1.6.2'\n_min_scipy_version = '0.11.0'\n\ntry:\n import numpy\n\n # ignore warnings \"ComplexWarning: Casting complex values to real discards the imaginary part\"\n import warnings\n warnings.simplefilter(\"ignore\", numpy.ComplexWarning)\n\nexcept ImportError as e:\n raise ImportError(\n \"NumPy import error (%s)\\n\"\n \"NumPy is a requirement of AlgoPy.\\n\"\n \"Please install NumPy >= %s\" % (e, _preferred_numpy_version))\n\nif NumpyVersion(numpy.version.version) < _min_numpy_version:\n raise ImportError(\n \"NumPy version %s was detected.\\n\"\n \"Please install NumPy >= %s\" % (\n numpy.version.version, _preferred_numpy_version))\n\ntry:\n import scipy\nexcept ImportError as e:\n raise ImportError(\n \"SciPy import error (%s)\\n\"\n \"SciPy is a requirement of AlgoPy.\\n\"\n \"Please install SciPy >= \" + _min_scipy_version)\n\nif NumpyVersion(scipy.version.version) < _min_scipy_version:\n raise ImportError(\n \"SciPy version %s was detected.\\n\"\n \"Please install SciPy >= %s\" % (\n scipy.version.version, _min_scipy_version))\n\n\n# testing\nfrom numpy.testing import Tester\ntest = Tester().test\n\n# import standard submodules and important classes/functions\nfrom . import tracer\nfrom .tracer import CGraph, Function\n\nfrom . import utpm\nfrom .utpm import UTPM, UTP\n\nfrom . import globalfuncs\nfrom .globalfuncs import *\n\nfrom .compound import *\n\nfrom . import special\n\nfrom . import linalg\nfrom .linalg import *\n\nfrom . import nthderiv\n\nfrom . import fft\n\ntry:\n from . import version\n __version__ = version.version\n\nexcept ImportError:\n __version__ = 'nobuild'\n\n\n\n\n" ]
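The generic wrappers in globalfuncs above dispatch on argument type: plain ndarrays fall through to NumPy, while UTPM instances (forward-mode Taylor arithmetic) and traced Function nodes are routed to their class methods. A minimal sketch of what that buys, assuming an installed algopy; the function f below is illustrative only:

import numpy
import algopy
from algopy import UTPM

def f(x):
    # written once against the generic API; runs on ndarrays and UTPM alike
    return algopy.sum(x * algopy.sqrt(x))

x0 = numpy.array([1.0, 4.0, 9.0])
print(f(x0))                      # plain NumPy evaluation: 36.0

x = UTPM.init_jacobian(x0)        # lift the input into Taylor arithmetic
y = f(x)
print(UTPM.extract_jacobian(y))   # gradient of f at x0: [1.5, 3.0, 4.5]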
[ [ "numpy.dot", "numpy.array", "numpy.prod" ], [ "numpy.shape", "numpy.random.random", "numpy.linalg.norm", "numpy.zeros" ], [ "numpy.dot", "numpy.imag", "numpy.linalg.slogdet", "numpy.ones", "numpy.real", "numpy.prod", "numpy.isscalar", "numpy.outer", "numpy.zeros", "numpy.sum" ], [ "numpy.testing.Tester" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WoutDavid/ST-nextflow-pipeline
[ "8de3da218ec4f10f183e1163fe782c19fd8dd841" ]
[ "src/quality_control/bin/createSpotDetectionQCHTML.py" ]
[ "import json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sys\n\n# Argparsing\nargument_index = 1\ntemplate = sys.argv[argument_index]\nargument_index +=1\n\nrecall_json = sys.argv[argument_index]\nargument_index +=1\n\nrecall_plot = sys.argv[argument_index]\nargument_index +=1\n\nprecision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))]\n\nprecision_rows_list = []\n# convert jsons back to dicts for html conversion\nfor json_path in precision_jsons_list:\n with open(json_path, 'r') as json_file:\n data = json.load(json_file)\n precision_rows_list.append(data)\nprecision_df = pd.DataFrame(precision_rows_list)\nprecision_df = precision_df.sort_values(by='Round #')\nprecision_html_table = precision_df.to_html(index=False)\n\n# Same for recall json\nrecall_rows_list = []\nwith open(recall_json, 'r') as json_file:\n data=json.load(json_file)\n recall_rows_list.append(data)\nrecall_df = pd.DataFrame(recall_rows_list)\nrecall_html_table = recall_df.to_html(index=False)\n\n\n# Create html\n\nwith open(template, 'r') as template_file:\n contents = template_file.read()\n template_soup = BeautifulSoup(contents, features=\"html.parser\")\n\np_list = template_soup.find_all('p')\np_index = 0\n\n# Read recall table tag\nrecall_soup = BeautifulSoup(recall_html_table, features=\"html.parser\")\ntable_tag = recall_soup.find('table')\np_list[p_index].insert_after(table_tag)\np_index+=1\n\nimage_tag = template_soup.new_tag('img')\nimage_tag['src']= f\"./recall/{recall_plot}\"\nimage_tag['width']= 700\nimage_tag['height']= 500\np_list[p_index].insert_after(image_tag)\np_index+=1\n\n\n\nprecision_soup = BeautifulSoup(precision_html_table, features=\"html.parser\")\ntable_tag = precision_soup.find('table')\np_list[p_index].insert_after(table_tag)\np_index+=1\n\nwith open('spot_detection_qc_report.html', 'w') as result_file:\n result_file.write(str( template_soup ))\n" ]
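The script above is driven purely by positional arguments: the HTML template comes first, then the recall JSON, then the recall plot filename, and everything after that is treated as a precision JSON (one per round). A hedged usage sketch, with hypothetical file names; note the report references the plot as ./recall/<plot>, so the plot file is expected under a recall/ directory next to the generated HTML:

# python createSpotDetectionQCHTML.py template.html recall_metrics.json recall_plot.png precision_round0.json precision_round1.json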
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Aria461863631/GraphGallery
[ "7b62f80ab36b29013bea2538a6581fc696a80201" ]
[ "graphgallery/attack/targeted/experimental/sga2.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom numba import njit\n\nfrom tensorflow.keras.losses import sparse_categorical_crossentropy\nfrom graphadv.attack.targeted.targeted_attacker import TargetedAttacker\nfrom graphadv.utils.surrogate_utils import train_a_surrogate\n\nfrom graphgallery.nn.layers import SGConvolution\nfrom graphgallery.nn.models import SGC\nfrom graphgallery import tqdm, astensor, ego_graph\n\n\nclass SGA(TargetedAttacker):\n def __init__(self,\n adj,\n x,\n labels,\n idx_train=None,\n hops=2,\n seed=None,\n name=None,\n device='CPU:0',\n surrogate=None,\n surrogate_args={},\n **kwargs):\n\n super().__init__(adj,\n x=x,\n labels=labels,\n seed=seed,\n name=name,\n device=device,\n **kwargs)\n\n if surrogate is None:\n surrogate = train_a_surrogate(self, 'SGC', idx_train,\n **surrogate_args)\n elif not isinstance(surrogate, SGC):\n raise RuntimeError(\n \"surrogate model should be the instance of `graphgallery.nn.SGC`.\"\n )\n\n self.hops = hops\n self.similar_nodes = [\n np.where(labels == class_)[0] for class_ in range(self.num_classes)\n ]\n\n with tf.device(self.device):\n W, b = surrogate.weights\n X = astensor(x)\n self.b = b\n self.XW = X @ W\n self.surrogate = surrogate\n self.SGC = SGConvolution(hops)\n self.loss_fn = sparse_categorical_crossentropy\n\n def reset(self):\n super().reset()\n # for the added self-loop\n self.selfloop_degree = (self.degree + 1.).astype(self.floatx)\n self.adj_flips = {}\n self.pos_dict = None\n self.wrong_label = None\n\n def attack(self,\n target,\n num_budgets=None,\n reduce_nodes=3,\n direct_attack=True,\n structure_attack=True,\n feature_attack=False,\n compute_A_grad=True,\n disable=False):\n\n super().attack(target, num_budgets, direct_attack, structure_attack,\n feature_attack)\n\n logit = self.surrogate.predict(target).ravel()\n top2 = logit.argsort()[-2:]\n self.wrong_label = top2[-1] if top2[-1] != self.target_label else top2[\n 0]\n assert self.wrong_label != self.target_label\n self.subgraph_preprocessing(reduce_nodes)\n\n offset = self.edge_lower_bound\n weights = self.weights\n with tf.device(self.device):\n for _ in tqdm(range(self.num_budgets),\n desc='Peturbing Graph',\n disable=disable):\n gradients = self.compute_gradient(\n compute_A_grad=compute_A_grad)\n gradients *= (-2. 
* weights) + 1.\n gradients = gradients[offset:]\n sorted_index = tf.argsort(gradients, direction='DESCENDING')\n\n for index in sorted_index:\n index_with_offset = index + offset\n u, v = self.indices[index_with_offset]\n if index_with_offset < self.non_edge_lower_bound and not self.allow_singleton and (\n self.selfloop_degree[u] <= 2\n or self.selfloop_degree[v] <= 2):\n continue\n\n if not self.is_modified(u, v):\n self.adj_flips[(u, v)] = index_with_offset\n self.update_subgraph(u, v, index_with_offset)\n break\n\n def subgraph_preprocessing(self, reduce_nodes):\n\n target = self.target\n wrong_label = self.wrong_label\n # neighbors = self.graph.adj_matrix[target].nonzero()[1]\n neighbors = self.graph.adj_matrix[target].indices\n wrong_label_nodes = self.similar_nodes[wrong_label]\n sub_edges, sub_nodes = self.ego_subgraph()\n sub_edges = np.vstack([sub_edges, sub_edges[:, [1, 0]]])\n\n if self.direct_attack or reduce_nodes is not None:\n influence_nodes = [target]\n wrong_label_nodes = np.setdiff1d(wrong_label_nodes, neighbors)\n else:\n influence_nodes = neighbors\n\n self.construct_sub_adj(influence_nodes, wrong_label_nodes, sub_nodes,\n sub_edges)\n\n if reduce_nodes is not None:\n if self.direct_attack:\n influence_nodes = [target]\n wrong_label_nodes = self.top_k_wrong_labels_nodes(\n k=self.num_budgets)\n\n else:\n influence_nodes = neighbors\n wrong_label_nodes = self.top_k_wrong_labels_nodes(\n k=reduce_nodes)\n\n self.construct_sub_adj(influence_nodes, wrong_label_nodes,\n sub_nodes, sub_edges)\n\n @tf.function\n def SGC_conv(self, XW, adj):\n return self.SGC([XW, adj])\n\n def compute_gradient(self, eps=2.24, compute_A_grad=False):\n\n weights = self.weights\n if not compute_A_grad:\n weights = normalize_GCN(self.indices, weights,\n self.selfloop_degree)\n\n with tf.GradientTape() as tape:\n tape.watch(weights)\n\n if not compute_A_grad:\n adj = tf.sparse.SparseTensor(self.indices, weights,\n self.graph.adj_matrix.shape)\n else:\n weights_norm = normalize_GCN(self.indices, weights,\n self.selfloop_degree)\n adj = tf.sparse.SparseTensor(self.indices, weights_norm,\n self.graph.adj_matrix.shape)\n\n output = self.SGC_conv(self.XW, adj)\n logit = tf.nn.softmax(((output[self.target] + self.b) / eps))\n loss = self.loss_fn(self.target_label, logit) - self.loss_fn(\n self.wrong_label, logit)\n\n gradients = tape.gradient(loss, weights)\n\n return gradients\n\n def ego_subgraph(self):\n return ego_graph(self.graph.adj_matrix, self.target, self.hops)\n\n def construct_sub_adj(self, influence_nodes, wrong_label_nodes, sub_nodes,\n sub_edges):\n length = len(wrong_label_nodes)\n potential_edges = np.vstack([\n np.stack([np.tile(infl, length), wrong_label_nodes], axis=1)\n for infl in influence_nodes\n ])\n\n if len(influence_nodes) > 1:\n # TODO: considering self-loops\n mask = self.graph.adj_matrix[tuple(potential_edges.T)].A1 == 0\n potential_edges = potential_edges[mask]\n\n nodes = np.union1d(sub_nodes, wrong_label_nodes)\n edge_weights = np.ones(sub_edges.shape[0], dtype=self.floatx)\n non_edge_weights = np.zeros(potential_edges.shape[0],\n dtype=self.floatx)\n self_loop_weights = np.ones(nodes.shape[0], dtype=self.floatx)\n self_loop = np.stack([nodes, nodes], axis=1)\n\n self.indices = np.vstack([\n self_loop, potential_edges[:, [1, 0]], sub_edges, potential_edges\n ])\n weights = np.hstack([\n self_loop_weights, non_edge_weights, edge_weights, non_edge_weights\n ])\n self.weights = tf.Variable(weights, dtype=self.floatx)\n self.edge_lower_bound = self_loop_weights.size + 
non_edge_weights.size\n self.non_edge_lower_bound = self.edge_lower_bound + edge_weights.size\n\n self.n_sub_edges = edge_weights.size // 2\n self.n_non_edges = non_edge_weights.size\n\n def top_k_wrong_labels_nodes(self, k):\n offset = self.non_edge_lower_bound\n weights = self.weights\n with tf.device(self.device):\n gradients = self.compute_gradient()[offset:]\n _, index = tf.math.top_k(gradients, k=k)\n\n wrong_label_nodes = self.indices[:, 1][index.numpy() + offset]\n\n return wrong_label_nodes\n\n def update_subgraph(self, u, v, index):\n weight = 1.0 - self.weights[index]\n degree_delta = 2. * weight - 1.\n if weight > 0:\n inv_index = index - self.n_non_edges - self.n_sub_edges * 2\n else:\n if index >= self.edge_lower_bound + self.n_sub_edges:\n inv_index = index - self.n_sub_edges\n else:\n inv_index = index + self.n_sub_edges\n\n self.weights[index].assign(weight)\n self.weights[inv_index].assign(weight)\n self.selfloop_degree[u] += degree_delta\n self.selfloop_degree[v] += degree_delta\n\n\ndef normalize_GCN(indices, weights, degree):\n row, col = indices.T\n inv_degree = tf.pow(degree, -0.5)\n normed_weights = weights * tf.gather(inv_degree, row) * tf.gather(\n inv_degree, col)\n return normed_weights\n" ]
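The normalize_GCN helper at the bottom of sga2.py applies the symmetric normalization used by GCN/SGC, scaling each edge weight by 1/sqrt(d_u * d_v) of the self-loop degrees of its endpoints. A self-contained check on a two-node toy graph (values are illustrative):

import numpy as np
import tensorflow as tf

# edges of a 2-node toy graph: two self-loops plus the pair (0,1)/(1,0)
indices = np.array([[0, 0], [1, 1], [0, 1], [1, 0]])
weights = tf.ones([4], dtype=tf.float32)
degree = tf.constant([2.0, 2.0])   # degrees including the self-loop

row, col = indices.T
inv_degree = tf.pow(degree, -0.5)
normed = weights * tf.gather(inv_degree, row) * tf.gather(inv_degree, col)
print(normed.numpy())              # [0.5 0.5 0.5 0.5] = 1 / sqrt(2 * 2)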
[ [ "numpy.hstack", "tensorflow.device", "tensorflow.nn.softmax", "tensorflow.Variable", "tensorflow.pow", "numpy.union1d", "numpy.tile", "numpy.stack", "numpy.ones", "numpy.setdiff1d", "tensorflow.gather", "tensorflow.argsort", "tensorflow.sparse.SparseTensor", "tensorflow.math.top_k", "numpy.zeros", "numpy.where", "numpy.vstack", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
hakunanatasha/deeplearning_genetics
[ "f8ae543e04bcacc8a668175b7f9ea2a50b02c5e2" ]
[ "preprocessing/preprocess.py" ]
[ "\"\"\"\n2021.01.31\nType-hinting would make some of these structures clearer.\n\nReturns a dataset of one-hot-encoded sequences of DNA and whether the motif of \ninterest is present or absent in the data.\n\nThis is done after \"datagen\" or \"make_data.sh\" is run.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n# ML Preprocessing (OHE of data + model selection)\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n# Sequence padding\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\n\n\nclass makeOHE:\n \"\"\"\n Make binary one-hot-encoders.\n\n Takes the dictionary of class labels: list_of_CSV_files.\n Loads data, one-hot-encodes the (biological) sequences.\n Splits into train/test that are equally represented across classes.\n\n Args:\n motif_files: [dict class_label : list_of_data_filepaths]\n alphabet: [list(str)] exhaustive list of all characters that appear in the sequences\n ptest: [float <1] percent of testing split\n pval: [float <1] percent of validation split\n seed: [int] random seed for the stratified shuffle splits\n batchfirst: [Bool] if data is returned as Nbatch x Nseq x Nfts\n pad_value: [int] fill value used to pad unequal-length sequences\n \"\"\"\n\n def __init__(\n self,\n motif_files,\n alphabet=[\"A\", \"C\", \"G\", \"T\"],\n ptest=0.1,\n pval=0.2,\n seed=1234,\n batchfirst=True,\n pad_value=-1,\n ):\n assert (ptest + pval) < 1, \"p_test + p_val are too big\"\n\n self.motif_files = motif_files\n self.alphabet = sorted(alphabet) # Alphabetically sort labels\n self.alph_dict = {a: idx for idx, a in enumerate(self.alphabet)}\n\n self.ptest = ptest\n self.pval = pval\n self.seed = seed\n self.batchfirst = batchfirst\n self.pad_value = pad_value # was hardcoded to -1, silently ignoring the argument\n\n # Load all the CSVs and concatenate each class\n self.setup()\n\n # Encode each class and pad the sequences\n self.OHEncode()\n\n # Create a train/test split for each class\n print(\n \"Train %=\",\n (1 - self.ptest - self.pval),\n \"|| Test %=\",\n self.ptest,\n \"|| Val %=\",\n self.pval,\n )\n self.split_traintest()\n print(\"All Steps Completed!\")\n\n def setup(self):\n \"\"\"\n Loads motif data, gets the length and sets up the one-hot encoders\n \"\"\"\n self._check_input_list()\n self._load_data()\n self._get_length()\n self._setup_encoders()\n\n def OHEncode(self):\n \"\"\"\n Creates a one-hot-encoded vector for each sequence in all classes.\n - First, encodes all classes.\n - Then passes the full dataset to an RNN sequence padder that appends\n pad_value to all missing elements of the sequence and splits on class-size\n\n Returns: dict[Class_Label(str): torch.Tensor[Nbatch x Nseq_max x N_class]]\n \"\"\"\n Nalphabet = len(self.alphabet)\n Nclasses = len(list(self.data.items()))\n\n data = {}\n labels = {}\n countr = 0\n for k, df in self.data.items():\n print(\"Encoding label=\", k, \"|| Classes =\", countr + 1, \"/\", len(self.data))\n countr += 1\n # Get training sequences\n data.update(\n {\n k: [\n self.ohe(df.iloc[idx, :], self.i_encoder, Nalphabet)\n for idx in range(self.N[k])\n ]\n }\n )\n # Get Labels (the label repeated N[k] times)\n lval = [k for _ in range(self.N[k])]\n lval = self.ohe(lval, self.o_encoder, Nclasses)\n labels.update({k: lval})\n\n print(\"Making padded data in xdata and labels in ydata\")\n self.xdata = self.add_padding(data, self.N, self.batchfirst, self.pad_value)\n\n self.ydata = labels\n print(\"Completed Encoding!!\")\n\n def split_traintest(self):\n \"\"\"\n Wrapper around stratified-shuffle-split.\n\n Ensures the 
training/testing split are equally represented\n in the input classes.\n\n Returns xtrain/test ytrain/test assigned\n where x represents the concatenated data and y the concatenated labels\n \"\"\"\n print(\"Splitting Training + Testing\")\n y = np.array([k for k, v in self.N.items() for _ in range(v)])\n X = torch.cat(list(self.xdata.values()))\n labels = torch.from_numpy(np.concatenate(list(self.ydata.values())))\n\n # First, create the Training/ [Validation + Testing] set\n stratSplit = StratifiedShuffleSplit(\n n_splits=1,\n test_size=round(self.ptest + self.pval, 2),\n random_state=self.seed,\n )\n for train_idx, out_idx in stratSplit.split(np.zeros(X.shape[0]), y):\n self.xtrain, xout = X[train_idx], X[out_idx]\n self.ytrain, yout = labels[train_idx], labels[out_idx]\n ytmp = y[out_idx]\n\n # From the reserved data, create validation/testing\n if self.pval != 0:\n print(\"Making Validation + Testing\")\n ptest_2 = self.ptest / (self.ptest + self.pval) # Scaled Ratio\n valSplit = StratifiedShuffleSplit(\n n_splits=1, test_size=ptest_2, random_state=self.seed\n ) # make a new split with remaining data\n for val_idx, test_idx in valSplit.split(np.zeros(xout.shape[0]), ytmp):\n self.xval, self.xtest = xout[val_idx], xout[test_idx]\n self.yval, self.ytest = yout[val_idx], yout[test_idx]\n\n else:\n self.xtest = xout\n self.ytest = yout\n self.xval = None\n self.yval = None\n\n def translate(self, seq):\n \"\"\"\n Given an arbitrary sequence, returns the sequence value.\n\n Args:\n seq - [torch.Tensor] of Nbatches x Nseq x N_alphabet\n \"\"\"\n keep_vals = (seq.sum(axis=1) == 1).tolist() # Find non-padded values\n s = seq[keep_vals, :].argmax(axis=1) # Slice desired positions\n translated_seq = self.i_encoder.inverse_transform(s)\n return \"\".join(translated_seq.tolist())\n\n def _check_input_list(self):\n \"\"\"\n Input dictionary of motif files should be list. Converts.\n \"\"\"\n f = lambda v: v if isinstance(v[1], list) else (v[0], [v[1]])\n self.motif_files = dict(map(f, self.motif_files.items()))\n\n def _load_data(self):\n \"\"\"\n Load the sequence data from the CSV files of motif_files.\n Concatenates the set of CSV files.\n Expects all CSVs to have the format: Seq[str], Motif_1[int],...,Motif_i[int]\n where the column represents the number of times that motif appeared in the sequence.\n \"\"\"\n print(\"Loading CSV\")\n self.data = {\n k: pd.concat([pd.read_csv(fn) for fn in v])\n for k, v in self.motif_files.items()\n }\n self.label_ids = list(self.data.keys())\n\n def _get_length(self):\n \"\"\"\n Get the number of data points.\n\n Returns dict[Class_Label(str): number_of_examples(int)]\n \"\"\"\n self.N = {k: v.shape[0] for k, v in self.data.items()}\n\n def _setup_encoders(self):\n \"\"\"\n Given the alphabet (ATCG or 20 amino acids) set up an encoder\n to account for each dimension.\n\n Then, set up the one-hot encoding s.t. 
when you see the label,\n you generate a \"1\" in the appropriate dimension.\n \"\"\"\n print(\"Setting up Integer/Label Encoders\")\n self.i_encoder = LabelEncoder().fit(self.alphabet)\n self.o_encoder = LabelEncoder().fit(list(self.data.keys()))\n\n @staticmethod\n def add_padding(data, N, batchfirst=True, pad_value=-1):\n \"\"\"\n Pad all data sequences for each class.\n Collects all datapoints and then splits based on length of class\n\n Inputs:\n data: [Dict[int: dataframe]] dictionary of class label: dataset\n N: [Dict[int: int]] dictionary of class label: number of instances\n batchfirst: [bool] sequence padding order [Nbatch x Nseqlen x Nbases]\n pad_value: [int] value for non-equal lengths\n \"\"\"\n Ns = [0] + np.cumsum(list(N.values())).tolist()\n seqs = []\n pad = {}\n for k, v in data.items():\n seqs += v\n\n # Ensure all sequences are padded to same length\n alldata = pad_sequence([torch.Tensor(i) for i in seqs], batchfirst, pad_value)\n\n # Split on the classes\n Nsplits = [alldata[Ns[i] : Ns[i + 1], :, :] for i in range(len(N))]\n\n pad.update({k: Nsplits[idx] for idx, k in enumerate(data.keys())})\n\n return pad\n\n @staticmethod\n def ohe(row, integer_encoder, dimensions):\n \"\"\"\n One-hot-encodes the alphabet.\n Row is a single row of a CSV file from self.data\n OR a list with the class of interest.\n\n Returns:\n one_hot_encoded - tensor of Nseq x Nbases\n (intA, intB) - number of A and B\n \"\"\"\n if isinstance(row, list) is False:\n seq, intA, intB = row.tolist()\n else:\n seq = row\n intA = intB = None\n\n # Find out which label position it is\n ienc = integer_encoder.transform(list(seq))\n\n # One-hot encode the position on an N x 4 vector\n one_hot_encoded = np.zeros(shape=(len(seq), dimensions))\n one_hot_encoded[list(range(len(ienc))), ienc] = 1\n\n return one_hot_encoded # , (intA, intB)\n" ]
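The ohe staticmethod above reduces to a LabelEncoder pass followed by fancy indexing into a zero matrix. A stripped-down sketch of that core step, using the same DNA alphabet:

import numpy as np
from sklearn.preprocessing import LabelEncoder

alphabet = ["A", "C", "G", "T"]
encoder = LabelEncoder().fit(alphabet)

seq = "GATC"
ienc = encoder.transform(list(seq))          # integer index per base, e.g. [2 0 3 1]
one_hot = np.zeros((len(seq), len(alphabet)))
one_hot[np.arange(len(ienc)), ienc] = 1      # one 1 per row, column = base index
print(one_hot)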
[ [ "pandas.read_csv", "torch.Tensor", "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.preprocessing.LabelEncoder", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
dumpmemory/FLASH-pytorch
[ "8e0d2fd7925c0de9703d666ea2cc004327f6e544" ]
[ "train.py" ]
[ "from flash_pytorch import FLASHTransformer\nfrom flash_pytorch.autoregressive_wrapper import AutoregressiveWrapper\n\nimport random\nimport tqdm\nimport gzip\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\n\n# constants\n\nNUM_BATCHES = int(1e5)\nBATCH_SIZE = 4\nGRADIENT_ACCUMULATE_EVERY = 4\nLEARNING_RATE = 2e-4\nVALIDATE_EVERY = 100\nGENERATE_EVERY = 500\nGENERATE_LENGTH = 512\nSEQ_LEN = 1024\n\n# helpers\n\ndef cycle(loader):\n while True:\n for data in loader:\n yield data\n\ndef decode_token(token):\n return str(chr(max(32, token)))\n\ndef decode_tokens(tokens):\n return ''.join(list(map(decode_token, tokens)))\n\n# instantiate GPT-like decoder model\n\nmodel = FLASHTransformer(\n num_tokens = 256,\n dim = 512,\n depth = 8,\n causal = True,\n group_size = 256,\n shift_tokens = True\n)\n\nmodel = AutoregressiveWrapper(model)\nmodel.cuda()\n\n# prepare enwik8 data\n\nwith gzip.open('./data/enwik8.gz') as file:\n # np.fromstring is deprecated for binary data; copy() keeps the array writable for torch\n X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()\n trX, vaX = np.split(X, [int(90e6)])\n data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)\n\nclass TextSamplerDataset(Dataset):\n def __init__(self, data, seq_len):\n super().__init__()\n self.data = data\n self.seq_len = seq_len\n\n def __getitem__(self, index):\n rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))\n full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()\n return full_seq.cuda()\n\n def __len__(self):\n return self.data.size(0) // self.seq_len\n\ntrain_dataset = TextSamplerDataset(data_train, SEQ_LEN)\nval_dataset = TextSamplerDataset(data_val, SEQ_LEN)\ntrain_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))\nval_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))\n\n# optimizer\n\noptim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n\n# training\n\nfor i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):\n model.train()\n\n for __ in range(GRADIENT_ACCUMULATE_EVERY):\n loss = model(next(train_loader))\n loss.backward()\n\n print(f'training loss: {loss.item()}')\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n optim.step()\n optim.zero_grad()\n\n if i % VALIDATE_EVERY == 0:\n model.eval()\n with torch.no_grad():\n loss = model(next(val_loader))\n print(f'validation loss: {loss.item()}')\n\n if i % GENERATE_EVERY == 0:\n model.eval()\n inp = random.choice(val_dataset)[:-1]\n prime = decode_tokens(inp)\n print('%s \n\n %s' % (prime, '*' * 100)) # was an f-string containing literal %s placeholders plus a stray tuple argument\n\n sample = model.generate(inp[None, ...], GENERATE_LENGTH)\n output_str = decode_tokens(sample[0])\n print(output_str)\n" ]
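In the training loop above, cycle() turns the finite DataLoaders into endless iterators so each optimizer step can pull GRADIENT_ACCUMULATE_EVERY micro-batches; calling backward() repeatedly before step() sums the gradients in .grad. The pattern in isolation, with a stand-in linear model and a dummy loss (illustrative only; the script above likewise sums rather than averages the micro-batch gradients):

import torch

model = torch.nn.Linear(8, 1)
opt = torch.optim.Adam(model.parameters(), lr=2e-4)
ACCUMULATE = 4

for _ in range(ACCUMULATE):
    x = torch.randn(16, 8)
    loss = model(x).pow(2).mean()
    loss.backward()            # gradients from each micro-batch accumulate

torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
opt.step()                     # one update from the summed gradients
opt.zero_grad()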
[ [ "torch.no_grad", "torch.utils.data.DataLoader", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DBRTII/Deep-NILMtk
[ "cf6c50ec9891c17f9c626e23c0e1b6f488fefcb3", "cf6c50ec9891c17f9c626e23c0e1b6f488fefcb3" ]
[ "deep_nilmtk/data/pre_process/preprocess.py", "deep_nilmtk/models/pytorch/test/test_seq2point.py" ]
[ "import logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom .normalize import normalize\n\ndef pad_data(data, sequence_length, pad_at_begin= False):\n \"\"\"\n Performs data padding for both target and aggregate consumption\n\n :param data: The aggregate power\n :type data: np.array\n :param context_size: The input sequence length\n :type context_size: int\n :param target_size: The target sequence length\n :type target_size: int\n :param pad_at_begin: Specified how the padded values are inserted, defaults to False\n :type pad_at_begin: bool, optional\n :return: The padded aggregate power.\n :rtype: np.array\n \"\"\"\n units_to_pad = 1 + sequence_length // 2\n padding = (sequence_length,) if pad_at_begin else (units_to_pad,units_to_pad+1)\n if data.ndim==1:\n new_mains = np.pad(data.reshape(-1), padding,'constant',constant_values=(0,0))\n return new_mains\n else:\n paddings = np.zeros((units_to_pad, data.shape[1]))\n new_mains = np.concatenate([paddings, data, paddings])\n return new_mains\n\ndef preprocess(mains, norm_type, submains=None, params=None):\n \"\"\"\n Preprocess the main data using normalization\n :param mains: the aggregate power\n :param submains: the power of appliances\n :return: pd.DataFrame, pd.DataFrame, dict\n \"\"\"\n logging.warning(\"Data is preprocessed using default pre-preocessing function\")\n logging.info(f\"Number of data Sources is :{len(mains)} \")\n mains = np.concatenate(mains, axis=0)\n logging.info(f\"Shape of data after concatenation is :{mains.shape} \")\n params, mains = normalize(mains, norm_type, params)\n if submains is not None:\n columns = [app_name for app_name, _ in submains]\n submains = pd.concat([\n pd.concat(targets) for _, targets in submains\n ], axis=1)\n submains.columns = columns\n logging.info(f'The target data contains the following appliances:{submains.columns} with shape {submains.shape}')\n return mains, params, pd.DataFrame(submains)\n\n return mains, params", "import unittest\nimport numpy as np\n\nfrom deep_nilmtk.models.torch.seq2point import Seq2Point\nimport tensorflow as tf\n\nfrom deep_nilmtk.utils.test import assertNumpyArraysEqual\n\nclass TestSeq2Point(unittest.TestCase):\n\n def test_froward(self):\n N = 2\n input_batch = np.random.random(64*125*1).reshape(64,125,1)\n model = Seq2Point(125,N).model\n output = model(input_batch)\n self.assertEqual(output.shape, (input_batch.shape[0], N))\n\n" ]
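The padding arithmetic in pad_data above is easiest to see with numbers: for sequence_length = 99, units_to_pad = 1 + 99 // 2 = 50, and the asymmetric (50, 51) padding grows a 1-D signal by sequence_length + 2 samples. A quick check:

import numpy as np

sequence_length = 99
units_to_pad = 1 + sequence_length // 2      # 50
data = np.arange(5, dtype=float)

padded = np.pad(data, (units_to_pad, units_to_pad + 1),
                'constant', constant_values=(0, 0))
print(padded.shape)                          # (106,) = 5 + 50 + 51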
[ [ "numpy.concatenate", "pandas.concat", "numpy.zeros", "pandas.DataFrame" ], [ "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AICPS/roadscene2vec
[ "aea84d3aa0e339a58fd92bbee3140df9fa4abde8", "aea84d3aa0e339a58fd92bbee3140df9fa4abde8" ]
[ "roadscene2vec/learning/model/mrgcn.py", "roadscene2vec/scripts/4_test_model.py" ]
[ "import os\nimport sys\n\nsys.path.append(os.path.dirname(sys.path[0]))\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchnlp.nn import Attention\nfrom torch.nn import Linear, LSTM\nfrom torch_geometric.nn import RGCNConv, TopKPooling, FastRGCNConv\nfrom torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool\nfrom .rgcn_sag_pooling import RGCNSAGPooling\n\nclass MRGCN(nn.Module):\n \n def __init__(self, config):\n super(MRGCN, self).__init__()\n self.num_features = config.model_configuration['num_of_classes']\n self.num_relations = config.model_configuration['num_relations']\n self.num_classes = config.model_configuration['nclass']\n self.num_layers = config.model_configuration['num_layers'] #defines number of RGCN conv layers.\n self.hidden_dim = config.model_configuration['hidden_dim']\n self.layer_spec = None if config.model_configuration['layer_spec'] == None else list(map(int, config.model_configuration['layer_spec'].split(',')))\n self.lstm_dim1 = config.model_configuration['lstm_input_dim']\n self.lstm_dim2 = config.model_configuration['lstm_output_dim']\n self.rgcn_func = FastRGCNConv if config.model_configuration['conv_type'] == \"FastRGCNConv\" else RGCNConv\n self.activation = F.relu if config.model_configuration['activation'] == 'relu' else F.leaky_relu\n self.pooling_type = config.model_configuration['pooling_type']\n self.readout_type = config.model_configuration['readout_type']\n self.temporal_type = config.model_configuration['temporal_type']\n\n self.dropout = config.model_configuration['dropout']\n self.conv = []\n total_dim = 0\n\n if self.layer_spec == None:\n if self.num_layers > 0:\n self.conv.append(self.rgcn_func(self.num_features, self.hidden_dim, self.num_relations).to(config.model_configuration['device']))\n total_dim += self.hidden_dim\n for i in range(1, self.num_layers):\n self.conv.append(self.rgcn_func(self.hidden_dim, self.hidden_dim, self.num_relations).to(config.model_configuration['device']))\n total_dim += self.hidden_dim\n else:\n self.fc0_5 = Linear(self.num_features, self.hidden_dim)\n else:\n if self.num_layers > 0:\n print(\"using layer specification and ignoring hidden_dim parameter.\")\n print(\"layer_spec: \" + str(self.layer_spec))\n self.conv.append(self.rgcn_func(self.num_features, self.layer_spec[0], self.num_relations).to(config.model_configuration['device']))\n total_dim += self.layer_spec[0]\n for i in range(1, self.num_layers):\n self.conv.append(self.rgcn_func(self.layer_spec[i-1], self.layer_spec[i], self.num_relations).to(config.model_configuration['device']))\n total_dim += self.layer_spec[i]\n\n else:\n self.fc0_5 = Linear(self.num_features, self.hidden_dim)\n total_dim += self.hidden_dim\n\n if self.pooling_type == \"sagpool\":\n self.pool1 = RGCNSAGPooling(total_dim, self.num_relations, ratio=config.model_configuration['pooling_ratio'], rgcn_func=config.model_configuration['conv_type'])\n elif self.pooling_type == \"topk\":\n self.pool1 = TopKPooling(total_dim, ratio=config.model_configuration['pooling_ratio'])\n\n self.fc1 = Linear(total_dim, self.lstm_dim1)\n \n if \"lstm\" in self.temporal_type:\n self.lstm = LSTM(self.lstm_dim1, self.lstm_dim2, batch_first=True)\n self.attn = Attention(self.lstm_dim2)\n self.lstm_decoder = LSTM(self.lstm_dim2, self.lstm_dim2, batch_first=True)\n else:\n self.fc1_5 = Linear(self.lstm_dim1, self.lstm_dim2)\n\n self.fc2 = Linear(self.lstm_dim2, self.num_classes)\n\n\n def forward(self, x, edge_index, edge_attr, batch=None):\n attn_weights = 
dict()\n outputs = []\n if self.num_layers > 0:\n for i in range(self.num_layers):\n x = self.activation(self.conv[i](x, edge_index, edge_attr))\n x = F.dropout(x, self.dropout, training=self.training)\n outputs.append(x)\n x = torch.cat(outputs, dim=-1)\n else:\n x = self.activation(self.fc0_5(x))\n\n if self.pooling_type == \"sagpool\":\n x, edge_index, _, attn_weights['batch'], attn_weights['pool_perm'], attn_weights['pool_score'] = self.pool1(x, edge_index, edge_attr=edge_attr, batch=batch)\n elif self.pooling_type == \"topk\":\n x, edge_index, _, attn_weights['batch'], attn_weights['pool_perm'], attn_weights['pool_score'] = self.pool1(x, edge_index, edge_attr=edge_attr, batch=batch)\n else: \n attn_weights['batch'] = batch\n\n if self.readout_type == \"add\":\n x = global_add_pool(x, attn_weights['batch'])\n elif self.readout_type == \"mean\":\n x = global_mean_pool(x, attn_weights['batch'])\n elif self.readout_type == \"max\":\n x = global_max_pool(x, attn_weights['batch'])\n else:\n pass\n\n x = self.activation(self.fc1(x))\n \n if self.temporal_type == \"mean\":\n x = self.activation(self.fc1_5(x.mean(axis=0)))\n elif self.temporal_type == \"lstm_last\":\n x_predicted, (h, c) = self.lstm(x.unsqueeze(0))\n x = h.flatten()\n elif self.temporal_type == \"lstm_sum\":\n x_predicted, (h, c) = self.lstm(x.unsqueeze(0))\n x = x_predicted.sum(dim=1).flatten()\n elif self.temporal_type == \"lstm_attn\":\n x_predicted, (h, c) = self.lstm(x.unsqueeze(0))\n x, attn_weights['lstm_attn_weights'] = self.attn(h.view(1,1,-1), x_predicted)\n x, (h_decoder, c_decoder) = self.lstm_decoder(x, (h, c))\n x = x.flatten()\n elif self.temporal_type == \"lstm_seq\": #used for step-by-step sequence prediction. \n x_predicted, (h, c) = self.lstm(x.unsqueeze(0)) #x_predicted is sequence of predictions for each frame, h is hidden state of last item, c is last cell state\n x = x_predicted.squeeze(0) #we return x_predicted as we want to know the output of the LSTM for each value in the sequence\n else:\n pass\n \n return F.log_softmax(self.fc2(x), dim=-1), attn_weights", "import os\nimport sys\n\n#import check_gpu as cg\n#os.environ['CUDA_VISIBLE_DEVICES'] = cg.get_free_gpu()\nsys.path.append(os.path.dirname(sys.path[0]))\nfrom learning.util.image_trainer import Image_Trainer\nfrom learning.util.scenegraph_trainer import Scenegraph_Trainer\nfrom util.config_parser import configuration\nimport wandb\nimport torch.nn as nn\n\n#python 4_test_model.py --yaml_path C:\\users\\harsi\\research\\roadscene2vec\\roadscene2vec\\config\\learning_config.yaml \n\ndef test_Trainer(learning_config):\n ''' Training the dynamic kg algorithm with different attention layer choice.'''\n\n #wandb setup \n wandb_arg= wandb.init(project=learning_config.wandb_configuration['project'], entity=learning_config.wandb_configuration['entity'])\n outputs = []\n labels = []\n metrics = []\n \n categories_train_list = []\n categories_test_list = []\n\n if learning_config.training_configuration[\"dataset_type\"] == \"real\":\n trainer = Image_Trainer(learning_config, wandb_arg)\n trainer.split_dataset() \n trainer.load_model() #set load model to true in config, and specify load path\n trainer.loss_func = nn.CrossEntropyLoss() #set loss function\n categories_train, categories_test, metric = trainer.eval_model()\n categories_train_list.append(categories_train)\n categories_test_list.append(categories_test)\n metrics.append(metric)\n \n elif learning_config.training_configuration[\"dataset_type\"] == \"scenegraph\":\n trainer = 
Scenegraph_Trainer(learning_config, wandb_arg)\n trainer.split_dataset() \n trainer.load_model() #set load model to true in config, and specify load path\n trainer.loss_func = nn.CrossEntropyLoss() #set loss function\n outputs_train, labels_train, outputs_test, labels_test, metric = trainer.evaluate()\n outputs += outputs_test\n labels += labels_test\n metrics.append(metric)\n else:\n raise ValueError(\"Type unrecognized\")\n\n\nif __name__ == \"__main__\":\n # the entry of dynkg pipeline training\n learning_config = configuration(sys.argv[1:])\n test_Trainer(learning_config)" ]
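The readout step in MRGCN above collapses node embeddings into one vector per graph, with the batch vector assigning each node to its graph. A minimal illustration of the "mean" readout:

import torch
from torch_geometric.nn import global_mean_pool

x = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])  # 3 nodes, 2 features each
batch = torch.tensor([0, 0, 1])                   # nodes 0-1 -> graph 0, node 2 -> graph 1
print(global_mean_pool(x, batch))
# tensor([[2., 3.],
#         [5., 6.]])  -> one embedding per graph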
[ [ "torch.nn.functional.dropout", "torch.nn.Linear", "torch.cat", "torch.nn.LSTM" ], [ "torch.nn.CrossEntropyLoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kuroko1t/nne
[ "184aa535ad588c1403520acaddca8e01b532e68d" ]
[ "examples/torch_example.py" ]
[ "import nne\nimport torchvision\nimport torch\nimport numpy as np\n\ninput_shape = (1, 3, 224, 224)\nmodel = torchvision.models.resnet34(pretrained=True).cuda()\n\ninput_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)\noutput_data = nne.infer_torch(model, input_data)\n" ]
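For reference, a plain-PyTorch equivalent of the inference call above might look like the sketch below; the exact semantics of nne.infer_torch (device handling, eval mode, output conversion) are an assumption here, not confirmed from the nne source:

import torch

# assumed equivalent of nne.infer_torch(model, input_data)
model.eval()
with torch.no_grad():
    output_data = model(torch.from_numpy(input_data).cuda()).cpu().numpy()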
[ [ "numpy.random.random_sample" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shubhada-2019/tsfresh
[ "ff69073bbb4df787fcbf277a611c6b40632e767d", "ff69073bbb4df787fcbf277a611c6b40632e767d", "ff69073bbb4df787fcbf277a611c6b40632e767d" ]
[ "tests/units/feature_selection/test_feature_significance.py", "tests/units/feature_selection/test_checks.py", "tsfresh/scripts/measure_execution_time.py" ]
[ "# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\nfrom unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\n\nfrom tsfresh.feature_selection.relevance import calculate_relevance_table\n\n\nclass FeatureSignificanceTestCase(TestCase):\n \"\"\"Test cases for the whole feature selection algorithm.\"\"\"\n\n def setUp(self):\n \"\"\"Fix the random seed.\"\"\"\n np.random.seed(seed=42)\n\n def test_binary_target_mixed_case(self):\n # Mixed case with binomial target\n np.random.seed(42)\n y = pd.Series(np.random.binomial(1, 0.5, 1000))\n X = pd.DataFrame(index=range(1000))\n\n z = y - np.random.binomial(1, 0.1, 1000) + np.random.binomial(1, 0.1, 1000)\n z[z == -1] = 0\n z[z == 2] = 1\n\n X[\"rel1\"] = z\n X[\"rel2\"] = y * np.abs(np.random.normal(0, 1, 1000)) + np.random.normal(\n 0, 1, 1000\n )\n X[\"rel3\"] = y + np.random.normal(0, 0.3, 1000)\n X[\"rel4\"] = y ** 2 + np.random.normal(0, 1, 1000)\n X[\"rel5\"] = np.sqrt(y) + np.random.binomial(2, 0.1, 1000)\n\n X[\"irr_constant\"] = 1.113344\n\n X[\"irr1\"] = np.random.normal(0, 1, 1000)\n X[\"irr2\"] = np.random.poisson(1, 1000)\n X[\"irr3\"] = np.random.binomial(1, 0.3, 1000)\n X[\"irr4\"] = np.random.normal(0, 1, 1000)\n X[\"irr5\"] = np.random.poisson(1, 1000)\n X[\"irr6\"] = np.random.binomial(1, 0.3, 1000)\n X[\"irr7\"] = np.random.normal(0, 1, 1000)\n X[\"irr8\"] = np.random.poisson(1, 1000)\n X[\"irr9\"] = np.random.binomial(1, 0.3, 1000)\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\", \"rel3\", \"rel4\", \"rel5\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n # Test type outputs\n for i in range(1, 6):\n row = df_bh.loc[\"rel{}\".format(i)]\n self.assertEqual(row.feature, \"rel{}\".format(i))\n if i == 1:\n self.assertEqual(row.type, \"binary\")\n else:\n self.assertEqual(row.type, \"real\")\n\n for i in range(1, 10):\n row = df_bh.loc[\"irr{}\".format(i)]\n self.assertEqual(row.feature, \"irr{}\".format(i))\n if i not in [3, 6, 9]:\n self.assertEqual(row.type, \"real\")\n else:\n self.assertEqual(row.type, \"binary\")\n\n self.assertFalse(row.relevant)\n\n # Assert that all of the relevant features are kept.\n # THIS FAILS!\n # self.assertEqual(len(kept_feature), 5)\n\n def test_binary_target_binary_features(self):\n # Binomial random variables and binomial target\n y = pd.Series(np.random.binomial(1, 0.5, 5000))\n X = pd.DataFrame(index=range(5000))\n\n for i in range(10):\n X[\"irr{}\".format(i)] = np.random.binomial(1, 0.1, 5000)\n\n for i in range(10, 20):\n X[\"irr{}\".format(i)] = np.random.binomial(1, 0.8, 5000)\n\n z = y - np.random.binomial(1, 0.01, 5000) + np.random.binomial(1, 0.01, 5000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel1\"] = z\n\n z = y - np.random.binomial(1, 0.05, 5000) + np.random.binomial(1, 0.05, 5000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel2\"] = z\n\n z = y - np.random.binomial(1, 0.10, 5000) + np.random.binomial(1, 0.10, 5000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel3\"] = z\n\n z = y - np.random.binomial(1, 0.15, 5000) + np.random.binomial(1, 0.15, 5000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel4\"] = z\n\n z = y - np.random.binomial(1, 0.20, 5000) + np.random.binomial(1, 0.20, 5000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel5\"] = z\n\n df_bh = 
calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\", \"rel3\", \"rel4\", \"rel5\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n # Test type outputs\n for i in range(1, 6):\n row = df_bh.loc[\"rel{}\".format(i)]\n self.assertEqual(row.feature, \"rel{}\".format(i))\n self.assertEqual(row.type, \"binary\")\n\n for i in range(1, 20):\n row = df_bh.loc[\"irr{}\".format(i)]\n self.assertEqual(row.feature, \"irr{}\".format(i))\n self.assertEqual(row.type, \"binary\")\n\n self.assertFalse(row.relevant)\n\n def test_binomial_target_realvalued_features(self):\n # Real valued random variables and binomial target\n y = pd.Series(np.random.binomial(1, 0.5, 5000))\n X = pd.DataFrame(index=range(5000))\n\n for i in range(10):\n X[\"irr{}\".format(i)] = np.random.normal(1, 0.3, 5000)\n\n for i in range(10, 20):\n X[\"irr{}\".format(i)] = np.random.normal(1, 0.5, 5000)\n\n for i in range(20, 30):\n X[\"irr{}\".format(i)] = np.random.normal(1, 0.8, 5000)\n\n X[\"rel1\"] = y * np.random.normal(0, 1, 5000) + np.random.normal(0, 1, 5000)\n X[\"rel2\"] = y + np.random.normal(0, 1, 5000)\n X[\"rel3\"] = y ** 2 + np.random.normal(0, 1, 5000)\n X[\"rel4\"] = np.sqrt(y) + np.random.binomial(2, 0.1, 5000)\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\", \"rel3\", \"rel4\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n # Test type outputs\n for i in range(1, 5):\n row = df_bh.loc[\"rel{}\".format(i)]\n self.assertEqual(row.feature, \"rel{}\".format(i))\n self.assertEqual(row.type, \"real\")\n\n for i in range(1, 30):\n row = df_bh.loc[\"irr{}\".format(i)]\n self.assertEqual(row.feature, \"irr{}\".format(i))\n self.assertEqual(row.type, \"real\")\n\n self.assertFalse(row.relevant)\n\n def test_real_target_mixed_case(self):\n # Mixed case with real target\n y = pd.Series(np.random.normal(0, 1, 5000))\n X = pd.DataFrame(index=range(5000))\n\n z = y.copy()\n z[z <= 0] = 0\n z[z > 0] = 1\n\n X[\"rel1\"] = z\n X[\"rel2\"] = y\n X[\"rel3\"] = y ** 2\n X[\"rel4\"] = np.sqrt(abs(y))\n\n X[\"irr1\"] = np.random.normal(0, 1, 5000)\n X[\"irr2\"] = np.random.poisson(1, 5000)\n X[\"irr3\"] = np.random.binomial(1, 0.1, 5000)\n X[\"irr4\"] = np.random.normal(0, 1, 5000)\n X[\"irr5\"] = np.random.poisson(1, 5000)\n X[\"irr6\"] = np.random.binomial(1, 0.05, 5000)\n X[\"irr7\"] = np.random.normal(0, 1, 5000)\n X[\"irr8\"] = np.random.poisson(1, 5000)\n X[\"irr9\"] = np.random.binomial(1, 0.2, 5000)\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\", \"rel3\", \"rel4\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n # Test type outputs\n for i in range(1, 5):\n row = df_bh.loc[\"rel{}\".format(i)]\n self.assertEqual(row.feature, \"rel{}\".format(i))\n if i == 1:\n self.assertEqual(row.type, \"binary\")\n else:\n self.assertEqual(row.type, \"real\")\n\n for i in range(1, 10):\n row = df_bh.loc[\"irr{}\".format(i)]\n self.assertEqual(row.feature, \"irr{}\".format(i))\n if i in [3, 6, 9]:\n self.assertEqual(row.type, \"binary\")\n else:\n self.assertEqual(row.type, \"real\")\n\n self.assertFalse(row.relevant)\n\n def 
test_real_target_binary_features(self):\n # Mixed case with real target\n y = pd.Series(np.random.normal(0, 1, 1000))\n X = pd.DataFrame(index=range(1000))\n\n z = y - np.random.binomial(1, 0.20, 1000) + np.random.binomial(1, 0.20, 1000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel1\"] = z\n\n z = y - np.random.binomial(1, 0.10, 1000) + np.random.binomial(1, 0.10, 1000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel2\"] = z\n\n X[\"irr1\"] = np.random.binomial(0, 0.1, 1000)\n X[\"irr2\"] = np.random.binomial(0, 0.15, 1000)\n X[\"irr3\"] = np.random.binomial(0, 0.05, 1000)\n X[\"irr4\"] = np.random.binomial(0, 0.2, 1000)\n X[\"irr5\"] = np.random.binomial(0, 0.25, 1000)\n X[\"irr6\"] = np.random.binomial(0, 0.01, 1000)\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n def test_all_features_good(self):\n # Mixed case with real target\n y = pd.Series(np.random.normal(0, 1, 1000))\n X = pd.DataFrame(index=range(1000))\n\n z = y - np.random.binomial(1, 0.20, 1000) + np.random.binomial(1, 0.20, 1000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel1\"] = z\n\n z = y - np.random.binomial(1, 0.10, 1000) + np.random.binomial(1, 0.10, 1000)\n z[z == -1] = 0\n z[z == 2] = 1\n X[\"rel2\"] = z\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n # Make sure all selected variables are relevant\n for kept_feature in feat_rej:\n self.assertIn(kept_feature, [\"rel1\", \"rel2\"])\n\n self.assertGreater(len(feat_rej), 0)\n\n def test_all_features_bad(self):\n # Mixed case with real target\n y = pd.Series(np.random.normal(0, 1, 1000))\n X = pd.DataFrame(index=range(1000))\n\n X[\"irr1\"] = np.random.binomial(0, 0.1, 1000)\n X[\"irr2\"] = np.random.binomial(0, 0.15, 1000)\n X[\"irr3\"] = np.random.binomial(0, 0.05, 1000)\n X[\"irr4\"] = np.random.binomial(0, 0.2, 1000)\n X[\"irr5\"] = np.random.binomial(0, 0.25, 1000)\n X[\"irr6\"] = np.random.binomial(0, 0.01, 1000)\n\n df_bh = calculate_relevance_table(X, y)\n feat_rej = df_bh.loc[df_bh.relevant].feature\n\n self.assertEqual(len(feat_rej), 0)\n", "# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom tests.fixtures import warning_free\nfrom tsfresh.defaults import TEST_FOR_BINARY_TARGET_REAL_FEATURE\nfrom tsfresh.feature_selection.significance_tests import (\n target_binary_feature_binary_test,\n target_binary_feature_real_test,\n target_real_feature_binary_test,\n target_real_feature_real_test,\n)\n\n\[email protected]()\ndef binary_series_with_nan():\n return pd.Series([np.NaN, 1, 1])\n\n\[email protected]()\ndef real_series_with_nan():\n return pd.Series([np.NaN, 1, 2])\n\n\[email protected]()\ndef binary_series():\n return pd.Series([0, 1, 1])\n\n\[email protected]()\ndef real_series():\n return pd.Series([0, 1, 2])\n\n\nclass TestChecksBinaryReal:\n \"\"\"\n Test the checks for the `target_binary_feature_real_test`.\n \"\"\"\n\n def test_check_target_is_binary(self, real_series):\n with pytest.raises(ValueError):\n target_binary_feature_real_test(\n x=real_series, y=real_series, test=TEST_FOR_BINARY_TARGET_REAL_FEATURE\n )\n\n def test_checks_test_function(self, binary_series, real_series):\n with 
pytest.raises(ValueError):\n target_binary_feature_real_test(\n x=real_series, y=binary_series, test=\"other_unknown_function\"\n )\n\n def test_checks_feature_nan(self, real_series_with_nan, binary_series):\n with pytest.raises(ValueError):\n target_binary_feature_real_test(\n x=real_series_with_nan,\n y=binary_series,\n test=TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n )\n\n def test_checks_target_nan(self, binary_series_with_nan, real_series):\n with pytest.raises(ValueError):\n target_binary_feature_real_test(\n x=real_series,\n y=binary_series_with_nan,\n test=TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n )\n\n def test_check_feature_is_series(self, binary_series, real_series):\n with pytest.raises(TypeError):\n target_binary_feature_real_test(x=real_series.values, y=binary_series)\n\n def test_check_feature_is_series(self, binary_series, real_series):\n with pytest.raises(TypeError):\n target_binary_feature_real_test(x=real_series, y=binary_series.values)\n\n\nclass TestChecksBinaryBinary:\n \"\"\"\n Test the checks for the `target_binary_feature_binary_test`.\n \"\"\"\n\n def test_checks_feature_is_binary(self, binary_series, real_series):\n with pytest.raises(ValueError):\n target_binary_feature_binary_test(x=real_series, y=binary_series)\n\n def test_checks_target_is_binary(self, binary_series, real_series):\n with pytest.raises(ValueError):\n target_binary_feature_binary_test(x=binary_series, y=real_series)\n\n def test_checks_feature_is_series(self, binary_series):\n with pytest.raises(TypeError):\n target_binary_feature_binary_test(x=binary_series.values, y=binary_series)\n\n def test_checks_target_is_series(self, binary_series):\n with pytest.raises(TypeError):\n target_binary_feature_binary_test(x=binary_series, y=binary_series.values)\n\n def test_checks_feature_nan(self, binary_series_with_nan, binary_series):\n with pytest.raises(ValueError):\n target_binary_feature_binary_test(x=binary_series_with_nan, y=binary_series)\n\n def test_checks_target_nan(self, binary_series_with_nan, binary_series):\n with pytest.raises(ValueError):\n target_binary_feature_binary_test(x=binary_series, y=binary_series_with_nan)\n\n\nclass TestChecksRealReal:\n \"\"\"\n Test the checks for the `target_real_feature_real_test`.\n \"\"\"\n\n def test_checks_feature_is_series(self, real_series):\n with pytest.raises(TypeError):\n target_real_feature_real_test(x=real_series.values, y=real_series)\n\n def test_checks_target_is_series(self, real_series):\n with pytest.raises(TypeError):\n target_real_feature_real_test(x=real_series, y=real_series.values)\n\n def test_checks_feature_nan(self, real_series_with_nan, real_series):\n with pytest.raises(ValueError):\n target_real_feature_real_test(x=real_series_with_nan, y=real_series)\n\n def test_checks_target_nan(self, real_series_with_nan, real_series):\n with pytest.raises(ValueError):\n target_real_feature_real_test(x=real_series, y=real_series_with_nan)\n\n\nclass TestChecksRealBinary:\n \"\"\"\n Test the checks for the `target_real_feature_binary_test`.\n \"\"\"\n\n def test_feature_is_binary(self, real_series):\n with pytest.raises(ValueError):\n target_real_feature_binary_test(x=real_series, y=real_series)\n\n def test_feature_is_series(self, real_series, binary_series):\n with pytest.raises(TypeError):\n target_real_feature_binary_test(x=binary_series.values, y=real_series)\n\n def test_feature_is_series(self, real_series, binary_series):\n with pytest.raises(TypeError):\n target_real_feature_binary_test(x=binary_series, y=real_series.values)\n\n def 
test_checks_feature_nan(self, binary_series_with_nan, real_series):\n with pytest.raises(ValueError):\n target_real_feature_binary_test(x=binary_series_with_nan, y=real_series)\n\n def test_checks_target_nan(self, real_series_with_nan, binary_series):\n with pytest.raises(ValueError):\n target_real_feature_binary_test(x=binary_series, y=real_series_with_nan)\n", "# This script extracts the execution time for\n# various different settings of tsfresh\n# using different input data\n# Attention: it will run for ~half a day\n# Do these calculations in a controlled environment\n# (e.g. a cloud provider VM)\n# You will need to have b2luigi installed.\nimport json\nfrom time import time\n\nimport b2luigi as luigi\nimport numpy as np\nimport pandas as pd\n\nfrom tsfresh.feature_extraction import (\n ComprehensiveFCParameters,\n MinimalFCParameters,\n extract_features,\n)\n\n\nclass DataCreationTask(luigi.Task):\n \"\"\"Create random data for testing\"\"\"\n\n num_ids = luigi.IntParameter(default=100)\n time_series_length = luigi.IntParameter()\n random_seed = luigi.IntParameter()\n\n def output(self):\n yield self.add_to_output(\"data.csv\")\n\n def run(self):\n np.random.seed(self.random_seed)\n\n df = pd.concat(\n [\n pd.DataFrame(\n {\n \"id\": [i] * self.time_series_length,\n \"time\": range(self.time_series_length),\n \"value\": np.random.randn(self.time_series_length),\n }\n )\n for i in range(self.num_ids)\n ]\n )\n\n with self._get_output_target(\"data.csv\").open(\"w\") as f:\n df.to_csv(f)\n\n\[email protected](DataCreationTask)\nclass TimingTask(luigi.Task):\n \"\"\"Run tsfresh with the given parameters\"\"\"\n\n feature_parameter = luigi.DictParameter(hashed=True)\n n_jobs = luigi.IntParameter()\n try_number = luigi.IntParameter()\n\n def output(self):\n yield self.add_to_output(\"result.json\")\n\n def run(self):\n input_file = self._get_input_targets(\"data.csv\")[0]\n\n with input_file.open(\"r\") as f:\n df = pd.read_csv(f)\n\n start_time = time()\n extract_features(\n df,\n column_id=\"id\",\n column_sort=\"time\",\n n_jobs=self.n_jobs,\n default_fc_parameters=self.feature_parameter,\n disable_progressbar=True,\n )\n end_time = time()\n\n single_parameter_name = list(self.feature_parameter.keys())[0]\n single_parameter_params = self.feature_parameter[single_parameter_name]\n\n result_json = {\n \"time\": end_time - start_time,\n \"n_ids\": self.num_ids,\n \"n_jobs\": self.n_jobs,\n \"feature\": single_parameter_name,\n \"number_parameters\": len(single_parameter_params)\n if single_parameter_params\n else 0,\n \"time_series_length\": int((df[\"id\"] == 0).sum()),\n \"try_number\": self.try_number,\n }\n\n with self._get_output_target(\"result.json\").open(\"w\") as f:\n json.dump(result_json, f)\n\n\[email protected](DataCreationTask)\nclass FullTimingTask(luigi.Task):\n \"\"\"Run tsfresh with all calculators for comparison\"\"\"\n\n n_jobs = luigi.IntParameter()\n\n def output(self):\n yield self.add_to_output(\"result.json\")\n\n def run(self):\n input_file = self._get_input_targets(\"data.csv\")[0]\n\n with input_file.open(\"r\") as f:\n df = pd.read_csv(f)\n\n start_time = time()\n extract_features(\n df,\n column_id=\"id\",\n column_sort=\"time\",\n n_jobs=self.n_jobs,\n disable_progressbar=True,\n )\n end_time = time()\n\n result_json = {\n \"time\": end_time - start_time,\n \"n_ids\": self.num_ids,\n \"n_jobs\": self.n_jobs,\n \"time_series_length\": int((df[\"id\"] == 0).sum()),\n }\n\n with self._get_output_target(\"result.json\").open(\"w\") as f:\n json.dump(result_json, 
f)\n\n\nclass CombinerTask(luigi.Task):\n \"\"\"Collect all tasks into a single result.csv file\"\"\"\n\n def complete(self):\n return False\n\n def requires(self):\n settings = ComprehensiveFCParameters()\n for job in [0, 1, 4]:\n for time_series_length in [100, 500, 1000, 5000]:\n yield FullTimingTask(\n time_series_length=time_series_length,\n n_jobs=job,\n num_ids=10,\n random_seed=42,\n )\n yield FullTimingTask(\n time_series_length=time_series_length,\n n_jobs=job,\n num_ids=100,\n random_seed=42,\n )\n\n for feature_name in settings:\n yield TimingTask(\n feature_parameter={feature_name: settings[feature_name]},\n time_series_length=time_series_length,\n n_jobs=job,\n num_ids=100,\n try_number=0,\n random_seed=42,\n )\n\n for try_number in range(3):\n yield TimingTask(\n feature_parameter={feature_name: settings[feature_name]},\n n_jobs=job,\n try_number=try_number,\n num_ids=10,\n time_series_length=time_series_length,\n random_seed=42,\n )\n\n def output(self):\n yield self.add_to_output(\"results.csv\")\n\n def run(self):\n results = []\n\n for input_file in self._get_input_targets(\"result.json\"):\n with input_file.open(\"r\") as f:\n results.append(json.load(f))\n\n df = pd.DataFrame(results)\n\n with self._get_output_target(\"results.csv\").open(\"w\") as f:\n df.to_csv(f)\n\n\nif __name__ == \"__main__\":\n luigi.set_setting(\"result_path\", \"results\")\n luigi.process(CombinerTask())\n" ]
[ [ "numpy.sqrt", "numpy.random.seed", "numpy.random.poisson", "numpy.random.normal", "numpy.random.binomial" ], [ "pandas.Series" ], [ "pandas.read_csv", "numpy.random.randn", "numpy.random.seed", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
SharathRaparthy/DeepRL
[ "96cc5d586c63681b469eca38fcf20f988b7f1ced" ]
[ "deep_rl/utils/misc.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport pickle\nimport os\nimport datetime\nimport torch\nimport time\nfrom .torch_utils import *\nfrom pathlib import Path\n\n\ndef run_steps(agent):\n config = agent.config\n agent_name = agent.__class__.__name__\n t0 = time.time()\n while True:\n if config.save_interval and not agent.policy_step % config.save_interval:\n agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))\n if config.log_interval and not agent.total_steps % config.log_interval:\n agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))\n t0 = time.time()\n if config.eval_interval and not agent.total_steps % config.eval_interval:\n agent.eval_episodes()\n if config.max_steps and agent.total_steps >= config.max_steps:\n agent.close()\n break\n agent.implicit_step()\n # if config.game_type == \"aggressive_reward\":\n # agent.logger.info('-----------Aggressive Reward Step --------------')\n # agent.reward_step()\n # if agent.reward_step_count % config.conservative_improvement_step == 0:\n # agent.logger.info('-----------Conservative Policy Step --------------')\n # agent.step()\n # if config.game_type == \"aggressive_policy\":\n # agent.logger.info('-----------Aggressive Policy Step --------------')\n # agent.step()\n # if agent.policy_step_count % config.conservative_improvement_step == 0:\n # agent.logger.info('-----------Conservative Reward Step --------------')\n # agent.reward_step()\n agent.switch_task()\n\n\ndef get_time_str():\n return datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n\n\ndef get_default_log_dir(name):\n return './log/%s-%s' % (name, get_time_str())\n\n\ndef mkdir(path):\n Path(path).mkdir(parents=True, exist_ok=True)\n\n\ndef close_obj(obj):\n if hasattr(obj, 'close'):\n obj.close()\n\n\ndef random_sample(indices, batch_size):\n indices = np.asarray(np.random.permutation(indices))\n batches = indices[:len(indices) // batch_size * batch_size].reshape(-1, batch_size)\n for batch in batches:\n yield batch\n r = len(indices) % batch_size\n if r:\n yield indices[-r:]\n\n\ndef generate_tag(params):\n if 'tag' in params.keys():\n return\n game = params['game']\n params.setdefault('run', 0)\n run = params['run']\n del params['game']\n del params['run']\n str = ['%s_%s' % (k, v) for k, v in sorted(params.items())]\n tag = '%s-%s-run-%d' % (game, '-'.join(str), run)\n params['tag'] = tag\n params['game'] = game\n params['run'] = run\n\n\ndef translate(pattern):\n groups = pattern.split('.')\n pattern = ('\\.').join(groups)\n return pattern\n\n\ndef split(a, n):\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n" ]
[ [ "numpy.random.permutation" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CognitiaAI/StreetFighterRL
[ "2b902ee7bb502336e1ba3f74ebeb09d843a75e73" ]
[ "agent/learning/pipeline/input_provider/normalized_one_hot_input_provider.py" ]
[ "import numpy as np\n\nfrom pipeline.input_provider.base_input_provider import BaseInputProvider\n\n\nclass NormalizedOneHotInputProvider(BaseInputProvider):\n def __init__(self):\n BaseInputProvider.__init__(self)\n self.__game_state = None\n self.__screen_shot = None\n\n def store(self, game_state, screen_shot):\n self.__game_state = game_state\n self.__screen_shot = screen_shot\n\n def min_max_scaling(self, X, X_min, X_max, range_min = 0, range_max = 1):\n X_std = (X - X_min) / (X_max - X_min)\n X_scaled = X_std * (range_max - range_min) + range_min\n return X_scaled\n\n def one_hot(self, X, length):\n encoded = np.zeros(length)\n encoded[X] = 1\n return encoded\n\n def pre_processing(self):\n # Normalizes the input and converts to numpy\n processed_state = self.normalize_state()\n processed_screen_shot = self.__screen_shot / 255.0\n\n return processed_state, processed_screen_shot\n\n def normalize_state(self):\n game_state = list()\n game_state.extend(self.one_hot(self.__game_state.player1.player_id, 12))\n game_state.append(self.min_max_scaling(self.__game_state.player1.health, 0, 176.0)) # Min Max Scaling\n game_state.append(self.min_max_scaling(self.__game_state.player1.x_coord, 0, 500.0))\n game_state.append(self.min_max_scaling(self.__game_state.player1.y_coord, 0, 192.0))\n game_state.extend(self.one_hot(self.__game_state.player1.is_jumping, 2))\n game_state.extend(self.one_hot(self.__game_state.player1.is_crouching, 2))\n game_state.extend(self.one_hot(self.__game_state.player1.is_player_in_move, 2))\n # game_state.append(self.__game_state.player1.move_id)\n game_state.extend(self.__game_state.player1.get_action_buttons()) # adding 10 more values\n\n game_state.extend(self.one_hot(self.__game_state.player2.player_id, 12))\n game_state.append(self.min_max_scaling(self.__game_state.player2.health, 0, 176.0)) # Min Max Scaling\n game_state.append(self.min_max_scaling(self.__game_state.player2.x_coord, 0, 500.0))\n game_state.append(self.min_max_scaling(self.__game_state.player2.y_coord, 0, 192.0))\n game_state.extend(self.one_hot(self.__game_state.player2.is_jumping, 2))\n game_state.extend(self.one_hot(self.__game_state.player2.is_crouching, 2))\n game_state.extend(self.one_hot(self.__game_state.player2.is_player_in_move, 2))\n # game_state.append(self.__game_state.player2.move_id)\n game_state.extend(self.__game_state.player2.get_action_buttons()) # adding 10 more values\n\n return np.array(game_state)\n\n def retrieve(self):\n return self.pre_processing()\n\n def store_and_retrieve(self, game_state, screen_shot):\n self.store(game_state, screen_shot)\n return self.retrieve()\n\n\n\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YoanSallami/robot_monitor
[ "633ae1cfdc3fb6b49a48ac606e79d10f6ecc2801" ]
[ "scripts/robot_monitor_node.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport math\nimport sys\nimport argparse\nimport rospy\nimport time\nimport tf2_ros\nfrom lxml import etree\nfrom collections import deque\nimport numpy\nimport yaml\nimport underworlds\nfrom std_msgs.msg import String\nfrom underworlds.types import Entity, Mesh, Camera, MESH, Situation\nfrom underworlds.helpers import transformations\nfrom underworlds.tools.loader import ModelLoader\nfrom underworlds.tools.primitives_3d import Box\n\nEPSILON = 0.02\nTF_CACHE_TIME = 5.0\nDEFAULT_CLIP_PLANE_NEAR = 0.001\nDEFAULT_CLIP_PLANE_FAR = 1000.0\nDEFAULT_HORIZONTAL_FOV = 57.20\nDEFAULT_ASPECT = 1.291196388\n\n\n# just for convenience\ndef strip_leading_slash(s):\n return s[1:] if s.startswith(\"/\") else s\n\n# just for convenience\ndef transformation_matrix(t, q):\n translation_mat = transformations.translation_matrix(t)\n rotation_mat = transformations.quaternion_matrix(q)\n return numpy.dot(translation_mat, rotation_mat)\n\n\nclass RobotMonitor(object):\n \"\"\"\n \"\"\"\n def __init__(self, ctx, source_world, target_world, urdf_file_path, model_dir_path, robot_name, perspective_frame,\n cam_rot, reference_frame):\n \"\"\"\n The constructor method\n @param ctx: The underworlds context\n @param source_world: The name of the source world\n @param source_world: The name of the target world\n @param urdf_path: The absolute path of the robot URDF model\n @param model_dir_path: The absolute path of the meshes directory\n @param reference_frame: The reference frame of the system\n \"\"\"\n self.ctx = ctx\n self.source = ctx.worlds[source_world]\n self.source_world_name = source_world\n self.target = ctx.worlds[target_world]\n self.target_world_name = target_world\n\n self.tfBuffer = tf2_ros.Buffer(rospy.Duration(TF_CACHE_TIME), debug=False)\n self.listener = tf2_ros.TransformListener(self.tfBuffer)\n\n self.node_mapping = {self.source.scene.rootnode.id: self.target.scene.rootnode.id}\n self.situation_mapping = {}\n\n self.already_created_node_ids = {}\n self.time_table = {}\n\n self.urdf_file_path = urdf_file_path\n self.model_dir_path = model_dir_path\n\n self.situation_map = {}\n\n self.robot_name = robot_name\n rospy.set_param('robot_name', robot_name)\n\n self.perspective_frame = perspective_frame\n self.reference_frame = reference_frame\n self.cam_rot = cam_rot\n # The map of the parent frames ordered by frame name\n self.parent_frames_map = {}\n self.model_map = {}\n\n self.relations_map = {}\n self.ros_pub = {\"situation_log\": rospy.Publisher(\"robot_monitor/log\", String, queue_size=5)}\n self.previous_nodes_to_update = []\n\n self.aabb_map = {}\n self.frames_transform = {}\n\n self.current_situations_map = {}\n\n self.parent_frames_map[reference_frame] = \"root\"\n self.parent_frames_map[\"base_footprint\"] = reference_frame\n\n self.load_urdf()\n\n def load_urdf(self):\n \"\"\"\n This function read the URDF file given in constructor and save the robot structure\n @return : None\n \"\"\"\n urdf_tree = etree.parse(self.urdf_file_path)\n\n urdf_root = urdf_tree.getroot()\n\n for link in urdf_root.iter(\"link\"):\n if link.find(\"visual\") is not None:\n if link.find(\"visual\").find(\"geometry\").find(\"mesh\") is not None:\n path = link.find(\"visual\").find(\"geometry\").find(\"mesh\").get(\"filename\").split(\"/\")\n if link.find(\"visual\").find(\"geometry\").find(\"mesh\").get(\"scale\"):\n scale_str = link.find(\"visual\").find(\"geometry\").find(\"mesh\").get(\"scale\").split(\" \")\n scale = float(scale_str[0]) * float(scale_str[1]) * 
float(scale_str[2])\n else:\n scale = 0.1\n count = 0\n path_str = \"\"\n element = path[len(path)-1]\n while count < len(path):\n if element == \"meshes\":\n break\n else:\n path_str = \"/\" + element + path_str\n count += 1\n element = path[len(path)-1-count]\n\n filename = self.model_dir_path + path_str\n try:\n nodes_loaded = ModelLoader().load(filename, self.ctx, world=self.target_world_name, root=None,\n only_meshes=True, scale=scale)\n for n in nodes_loaded:\n if n.type == MESH:\n self.model_map[link.get(\"name\")] = n.properties[\"mesh_ids\"]\n self.aabb_map[link.get(\"name\")] = n.properties[\"aabb\"]\n except Exception as e:\n pass\n else:\n if link.find(\"visual\").find(\"geometry\").find(\"box\") is not None:\n mesh_ids = []\n sizes = link.find(\"visual\").find(\"geometry\").find(\"box\").get(\"size\").split(\" \")\n box = Box.create(float(sizes[0]), float(sizes[1]), float(sizes[2]))\n self.ctx.push_mesh(box)\n mesh_ids.append([box.id])\n self.model_map[link.get(\"name\")] = mesh_ids\n\n def start_n2_situation(self, predicate, subject_name, object_name):\n description = predicate+\"(\"+subject_name+\",\"+object_name+\")\"\n sit = Situation(desc=description)\n sit.starttime = time.time()\n self.current_situations_map[description] = sit\n self.ros_pub[\"situation_log\"].publish(\"START \" + description)\n self.target.timeline.update(sit)\n return sit.id\n\n def start_n1_situation(self, predicate, subject_name):\n description = predicate+\"(\"+subject_name+\")\"\n sit = Situation(desc=description)\n sit.starttime = time.time()\n self.current_situations_map[description] = sit\n self.ros_pub[\"situation_log\"].publish(\"START \" + description)\n self.target.timeline.update(sit)\n return sit.id\n\n def end_n1_situation(self, predicate, subject_name):\n description = predicate+\"(\"+subject_name+\")\"\n sit = self.current_situations_map[description]\n self.ros_pub[\"situation_log\"].publish(\"END \"+description)\n try:\n self.target.timeline.end(sit)\n except Exception as e:\n rospy.logwarn(\"[robot_monitor] Exception occurred : \"+str(e))\n\n def end_n2_situation(self, predicate, subject_name, object_name):\n description = predicate+\"(\"+subject_name+\",\"+object_name+\")\"\n sit = self.current_situations_map[description]\n self.ros_pub[\"situation_log\"].publish(\"END \"+description)\n try:\n self.target.timeline.end(sit)\n except Exception as e:\n rospy.logwarn(\"[robot_monitor] Exception occurred : \"+str(e))\n\n def filter(self):\n nodes_to_update = []\n for node in self.source.scene.nodes:\n if node != self.source.scene.rootnode:\n new_node = node.copy()\n if node.id in self.node_mapping:\n new_node.id = self.node_mapping[node.id]\n if new_node in self.target.scene.nodes:\n if not numpy.allclose(self.target.scene.nodes[new_node.id].transformation, node.transformation,\n rtol=0, atol=EPSILON):\n nodes_to_update.append(node)\n else:\n self.node_mapping[node.id] = new_node.id\n self.frames_transform[new_node.name] = new_node.transformation\n nodes_to_update.append(new_node)\n\n if nodes_to_update:\n for node in nodes_to_update:\n if node.parent == self.source.scene.rootnode.id:\n self.target.scene.nodes.update(node)\n node.parent = self.node_mapping[node.parent] if node.parent in self.node_mapping \\\n else self.target.scene.rootnode.id\n self.target.scene.nodes.update(nodes_to_update)\n\n situations_to_update = []\n for situation in self.source.timeline:\n new_situation = situation.copy()\n if situation in self.situation_mapping:\n new_situation.id = 
self.situation_mapping[situation.id]\n else:\n self.situation_mapping[situation.id] = new_situation.id\n situations_to_update.append(new_situation)\n\n if situations_to_update:\n self.target.timeline.update(situations_to_update)\n\n def monitor_robot(self):\n \"\"\"\n This method read the frames of the robot if they exist in /tf and then update the poses/3D models of\n the robot in the output world\n @return : None\n \"\"\"\n try:\n nodes_to_update = []\n\n node = Camera(name=self.robot_name)\n node.properties[\"clipplanenear\"] = DEFAULT_CLIP_PLANE_NEAR\n node.properties[\"clipplanefar\"] = DEFAULT_CLIP_PLANE_FAR\n node.properties[\"horizontalfov\"] = math.radians(DEFAULT_HORIZONTAL_FOV)\n node.properties[\"aspect\"] = DEFAULT_ASPECT\n\n msg = self.tfBuffer.lookup_transform(self.reference_frame, self.perspective_frame, rospy.Time(0))\n trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]\n rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]\n\n transform = transformation_matrix(trans, rot)\n node.transformation = numpy.dot(transform, self.cam_rot)\n\n if node.name in self.already_created_node_ids:\n node.id = self.already_created_node_ids[node.name]\n if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):\n self.frames_transform[node.name] = node.transformation\n nodes_to_update.append(node)\n\n else:\n self.already_created_node_ids[node.name] = node.id\n self.frames_transform[node.name] = node.transformation\n nodes_to_update.append(node)\n\n for frame in self.model_map:\n node = Mesh(name=frame)\n node.properties[\"mesh_ids\"] = [mesh_id for mesh_id in self.model_map[frame]]\n node.properties[\"aabb\"] = self.aabb_map[frame]\n\n msg = self.tfBuffer.lookup_transform(self.perspective_frame, frame, rospy.Time(0))\n trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]\n rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z,\n msg.transform.rotation.w]\n\n node.transformation = transformation_matrix(trans, rot)\n\n node.parent = self.already_created_node_ids[self.robot_name]\n if node.name in self.already_created_node_ids:\n node.id = self.already_created_node_ids[frame]\n if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):\n self.frames_transform[node.name] = node.transformation\n nodes_to_update.append(node)\n else:\n self.already_created_node_ids[node.name] = node.id\n self.frames_transform[node.name] = node.transformation\n nodes_to_update.append(node)\n\n for node in self.source.scene.nodes:\n if node != self.source.scene.rootnode:\n new_node = node.copy()\n if node.id in self.node_mapping:\n new_node.id = self.node_mapping[node.id]\n if new_node in self.target.scene.nodes:\n if not numpy.allclose(self.target.scene.nodes[new_node.id].transformation, node.transformation,\n rtol=0, atol=EPSILON):\n nodes_to_update.append(node)\n else:\n self.node_mapping[node.id] = new_node.id\n self.frames_transform[new_node.name] = new_node.transformation\n nodes_to_update.append(new_node)\n\n if nodes_to_update:\n self.target.scene.nodes.update(nodes_to_update)\n self.previous_nodes_to_update = nodes_to_update\n\n except (tf2_ros.TransformException, tf2_ros.LookupException, tf2_ros.ConnectivityException,\n tf2_ros.ExtrapolationException):\n pass\n\n def run(self):\n while not rospy.is_shutdown():\n self.filter()\n 
self.monitor_robot()\n\nif __name__ == \"__main__\":\n\n sys.argv = [arg for arg in sys.argv if \"__name\" not in arg and \"__log\" not in arg]\n sys.argc = len(sys.argv)\n\n parser = argparse.ArgumentParser(description=\"Add in the given output world, the nodes from input \"\n \"world and the robot agent from ROS\")\n parser.add_argument(\"input_world\", help=\"Underworlds input world\")\n parser.add_argument(\"output_world\", help=\"Underworlds output world\")\n parser.add_argument(\"urdf_file_path\", help=\"The path of the urdf file\")\n parser.add_argument(\"model_dir_path\", help=\"The path of the robot mesh directory\")\n parser.add_argument(\"robot_name\", help=\"The robot name\")\n parser.add_argument(\"perspective_frame\", help=\"The name of the robot head gaze frame\")\n\n parser.add_argument(\"--cam_rot\", default=\"0.0_0.0_0.0\",\n help=\"The camera rotation offset :\\\"<rx>_<ry>_<rz>\\\" in [°] \")\n parser.add_argument(\"--reference\", default=\"map\", help=\"The reference frame\")\n args = parser.parse_args()\n\n rospy.init_node(\"robot_filter\", anonymous=False)\n\n with underworlds.Context(\"Robot filter\") as ctx:\n\n rx, rz, ry = [math.radians(float(i)) for i in args.cam_rot.split(\"_\")]\n rot = transformations.euler_matrix(rx, rz, ry, 'rxyz')\n RobotMonitor(ctx, args.input_world, args.output_world, args.urdf_file_path, args.model_dir_path,\n args.robot_name, args.perspective_frame, rot, args.reference).run()" ]
[ [ "numpy.dot", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andreashirley/Lolipop
[ "658a05c55fe8950f75d7ef50f1d983e86bd6fedf", "658a05c55fe8950f75d7ef50f1d983e86bd6fedf" ]
[ "muller/graphics/heatmap.py", "muller/graphics/distancegraphs.py" ]
[ "from pathlib import Path\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n# plt.switch_backend('agg')\nimport pandas\nimport seaborn\n\ndef plot_heatmap(data: pandas.DataFrame, filename: Path):\n\tfont = {\n\t\t'size': 20\n\t}\n\tuse_annotations = len(data) < 20 # So the annotations are actually visible\n\tmatplotlib.rc('font', **font)\n\tfigsize = (20, 20)\n\tfig, ax = plt.subplots(figsize = figsize)\n\tseaborn.heatmap(\n\t\tdata,\n\t\tax = ax,\n\t\tannot = use_annotations,\n\t\tcmap = 'Reds',\n\t\tsquare = True,\n\t\txticklabels = True,\n\t\tyticklabels = True\n\t)\n\tax.tick_params(axis = 'both', which = 'major', labelsize = 24)\n\tax.set_ylabel(\"Trajectory Label\", size = 32)\n\tax.set_xlabel(\"Trajectory Label\", size = 32)\n\tax.set_title(\"p-values of all mutational trajectories\", size = 36)\n\tplt.tight_layout()\n\tfig.savefig(str(filename), format = 'svg')\n", "from pathlib import Path\nfrom typing import List, Optional\n\nimport matplotlib.pyplot as plt\nimport seaborn\n\n# plt.style.use(\"/home/cld100/Documents/sandbox/matplotlibrc\")\n\n\ndef generate_distance_plot(distances: List[float], similarity_cutoff: float, filename: Optional[Path] = None, ax: plt.Axes = None):\n\t\"\"\" Shows the spread of computed pairwise distances as a histogram.\"\"\"\n\tif not ax:\n\t\tfig, ax = plt.subplots(figsize = (12, 10))\n\n\tseaborn.distplot(distances, ax = ax, kde = False, rug = True, bins = 20)\n\tax.axvline(similarity_cutoff, color = 'red')\n\n\tax.set_title(\"Pairwise distances between each pair of trajectories\")\n\tax.set_xlabel(\"Distance\")\n\tax.set_ylabel(\"Count\")\n\tax.set_xlim(0, max(distances))\n\tplt.tight_layout()\n\tif filename:\n\t\tplt.savefig(filename)\n\telse:\n\t\tplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "matplotlib.rc" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jangirrishabh/Overcoming-exploration-from-demos
[ "3760d5329326d47c068fd0c65330a0abad7f0197" ]
[ "experiment/config.py" ]
[ "import numpy as np\nimport gym\nimport gym_gazebo\n\nimport sys\nsys.path.append('/home/rjangir/software/workSpace/Overcoming-exploration-from-demos/')\n\n\nfrom baselines import logger\nfrom ddpg import DDPG\nfrom her import make_sample_her_transitions\n\n\nDEFAULT_ENV_PARAMS = {\n 'FetchReach-v1': {\n 'n_cycles': 10,\n },\n 'GazeboWAMemptyEnv-v2': {\n 'n_cycles': 20,\n },\n}\n\n\nDEFAULT_PARAMS = {\n # env\n 'max_u': 1., # max absolute value of actions on different coordinates\n # ddpg\n 'layers': 3, # number of layers in the critic/actor networks\n 'hidden': 256, # number of neurons in each hidden layers\n 'network_class': 'baselines.her.actor_critic:ActorCritic',\n 'Q_lr': 0.001, # critic learning rate\n 'pi_lr': 0.001, # actor learning rate\n 'buffer_size': int(1E6), # for experience replay\n 'polyak': 0.8, # polyak averaging coefficient\n 'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)\n 'clip_obs': 200.,\n 'scope': 'ddpg', # can be tweaked for testing\n 'relative_goals': False,\n # training\n 'n_cycles': 20, # per epoch\n 'rollout_batch_size': 1, # per mpi thread\n 'n_batches': 40, # training batches per cycle\n 'batch_size': 1024, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n 'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts\n 'test_with_polyak': False, # run test episodes with the target network\n # exploration\n 'random_eps': 0.2, # percentage of time a random action is taken\n 'noise_eps': 0.1, # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n # HER\n 'replay_strategy': 'future', # supported modes: future, none\n 'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future\n # normalization\n 'norm_eps': 0.01, # epsilon used for observation normalization\n 'norm_clip': 5, # normalized observations are cropped to this values\n 'bc_loss': 1, # whether or not to use the behavior cloning loss as an auxilliary loss\n 'q_filter': 1, # whether or not a Q value filter should be used on the Actor outputs\n 'num_demo': 100 # number of expert demo episodes\n}\n\n\nCACHED_ENVS = {}\n\n\ndef cached_make_env(make_env):\n \"\"\"\n Only creates a new environment from the provided function if one has not yet already been\n created. This is useful here because we need to infer certain properties of the env, e.g.\n its observation and action spaces, without any intend of actually using it.\n \"\"\"\n if make_env not in CACHED_ENVS:\n env = make_env()\n CACHED_ENVS[make_env] = env\n return CACHED_ENVS[make_env]\n\n\ndef prepare_params(kwargs):\n # DDPG params\n ddpg_params = dict()\n\n env_name = kwargs['env_name']\n\n def make_env():\n return gym.make(env_name)\n kwargs['make_env'] = make_env\n tmp_env = cached_make_env(kwargs['make_env'])\n assert hasattr(tmp_env, '_max_episode_steps')\n kwargs['T'] = tmp_env._max_episode_steps\n tmp_env.reset()\n kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']\n kwargs['gamma'] = 1. - 1. 
/ kwargs['T']\n if 'lr' in kwargs:\n kwargs['pi_lr'] = kwargs['lr']\n kwargs['Q_lr'] = kwargs['lr']\n del kwargs['lr']\n for name in ['buffer_size', 'hidden', 'layers',\n 'network_class',\n 'polyak',\n 'batch_size', 'Q_lr', 'pi_lr',\n 'norm_eps', 'norm_clip', 'max_u',\n 'action_l2', 'clip_obs', 'scope', 'relative_goals']:\n ddpg_params[name] = kwargs[name]\n kwargs['_' + name] = kwargs[name]\n del kwargs[name]\n kwargs['ddpg_params'] = ddpg_params\n\n return kwargs\n\n\ndef log_params(params, logger=logger):\n for key in sorted(params.keys()):\n logger.info('{}: {}'.format(key, params[key]))\n\n\ndef configure_her(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n\n def reward_fun(ag_2, g, info): # vectorized\n #return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n\n # Prepare configuration for HER.\n her_params = {\n 'reward_fun': reward_fun,\n }\n for name in ['replay_strategy', 'replay_k']:\n her_params[name] = params[name]\n params['_' + name] = her_params[name]\n del params[name]\n sample_her_transitions = make_sample_her_transitions(**her_params)\n\n return sample_her_transitions\n\n\ndef simple_goal_subtract(a, b):\n assert a.shape == b.shape\n return a - b\n\n\ndef configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):\n sample_her_transitions = configure_her(params)\n # Extract relevant parameters.\n gamma = params['gamma']\n rollout_batch_size = params['rollout_batch_size']\n ddpg_params = params['ddpg_params']\n\n input_dims = dims.copy()\n\n # DDPG agent\n env = cached_make_env(params['make_env'])\n env.reset()\n ddpg_params.update({'input_dims': input_dims, # agent takes an input observations\n 'T': params['T'],\n 'clip_pos_returns': True, # clip positive returns\n 'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return\n 'rollout_batch_size': rollout_batch_size,\n 'subtract_goals': simple_goal_subtract,\n 'sample_transitions': sample_her_transitions,\n 'gamma': gamma,\n 'bc_loss': params['bc_loss'],\n 'q_filter': params['q_filter'],\n 'num_demo': params['num_demo'],\n })\n ddpg_params['info'] = {\n 'env_name': params['env_name'],\n }\n policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)\n return policy\n\n\ndef configure_dims(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n obs, _, _, info = env.step(env.action_space.sample())\n\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n\n for key, value in info.items():\n value = np.array(value)\n if value.ndim == 0:\n value = value.reshape(1)\n dims['info_{}'.format(key)] = value.shape[0]\n return dims\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rollingman1/audioset_tagging_cnn
[ "5036f772dfa8dd05fbfb0b6fa5bfedcea10cfb10" ]
[ "pytorch/inference.py" ]
[ "import os\nimport sys\n\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nimport numpy as np\nimport argparse\nimport librosa\nimport matplotlib.pyplot as plt\nimport torch\n\nfrom utilities import create_folder, get_filename\nfrom models import *\nfrom pytorch_utils import move_data_to_device\nimport config\n\n\ndef audio_tagging(args):\n \"\"\"Inference audio tagging result of an audio clip.\n \"\"\"\n\n # Arugments & parameters\n sample_rate = args.sample_rate\n window_size = args.window_size\n hop_size = args.hop_size\n mel_bins = args.mel_bins\n fmin = args.fmin\n fmax = args.fmax\n model_type = args.model_type\n checkpoint_path = args.checkpoint_path\n audio_path = args.audio_path\n device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')\n\n classes_num = config.classes_num\n labels = config.labels\n\n # Model\n Model = eval(model_type)\n model = Model(sample_rate=sample_rate, window_size=window_size,\n hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,\n classes_num=classes_num)\n\n checkpoint = torch.load(checkpoint_path, map_location=device)\n model.load_state_dict(checkpoint['model'])\n\n # Parallel\n if 'cuda' in str(device):\n model.to(device)\n print('GPU number: {}'.format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n else:\n print('Using CPU.')\n\n # Load audio\n (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n\n waveform = waveform[None, :] # (1, audio_length)\n waveform = move_data_to_device(waveform, device)\n\n # Forward\n with torch.no_grad():\n model.eval()\n batch_output_dict = model(waveform, None)\n print('batch_output_dict\\n', batch_output_dict)\n\n clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]\n \"\"\"(classes_num,)\"\"\"\n\n print('clipwise_output:\\n', clipwise_output)\n\n sorted_indexes = np.argsort(clipwise_output)[::-1]\n\n # Print audio tagging top probabilities\n for k in range(10):\n print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],\n clipwise_output[sorted_indexes[k]]))\n\n # Print embedding\n if 'embedding' in batch_output_dict.keys():\n embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]\n print('embedding: {}'.format(embedding.shape))\n\n return clipwise_output, labels\n\n\ndef sound_event_detection(args):\n \"\"\"Inference sound event detection result of an audio clip.\n \"\"\"\n\n # Arugments & parameters\n sample_rate = args.sample_rate\n window_size = args.window_size\n hop_size = args.hop_size\n mel_bins = args.mel_bins\n fmin = args.fmin\n fmax = args.fmax\n model_type = args.model_type\n checkpoint_path = args.checkpoint_path\n audio_path = args.audio_path\n device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')\n\n classes_num = config.classes_num\n labels = config.labels\n frames_per_second = sample_rate // hop_size\n print('frames_per_second', frames_per_second)\n\n # Paths\n fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))\n create_folder(os.path.dirname(fig_path))\n\n # Model\n Model = eval(model_type)\n model = Model(sample_rate=sample_rate, window_size=window_size,\n hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,\n classes_num=classes_num)\n\n checkpoint = torch.load(checkpoint_path, map_location=device)\n model.load_state_dict(checkpoint['model'])\n\n # Parallel\n print('GPU number: {}'.format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n\n if 'cuda' in 
str(device):\n model.to(device)\n\n # Load audio\n (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n\n waveform = waveform[None, :] # (1, audio_length)\n waveform = move_data_to_device(waveform, device)\n\n # Forward\n with torch.no_grad():\n model.eval()\n batch_output_dict = model(waveform, None)\n\n framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]\n \"\"\"(time_steps, classes_num)\"\"\"\n print('framewise_output:', framewise_output[0].argmax())\n\n print('Sound event detection result (time_steps x classes_num): {}'.format(\n framewise_output.shape))\n\n sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]\n\n top_k = 10 # Show top results\n top_result_mat = framewise_output[:, sorted_indexes[0: top_k]]\n print('top result mat', top_result_mat)\n print('frame number', len(top_result_mat))\n for idx, frame in enumerate(top_result_mat):\n if idx % (frames_per_second//2) == 0:\n if frame[frame.argmax()] > 0.03:\n print('frame_label', idx/frames_per_second, np.array(labels)[sorted_indexes[0: top_k]][frame.argmax()])\n else:\n print('frame_label', 'None')\n print(np.array(labels)[sorted_indexes[0: top_k]])\n \"\"\"(time_steps, top_k)\"\"\"\n\n # Plot result \n stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,\n hop_length=hop_size, window='hann', center=True)\n frames_num = stft.shape[-1]\n\n fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))\n axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')\n axs[0].set_ylabel('Frequency bins')\n axs[0].set_title('Log spectrogram')\n axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)\n axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))\n axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))\n axs[1].yaxis.set_ticks(np.arange(0, top_k))\n axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0: top_k]])\n axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)\n axs[1].set_xlabel('Seconds')\n axs[1].xaxis.set_ticks_position('bottom')\n\n plt.tight_layout()\n plt.savefig(fig_path)\n print('Save sound event detection visualization to {}'.format(fig_path))\n\n return framewise_output, labels\n\n\nif __name__ == '__main__':\n import time\n\n start = time.time()\n\n parser = argparse.ArgumentParser(description='Example of parser. 
')\n subparsers = parser.add_subparsers(dest='mode')\n\n parser_at = subparsers.add_parser('audio_tagging')\n parser_at.add_argument('--sample_rate', type=int, default=32000)\n parser_at.add_argument('--window_size', type=int, default=1024)\n parser_at.add_argument('--hop_size', type=int, default=320)\n parser_at.add_argument('--mel_bins', type=int, default=64)\n parser_at.add_argument('--fmin', type=int, default=50)\n parser_at.add_argument('--fmax', type=int, default=14000)\n parser_at.add_argument('--model_type', type=str, required=True)\n parser_at.add_argument('--checkpoint_path', type=str, required=True)\n parser_at.add_argument('--audio_path', type=str, required=True)\n parser_at.add_argument('--cuda', action='store_true', default=False)\n\n parser_sed = subparsers.add_parser('sound_event_detection')\n parser_sed.add_argument('--sample_rate', type=int, default=32000)\n parser_sed.add_argument('--window_size', type=int, default=1024)\n parser_sed.add_argument('--hop_size', type=int, default=320)\n parser_sed.add_argument('--mel_bins', type=int, default=64)\n parser_sed.add_argument('--fmin', type=int, default=50)\n parser_sed.add_argument('--fmax', type=int, default=14000)\n parser_sed.add_argument('--model_type', type=str, required=True)\n parser_sed.add_argument('--checkpoint_path', type=str, required=True)\n parser_sed.add_argument('--audio_path', type=str, required=True)\n parser_sed.add_argument('--cuda', action='store_true', default=False)\n\n args = parser.parse_args()\n\n if args.mode == 'audio_tagging':\n audio_tagging(args)\n\n elif args.mode == 'sound_event_detection':\n sound_event_detection(args)\n\n else:\n raise Exception('Error argument!')\n\n print(\"time :\", time.time() - start)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.abs", "torch.load", "numpy.arange", "torch.cuda.device_count", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.max", "torch.no_grad", "torch.cuda.is_available", "torch.device", "numpy.argsort", "torch.nn.DataParallel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
visitor9999th/Tensorflow_GP-GAN
[ "344efbfe4805fadf539151b18b7431a481c5c9ba" ]
[ "run_gp_gan.py" ]
[ "import argparse\nimport os\n\nimport cv2\nimport tensorflow as tf\n\nfrom gp_gan import gp_gan\nfrom model import EncoderDecoder\n\n#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Disable GPU computation\n\nbasename = lambda path: os.path.splitext(os.path.basename(path))[0]\n\n\"\"\"\n Note: source image, destination image and mask image have the same size.\n\"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Gaussian-Poisson GAN for high-resolution image blending')\n parser.add_argument('--nef', type=int, default=64, help='# of base filters in encoder')\n parser.add_argument('--ngf', type=int, default=64, help='# of base filters in decoder or G')\n parser.add_argument('--nc', type=int, default=3, help='# of output channels in decoder or G')\n parser.add_argument('--nBottleneck', type=int, default=4000, help='# of output channels in encoder')\n parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')\n\n parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')\n\n parser.add_argument('--color_weight', type=float, default=1, help='Color weight')\n parser.add_argument('--sigma', type=float, default=0.5,\n help='Sigma for gaussian smooth of Gaussian-Poisson Equation')\n parser.add_argument('--gradient_kernel', type=str, default='normal', help='Kernel type for calc gradient')\n parser.add_argument('--smooth_sigma', type=float, default=1, help='Sigma for gaussian smooth of Laplacian pyramid')\n\n parser.add_argument('--generator_path', default=None, help='Path to GAN model checkpoint')\n\n parser.add_argument('--list_path', default='',\n help='File for input list in csv format: obj_path;bg_path;mask_path in each line')\n parser.add_argument('--result_folder', default='blending_result', help='Name for folder storing results')\n\n parser.add_argument('--src_image', default='DataBase/test_images/src.jpg', help='Path for source image')\n parser.add_argument('--dst_image', default='DataBase/test_images/dst.jpg', help='Path for destination image')\n parser.add_argument('--mask_image', default='DataBase/test_images/mask.png', help='Path for mask image')\n parser.add_argument('--blended_image', default='DataBase/test_images/result2.jpg', help='Where to save blended image')\n\n args = parser.parse_args()\n\n print('Input arguments:')\n for key, value in vars(args).items():\n print('\\t{}: {}'.format(key, value))\n print('')\n\n # Init CNN model\n generator = EncoderDecoder(encoder_filters=args.nef, encoded_dims=args.nBottleneck, output_channels=args.nc,\n decoder_filters=args.ngf, is_training=False, image_size=args.image_size,\n scope_name='generator')\n\n inputdata = tf.placeholder(\n dtype=tf.float32,\n shape=[1, args.image_size, args.image_size, args.nc],\n name='input'\n )\n\n gan_im_tens = generator(inputdata)\n\n loader = tf.train.Saver(tf.all_variables())\n sess = tf.Session()\n\n with sess.as_default():\n loader.restore(sess=sess, save_path=args.generator_path)\n\n # Init image list\n if args.list_path:\n print('Load images from {} ...'.format(args.list_path))\n with open(args.list_path) as f:\n test_list = [line.strip().split(';') for line in f]\n print('\\t {} images in total ...\\n'.format(len(test_list)))\n else:\n test_list = [(args.src_image, args.dst_image, args.mask_image)]\n\n if not args.blended_image:\n # Init result folder\n if not os.path.isdir(args.result_folder):\n os.makedirs(args.result_folder)\n print('Result will save to {} ...\\n'.format(args.result_folder))\n\n total_size = 
len(test_list)\n for idx in range(total_size):\n print('Processing {}/{} ...'.format(idx + 1, total_size))\n\n # load image\n obj = cv2.cvtColor(cv2.imread(test_list[idx][0], 1), cv2.COLOR_BGR2RGB) / 255\n bg = cv2.cvtColor(cv2.imread(test_list[idx][1], 1), cv2.COLOR_BGR2RGB) / 255\n mask = cv2.imread(test_list[idx][2], 0).astype(obj.dtype)\n\n blended_im = gp_gan(obj, bg, mask, gan_im_tens, inputdata, sess, args.image_size, color_weight=args.color_weight,\n sigma=args.sigma,\n gradient_kernel=args.gradient_kernel, smooth_sigma=args.smooth_sigma)\n\n if args.blended_image:\n cv2.imwrite(args.blended_image, cv2.cvtColor(blended_im, cv2.COLOR_RGB2BGR))\n else:\n cv2.imwrite('{}/obj_{}_bg_{}_mask_{}.png'.format(args.result_folder, basename(test_list[idx][0]),\n basename(test_list[idx][1]), basename(test_list[idx][2])),\n blended_im)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.all_variables", "tensorflow.placeholder", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mahiuchun/agents
[ "965e9868c3f921b33fddef37b5b9d1662e0ea448" ]
[ "tf_agents/bandits/policies/greedy_reward_prediction_policy.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Policy for greedy reward prediction.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.bandits.networks import heteroscedastic_q_network\nfrom tf_agents.bandits.policies import policy_utilities\nfrom tf_agents.bandits.specs import utils as bandit_spec_utils\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\n\n\[email protected]\nclass GreedyRewardPredictionPolicy(tf_policy.TFPolicy):\n \"\"\"Class to build GreedyNNPredictionPolicies.\"\"\"\n\n def __init__(self,\n time_step_spec=None,\n action_spec=None,\n reward_network=None,\n observation_and_action_constraint_splitter=None,\n accepts_per_arm_features=False,\n emit_policy_info=(),\n name=None):\n \"\"\"Builds a GreedyRewardPredictionPolicy given a reward tf_agents.Network.\n\n This policy takes a tf_agents.Network predicting rewards and generates the\n action corresponding to the largest predicted reward.\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n reward_network: An instance of a `tf_agents.network.Network`,\n callable via `network(observation, step_type) -> (output, final_state)`.\n observation_and_action_constraint_splitter: A function used for masking\n valid/invalid actions with each state of the environment. The function\n takes in a full observation and returns a tuple consisting of 1) the\n part of the observation intended as input to the network and 2) the\n mask. The mask should be a 0-1 `Tensor` of shape\n `[batch_size, num_actions]`. This function should also work with a\n `TensorSpec` as input, and should output `TensorSpec` objects for the\n observation and mask.\n accepts_per_arm_features: (bool) Whether the policy accepts per-arm\n features.\n emit_policy_info: (tuple of strings) what side information we want to get\n as part of the policy info. Allowed values can be found in\n `policy_utilities.PolicyInfo`.\n name: The name of this policy. All variables in this module will fall\n under that name. 
Defaults to the class name.\n\n Raises:\n NotImplementedError: If `action_spec` contains more than one\n `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.\n \"\"\"\n flat_action_spec = tf.nest.flatten(action_spec)\n if len(flat_action_spec) > 1:\n raise NotImplementedError(\n 'action_spec can only contain a single BoundedTensorSpec.')\n\n action_spec = flat_action_spec[0]\n if (not tensor_spec.is_bounded(action_spec) or\n not tensor_spec.is_discrete(action_spec) or\n action_spec.shape.rank > 1 or\n action_spec.shape.num_elements() != 1):\n raise NotImplementedError(\n 'action_spec must be a BoundedTensorSpec of type int32 and shape (). '\n 'Found {}.'.format(action_spec))\n self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1\n self._action_offset = action_spec.minimum\n reward_network.create_variables()\n self._reward_network = reward_network\n\n self._emit_policy_info = emit_policy_info\n predicted_rewards_mean = ()\n if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:\n predicted_rewards_mean = tensor_spec.TensorSpec(\n [self._expected_num_actions])\n bandit_policy_type = ()\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:\n bandit_policy_type = (\n policy_utilities.create_bandit_policy_type_tensor_spec(shape=[1]))\n if accepts_per_arm_features:\n # The features for the chosen arm is saved to policy_info.\n arm_spec = time_step_spec.observation[\n bandit_spec_utils.PER_ARM_FEATURE_KEY]\n chosen_arm_features_info = tensor_spec.remove_outer_dims_nest(arm_spec, 1)\n info_spec = policy_utilities.PerArmPolicyInfo(\n predicted_rewards_mean=predicted_rewards_mean,\n bandit_policy_type=bandit_policy_type,\n chosen_arm_features=chosen_arm_features_info)\n else:\n info_spec = policy_utilities.PolicyInfo(\n predicted_rewards_mean=predicted_rewards_mean,\n bandit_policy_type=bandit_policy_type)\n\n self._accepts_per_arm_features = accepts_per_arm_features\n\n super(GreedyRewardPredictionPolicy, self).__init__(\n time_step_spec, action_spec,\n policy_state_spec=reward_network.state_spec,\n clip=False,\n info_spec=info_spec,\n observation_and_action_constraint_splitter=(\n observation_and_action_constraint_splitter),\n name=name)\n\n @property\n def accepts_per_arm_features(self):\n return self._accepts_per_arm_features\n\n def _variables(self):\n return self._reward_network.variables\n\n def _distribution(self, time_step, policy_state):\n observation = time_step.observation\n observation_and_action_constraint_splitter = (\n self.observation_and_action_constraint_splitter)\n if observation_and_action_constraint_splitter is not None:\n observation, mask = observation_and_action_constraint_splitter(\n observation)\n\n predictions, policy_state = self._reward_network(\n observation, time_step.step_type, policy_state)\n batch_size = tf.shape(predictions)[0]\n\n if isinstance(self._reward_network,\n heteroscedastic_q_network.HeteroscedasticQNetwork):\n predicted_reward_values = predictions.q_value_logits\n else:\n predicted_reward_values = predictions\n\n predicted_reward_values.shape.with_rank_at_least(2)\n predicted_reward_values.shape.with_rank_at_most(3)\n if predicted_reward_values.shape[-1] != self._expected_num_actions:\n raise ValueError(\n 'The number of actions ({}) does not match the reward_network output'\n ' size ({}).'.format(self._expected_num_actions,\n predicted_reward_values.shape[1]))\n if observation_and_action_constraint_splitter is not None:\n actions = policy_utilities.masked_argmax(\n 
predicted_reward_values, mask, output_type=self.action_spec.dtype)\n else:\n actions = tf.argmax(\n predicted_reward_values, axis=-1, output_type=self.action_spec.dtype)\n actions += self._action_offset\n\n bandit_policy_values = tf.fill([batch_size, 1],\n policy_utilities.BanditPolicyType.GREEDY)\n\n if self._accepts_per_arm_features:\n # Saving the features for the chosen action to the policy_info.\n def gather_observation(obs):\n return tf.gather(params=obs, indices=actions, batch_dims=1)\n\n chosen_arm_features = tf.nest.map_structure(\n gather_observation,\n observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])\n policy_info = policy_utilities.PerArmPolicyInfo(\n predicted_rewards_mean=(\n predicted_reward_values if policy_utilities.InfoFields\n .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),\n bandit_policy_type=(bandit_policy_values\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE\n in self._emit_policy_info else ()),\n chosen_arm_features=chosen_arm_features)\n else:\n policy_info = policy_utilities.PolicyInfo(\n predicted_rewards_mean=(\n predicted_reward_values if policy_utilities.InfoFields\n .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),\n bandit_policy_type=(bandit_policy_values\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE\n in self._emit_policy_info else ()))\n\n return policy_step.PolicyStep(\n tfp.distributions.Deterministic(loc=actions), policy_state, policy_info)\n" ]
[ [ "tensorflow.fill", "tensorflow.shape", "tensorflow.argmax", "tensorflow.gather", "tensorflow.nest.flatten", "tensorflow.nest.map_structure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZibinGuo/Paddle
[ "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "6e0892312de5e4ba76d980ff0e4322ac55ca0d07" ]
[ "python/paddle/nn/layer/rnn.py", "python/paddle/fluid/tests/unittests/test_diag_v2.py", "python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py", "python/paddle/fluid/tests/unittests/test_trunc_op.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport collections\nimport itertools\nimport six\nimport math\nimport sys\nimport warnings\nfrom functools import partial, reduce\n\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle import framework\nfrom paddle.device import get_device, get_cudnn_version\nfrom paddle.nn import functional as F\nfrom paddle.nn import initializer as I\nfrom paddle.nn import Layer, LayerList\nfrom paddle.fluid.layers import utils\nfrom paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as\nfrom paddle.fluid.data_feeder import convert_dtype\nfrom paddle import _C_ops\n__all__ = []\n\n\ndef split_states(states, bidirectional=False, state_components=1):\n r\"\"\"\n Split states of RNN network into possibly nested list or tuple of\n states of each RNN cells of the RNN network.\n\n Parameters:\n states (Tensor|tuple|list): the concatenated states for RNN network.\n When `state_components` is 1, states in a Tensor with shape\n `(L*D, N, C)` where `L` is the number of layers of the RNN \n network, `D` is the number of directions of the RNN network(1 \n for unidirectional RNNs and 2 for bidirectional RNNs), `N` is \n the batch size of the input to the RNN network, `C` is the \n hidden size of the RNN network. \n\n When `state_components` is larger than 1, `states` is a tuple of \n `state_components` Tensors that meet the requirements described \n above. \n \n For SimpleRNNs and GRUs, `state_components` is 1, and for LSTMs, \n `state_components` is 2.\n bidirectional (bool): whether the state is of a bidirectional RNN \n network. Defaults to False.\n state_components (int): the number of the components of the states. see\n `states` above. Defaults to 1.\n \n Returns:\n A nested list or tuple of RNN cell states. \n If `bidirectional` is True, it can be indexed twice to get an RNN \n cell state. The first index indicates the layer, the second index \n indicates the direction.\n If `bidirectional` is False, it can be indexed once to get an RNN\n cell state. 
The index indicates the layer.\n Note that if `state_components` is larger than 1, an RNN cell state\n can be indexed one more time to get a tensor of shape(N, C), where \n `N` is the batch size of the input to the RNN cell, and `C` is the\n hidden size of the RNN cell.\n \"\"\"\n if state_components == 1:\n states = paddle.unstack(states)\n if not bidirectional:\n return states\n else:\n return list(zip(states[::2], states[1::2]))\n else:\n assert len(states) == state_components\n states = tuple([paddle.unstack(item) for item in states])\n if not bidirectional:\n return list(zip(*states))\n else:\n states = list(zip(*states))\n return list(zip(states[::2], states[1::2]))\n\n\ndef concat_states(states, bidirectional=False, state_components=1):\n r\"\"\"\n Concatenate a possibly nested list or tuple of RNN cell states into a \n compact form.\n\n Parameters:\n states (list|tuple): a possibly nested list or tuple of RNN cell \n states. \n If `bidirectional` is True, it can be indexed twice to get an \n RNN cell state. The first index indicates the layer, the second \n index indicates the direction.\n If `bidirectional` is False, it can be indexed once to get an RNN\n cell state. The index indicates the layer.\n Note that if `state_components` is larger than 1, an RNN cell \n state can be indexed one more time to get a tensor of shape(N, C), \n where `N` is the batch size of the input to the RNN cell, and \n `C` is the hidden size of the RNN cell. \n bidirectional (bool): whether the state is of a bidirectional RNN \n network. Defaults to False.\n state_components (int): the number of the components of the states. see\n `states` above. Defaults to 1.\n \n Returns:\n Concatenated states for RNN network.\n When `state_components` is 1, states in a Tensor with shape\n `(L\\*D, N, C)` where `L` is the number of layers of the RNN \n network, `D` is the number of directions of the RNN network(1 for \n unidirectional RNNs and 2 for bidirectional RNNs), `N` is the batch \n size of the input to the RNN network, `C` is the hidden size of the \n RNN network.\n \n \"\"\"\n if state_components == 1:\n return paddle.stack(flatten(states))\n else:\n states = flatten(states)\n componnets = []\n for i in range(state_components):\n componnets.append(states[i::state_components])\n return tuple([paddle.stack(item) for item in componnets])\n\n\nclass RNNCellBase(Layer):\n r\"\"\"\n RNNCellBase is the base class for abstraction representing the calculations\n mapping the input and state to the output and new state. It is suitable to\n and mostly used in RNN.\n \"\"\"\n\n def get_initial_states(self,\n batch_ref,\n shape=None,\n dtype=None,\n init_value=0.,\n batch_dim_idx=0):\n r\"\"\"\n Generate initialized states according to provided shape, data type and\n value.\n\n Parameters:\n batch_ref (Tensor): A tensor, which shape would be used to \n determine the batch size, which is used to generate initial \n states. For `batch_ref`'s shape d, `d[batch_dim_idx]` is \n treated as batch size.\n shape (list|tuple, optional): A (possibly nested structure of) shape[s], \n where a shape is a list/tuple of integer. `-1` (for batch size) \n will be automatically prepended if a shape does not starts with \n it. If None, property `state_shape` will be used. Defaults to \n None.\n dtype (str|list|tuple, optional): A (possibly nested structure of) \n data type[s]. The structure must be same as that of `shape`, \n except when all tensors' in states has the same data type, a \n single data type can be used. 
If None and property `cell.state_shape` \n is not available, current default floating type of paddle is \n used. Defaults to None.\n init_value (float, optional): A float value used to initialize states. \n Defaults to 0.\n batch_dim_idx (int, optional): An integer indicating which \n dimension of `batch_ref` represents the batch. Defaults to 0.\n \n Returns:\n init_states (Tensor|tuple|list): tensor of the provided shape and \n dtype, or list of tensors that each satisfies the requirements,\n packed in the same structure as `shape` and `type` does.\n \"\"\"\n # TODO: use inputs and batch_size\n batch_ref = flatten(batch_ref)[0]\n\n def _is_shape_sequence(seq):\n if sys.version_info < (3, ):\n integer_types = (\n int,\n long, )\n else:\n integer_types = (int, )\n \"\"\"For shape, list/tuple of integer is the finest-grained object\"\"\"\n if (isinstance(seq, list) or isinstance(seq, tuple)):\n if reduce(lambda flag, x: isinstance(x, integer_types) and flag,\n seq, True):\n return False\n # TODO: Add check for the illegal\n if isinstance(seq, dict):\n return True\n return (isinstance(seq, collections.Sequence) and\n not isinstance(seq, six.string_types))\n\n class Shape(object):\n def __init__(self, shape):\n self.shape = shape if shape[0] == -1 else ([-1] + list(shape))\n\n # nested structure of shapes\n states_shapes = self.state_shape if shape is None else shape\n is_sequence_ori = utils.is_sequence\n utils.is_sequence = _is_shape_sequence\n states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)\n utils.is_sequence = is_sequence_ori\n\n # nested structure of dtypes\n try:\n states_dtypes = self.state_dtype if dtype is None else dtype\n except NotImplementedError:\n states_dtypes = framework.get_default_dtype()\n if len(flatten(states_dtypes)) == 1:\n dtype = flatten(states_dtypes)[0]\n states_dtypes = map_structure(lambda shape: dtype, states_shapes)\n\n init_states = map_structure(\n lambda shape, dtype: paddle.fluid.layers.fill_constant_batch_size_like(\n input=batch_ref,\n shape=shape.shape,\n dtype=dtype,\n value=init_value,\n input_dim_idx=batch_dim_idx), states_shapes, states_dtypes)\n return init_states\n\n @property\n def state_shape(self):\n r\"\"\"\n Abstract method (property).\n Used to initialize states.\n A (possibly nested structure of) shape[s], where a shape is a \n list/tuple of integers (-1 for batch size would be automatically\n inserted into a shape if the shape does not start with it).\n Not necessary to be implemented if states are not initialized by\n `get_initial_states` or the `shape` argument is provided when using\n `get_initial_states`.\n \"\"\"\n raise NotImplementedError(\n \"Please add implementation for `state_shape` in the used cell.\")\n\n @property\n def state_dtype(self):\n r\"\"\"\n Abstract method (property).\n Used to initialize states.\n A (possibly nested structure of) data type[s]. The structure must be\n the same as that of `shape`, except when all tensors in states have the same\n data type, a single data type can be used.\n Not necessary to be implemented if states are not initialized\n by `get_initial_states` or the `dtype` argument is provided when using\n `get_initial_states`.\n \"\"\"\n raise NotImplementedError(\n \"Please add implementation for `state_dtype` in the used cell.\")\n\n\nclass SimpleRNNCell(RNNCellBase):\n r\"\"\"\n Elman RNN (SimpleRNN) cell. Given the inputs and previous states, it \n computes the outputs and updates states.\n\n The formula used is as follows:\n\n .. 
math::\n        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})\n\n        y_{t} & = h_{t}\n    \n    where :math:`act` is the :attr:`activation` function.\n\n    Please refer to `Finding Structure in Time \n    <https://crl.ucsd.edu/~elman/Papers/fsit.pdf>`_ for more details.\n    \n    Parameters:\n        input_size (int): The input size.\n        hidden_size (int): The hidden size.\n        activation (str, optional): The activation in the SimpleRNN cell. \n            It can be `tanh` or `relu`. Defaults to `tanh`.\n        weight_ih_attr (ParamAttr, optional): The parameter attribute for \n            :math:`weight_ih`. Default: None.\n        weight_hh_attr(ParamAttr, optional): The parameter attribute for \n            :math:`weight_hh`. Default: None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            :math:`bias_ih`. Default: None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            :math:`bias_hh`. Default: None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Variables:\n        - **weight_ih** (Parameter): shape (hidden_size, input_size), input to hidden weight, corresponding to :math:`W_{ih}` in the formula.\n        - **weight_hh** (Parameter): shape (hidden_size, hidden_size), hidden to hidden weight, corresponding to :math:`W_{hh}` in the formula.\n        - **bias_ih** (Parameter): shape (hidden_size, ), input to hidden bias, corresponding to :math:`b_{ih}` in the formula.\n        - **bias_hh** (Parameter): shape (hidden_size, ), hidden to hidden bias, corresponding to :math:`b_{hh}` in the formula.\n    \n    Inputs:\n        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_{t}` in the formula.\n        - **states** (Tensor, optional): shape `[batch_size, hidden_size]`, the previous hidden state, corresponding to :math:`h_{t-1}` in the formula. When states is None, zero state is used. Defaults to None.\n\n    Returns:\n        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.\n        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.\n    \n    Notes:\n        All the weights and biases are initialized with `Uniform(-std, std)` by default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.\n\n    Examples:\n\n        .. 
code-block:: python\n\n            import paddle\n\n            x = paddle.randn((4, 16))\n            prev_h = paddle.randn((4, 32))\n\n            cell = paddle.nn.SimpleRNNCell(16, 32)\n            y, h = cell(x, prev_h)\n            print(y.shape)\n\n            #[4,32]\n\n    \"\"\"\n\n    def __init__(self,\n                 input_size,\n                 hidden_size,\n                 activation=\"tanh\",\n                 weight_ih_attr=None,\n                 weight_hh_attr=None,\n                 bias_ih_attr=None,\n                 bias_hh_attr=None,\n                 name=None):\n        super(SimpleRNNCell, self).__init__()\n        if hidden_size <= 0:\n            raise ValueError(\n                \"hidden_size of {} must be greater than 0, but now equals to {}\".\n                format(self.__class__.__name__, hidden_size))\n        std = 1.0 / math.sqrt(hidden_size)\n        self.weight_ih = self.create_parameter(\n            (hidden_size, input_size),\n            weight_ih_attr,\n            default_initializer=I.Uniform(-std, std))\n        self.weight_hh = self.create_parameter(\n            (hidden_size, hidden_size),\n            weight_hh_attr,\n            default_initializer=I.Uniform(-std, std))\n        self.bias_ih = self.create_parameter(\n            (hidden_size, ),\n            bias_ih_attr,\n            is_bias=True,\n            default_initializer=I.Uniform(-std, std))\n        self.bias_hh = self.create_parameter(\n            (hidden_size, ),\n            bias_hh_attr,\n            is_bias=True,\n            default_initializer=I.Uniform(-std, std))\n\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        if activation not in [\"tanh\", \"relu\"]:\n            raise ValueError(\n                \"activation for SimpleRNNCell should be tanh or relu, \"\n                \"but got {}\".format(activation))\n        self.activation = activation\n        self._activation_fn = paddle.tanh \\\n            if activation == \"tanh\" \\\n            else F.relu\n\n    def forward(self, inputs, states=None):\n        if states is None:\n            states = self.get_initial_states(inputs, self.state_shape)\n        pre_h = states\n        i2h = paddle.matmul(inputs, self.weight_ih, transpose_y=True)\n        if self.bias_ih is not None:\n            i2h += self.bias_ih\n        h2h = paddle.matmul(pre_h, self.weight_hh, transpose_y=True)\n        if self.bias_hh is not None:\n            h2h += self.bias_hh\n        h = self._activation_fn(i2h + h2h)\n        return h, h\n\n    @property\n    def state_shape(self):\n        return (self.hidden_size, )\n\n    def extra_repr(self):\n        s = '{input_size}, {hidden_size}'\n        if self.activation != \"tanh\":\n            s += ', activation={activation}'\n        return s.format(**self.__dict__)\n\n\nclass LSTMCell(RNNCellBase):\n    r\"\"\"\n    Long-Short Term Memory (LSTM) RNN cell. Given the inputs and previous states, \n    it computes the outputs and updates states.\n\n    The formula used is as follows:\n\n    .. math::\n        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})\n\n        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})\n\n        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})\n\n        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})\n\n        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}\n\n        h_{t} & = o_{t} * \tanh(c_{t})\n\n        y_{t} & = h_{t}\n\n    where :math:`\sigma` is the sigmoid function, and * is the elementwise \n    multiplication operator.\n\n    Please refer to `An Empirical Exploration of Recurrent Network Architectures\n    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.\n\n    Parameters:\n        input_size (int): The input size.\n        hidden_size (int): The hidden size.\n        weight_ih_attr(ParamAttr, optional): The parameter attribute for \n            `weight_ih`. Default: None.\n        weight_hh_attr(ParamAttr, optional): The parameter attribute for \n            `weight_hh`. Default: None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_ih`. Default: None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_hh`. 
Default: None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Variables:\n        - **weight_ih** (Parameter): shape (4 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ii}, W_{if}, W_{ig}, W_{io}` in the formula.\n        - **weight_hh** (Parameter): shape (4 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}` in the formula.\n        - **bias_ih** (Parameter): shape (4 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ii}, b_{if}, b_{ig}, b_{io}` in the formula.\n        - **bias_hh** (Parameter): shape (4 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}` in the formula.\n\n    Inputs:\n        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_t` in the formula.\n        - **states** (list|tuple, optional): a list/tuple of two tensors, each of shape `[batch_size, hidden_size]`, the previous hidden state, corresponding to :math:`h_{t-1}, c_{t-1}` in the formula. When states is None, zero state is used. Defaults to None.\n\n    Returns:\n        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.\n        - **states** (tuple): a tuple of two tensors, each of shape `[batch_size, hidden_size]`, the new hidden states, corresponding to :math:`h_{t}, c_{t}` in the formula.\n\n    Notes:\n        All the weights and biases are initialized with `Uniform(-std, std)` by \n        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more \n        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.\n\n    Examples:\n\n        .. 
code-block:: python\n\n            import paddle\n\n            x = paddle.randn((4, 16))\n            prev_h = paddle.randn((4, 32))\n            prev_c = paddle.randn((4, 32))\n\n            cell = paddle.nn.LSTMCell(16, 32)\n            y, (h, c) = cell(x, (prev_h, prev_c))\n\n            print(y.shape)\n            print(h.shape)\n            print(c.shape)\n\n            #[4,32]\n            #[4,32]\n            #[4,32]\n\n    \"\"\"\n\n    def __init__(self,\n                 input_size,\n                 hidden_size,\n                 weight_ih_attr=None,\n                 weight_hh_attr=None,\n                 bias_ih_attr=None,\n                 bias_hh_attr=None,\n                 name=None):\n        super(LSTMCell, self).__init__()\n        if hidden_size <= 0:\n            raise ValueError(\n                \"hidden_size of {} must be greater than 0, but now equals to {}\".\n                format(self.__class__.__name__, hidden_size))\n        std = 1.0 / math.sqrt(hidden_size)\n        self.weight_ih = self.create_parameter(\n            (4 * hidden_size, input_size),\n            weight_ih_attr,\n            default_initializer=I.Uniform(-std, std))\n        self.weight_hh = self.create_parameter(\n            (4 * hidden_size, hidden_size),\n            weight_hh_attr,\n            default_initializer=I.Uniform(-std, std))\n        self.bias_ih = self.create_parameter(\n            (4 * hidden_size, ),\n            bias_ih_attr,\n            is_bias=True,\n            default_initializer=I.Uniform(-std, std))\n        self.bias_hh = self.create_parameter(\n            (4 * hidden_size, ),\n            bias_hh_attr,\n            is_bias=True,\n            default_initializer=I.Uniform(-std, std))\n\n        self.hidden_size = hidden_size\n        self.input_size = input_size\n        self._gate_activation = F.sigmoid\n        self._activation = paddle.tanh\n\n    def forward(self, inputs, states=None):\n        if states is None:\n            states = self.get_initial_states(inputs, self.state_shape)\n        pre_hidden, pre_cell = states\n        gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)\n        if self.bias_ih is not None:\n            gates = gates + self.bias_ih\n        gates += paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)\n        if self.bias_hh is not None:\n            gates = gates + self.bias_hh\n\n        chunked_gates = paddle.split(gates, num_or_sections=4, axis=-1)\n\n        i = self._gate_activation(chunked_gates[0])\n        f = self._gate_activation(chunked_gates[1])\n        o = self._gate_activation(chunked_gates[3])\n        c = f * pre_cell + i * self._activation(chunked_gates[2])\n        h = o * self._activation(c)\n\n        return h, (h, c)\n\n    @property\n    def state_shape(self):\n        r\"\"\"\n        The `state_shape` of LSTMCell is a tuple with two shapes: \n        `((hidden_size, ), (hidden_size,))`. (-1 for batch size would be \n        automatically inserted into shape). These two shapes correspond \n        to :math:`h_{t-1}` and :math:`c_{t-1}` separately.\n        \"\"\"\n        return ((self.hidden_size, ), (self.hidden_size, ))\n\n    def extra_repr(self):\n        return '{input_size}, {hidden_size}'.format(**self.__dict__)\n\n\nclass GRUCell(RNNCellBase):\n    r\"\"\"\n    Gated Recurrent Unit (GRU) RNN cell. Given the inputs and previous states, \n    it computes the outputs and updates states.\n\n    The formula used is as follows:\n\n    .. math::\n\n        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})\n\n        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})\n\n        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))\n\n        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}\n\n        y_{t} & = h_{t}\n    \n    where :math:`\sigma` is the sigmoid function, and * is the elementwise \n    multiplication operator.\n\n    Please refer to `An Empirical Exploration of Recurrent Network Architectures\n    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.\n\n    Parameters:\n        input_size (int): The input size.\n        hidden_size (int): The hidden size.\n        weight_ih_attr(ParamAttr, optional): The parameter attribute for \n            `weight_ih`. 
Default: None.\n        weight_hh_attr(ParamAttr, optional): The parameter attribute for \n            `weight_hh`. Default: None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_ih`. Default: None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_hh`. Default: None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Variables:\n        - **weight_ih** (Parameter): shape (3 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ir}, W_{iz}, W_{ic}` in the formula.\n        - **weight_hh** (Parameter): shape (3 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hr}, W_{hz}, W_{hc}` in the formula.\n        - **bias_ih** (Parameter): shape (3 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ir}, b_{iz}, b_{ic}` in the formula.\n        - **bias_hh** (Parameter): shape (3 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hr}, b_{hz}, b_{hc}` in the formula.\n\n    Inputs:\n        - **inputs** (Tensor): A tensor with shape `[batch_size, input_size]`, corresponding to :math:`x_t` in the formula.\n        - **states** (Tensor): A tensor with shape `[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}` in the formula.\n\n    Returns:\n        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.\n        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.\n    \n    Notes:\n        All the weights and biases are initialized with `Uniform(-std, std)` by \n        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more \n        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.\n\n    Examples:\n\n        .. 
code-block:: python\n\n import paddle\n\n x = paddle.randn((4, 16))\n prev_h = paddle.randn((4, 32))\n\n cell = paddle.nn.GRUCell(16, 32)\n y, h = cell(x, prev_h)\n\n print(y.shape)\n print(h.shape)\n\n #[4,32]\n #[4,32]\n\n \"\"\"\n\n def __init__(self,\n input_size,\n hidden_size,\n weight_ih_attr=None,\n weight_hh_attr=None,\n bias_ih_attr=None,\n bias_hh_attr=None,\n name=None):\n super(GRUCell, self).__init__()\n if hidden_size <= 0:\n raise ValueError(\n \"hidden_size of {} must be greater than 0, but now equals to {}\".\n format(self.__class__.__name__, hidden_size))\n std = 1.0 / math.sqrt(hidden_size)\n self.weight_ih = self.create_parameter(\n (3 * hidden_size, input_size),\n weight_ih_attr,\n default_initializer=I.Uniform(-std, std))\n self.weight_hh = self.create_parameter(\n (3 * hidden_size, hidden_size),\n weight_hh_attr,\n default_initializer=I.Uniform(-std, std))\n self.bias_ih = self.create_parameter(\n (3 * hidden_size, ),\n bias_ih_attr,\n is_bias=True,\n default_initializer=I.Uniform(-std, std))\n self.bias_hh = self.create_parameter(\n (3 * hidden_size, ),\n bias_hh_attr,\n is_bias=True,\n default_initializer=I.Uniform(-std, std))\n\n self.hidden_size = hidden_size\n self.input_size = input_size\n self._gate_activation = F.sigmoid\n self._activation = paddle.tanh\n\n def forward(self, inputs, states=None):\n if states is None:\n states = self.get_initial_states(inputs, self.state_shape)\n\n pre_hidden = states\n x_gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)\n if self.bias_ih is not None:\n x_gates = x_gates + self.bias_ih\n h_gates = paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)\n if self.bias_hh is not None:\n h_gates = h_gates + self.bias_hh\n\n x_r, x_z, x_c = paddle.split(x_gates, num_or_sections=3, axis=1)\n h_r, h_z, h_c = paddle.split(h_gates, num_or_sections=3, axis=1)\n\n r = self._gate_activation(x_r + h_r)\n z = self._gate_activation(x_z + h_z)\n c = self._activation(x_c + r * h_c) # apply reset gate after mm\n h = (pre_hidden - c) * z + c\n\n return h, h\n\n @property\n def state_shape(self):\n r\"\"\"\n The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch\n size would be automatically inserted into shape). The shape corresponds\n to the shape of :math:`h_{t-1}`.\n \"\"\"\n return (self.hidden_size, )\n\n def extra_repr(self):\n return '{input_size}, {hidden_size}'.format(**self.__dict__)\n\n\nclass RNN(Layer):\n r\"\"\"\n Wrapper for RNN, which creates a recurrent neural network with an RNN cell. \n It performs :code:`cell.forward()` repeatedly until reaches to the maximum \n length of `inputs`.\n\n Parameters:\n cell(RNNCellBase): An instance of `RNNCellBase`.\n is_reverse (bool, optional): Indicate whether to calculate in the reverse\n order of input sequences. Defaults to False.\n time_major (bool): Whether the first dimension of the input means the\n time steps. Defaults to False.\n\n Inputs:\n - **inputs** (Tensor): A (possibly nested structure of) tensor[s]. The input sequences. If time major is False, the shape is `[batch_size, time_steps, input_size]`. If time major is True, the shape is `[time_steps, batch_size, input_size]` where `input_size` is the input size of the cell.\n - **initial_states** (Tensor|list|tuple, optional): Tensor of a possibly nested structure of tensors, representing the initial state for the rnn cell. If not provided, `cell.get_initial_states` would be called to produce the initial states. 
Defaults to None.\n        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.\n        - **kwargs**: Additional keyword arguments to pass to `forward` of the cell. \n\n    Returns:\n        - **outputs** (Tensor|list|tuple): the output sequences. If `time_major` is True, the shape is `[time_steps, batch_size, hidden_size]`, else `[batch_size, time_steps, hidden_size]`.\n        - **final_states** (Tensor|list|tuple): final states of the cell. Tensor or a possibly nested structure of tensors which has the same structure as the initial states. Each tensor in final states has the same shape and dtype as the corresponding tensor in initial states.\n    \n    Notes:\n        This class is a low-level API for wrapping an rnn cell into an RNN network.\n        Users should take care of the state of the cell. If `initial_states` is \n        passed to the `forward` method, make sure that it satisfies the \n        requirements of the cell.\n\n    Examples:\n\n        .. code-block:: python\n\n            import paddle\n\n            inputs = paddle.rand((4, 23, 16))\n            prev_h = paddle.randn((4, 32))\n\n            cell = paddle.nn.SimpleRNNCell(16, 32)\n            rnn = paddle.nn.RNN(cell)\n            outputs, final_states = rnn(inputs, prev_h)\n\n            print(outputs.shape)\n            print(final_states.shape)\n\n            #[4,23,32]\n            #[4,32]\n\n    \"\"\"\n\n    def __init__(self, cell, is_reverse=False, time_major=False):\n        super(RNN, self).__init__()\n        self.cell = cell\n        if not hasattr(self.cell, \"call\"):\n            # for non-dygraph mode, `rnn` api uses cell.call\n            self.cell.call = self.cell.forward\n        self.is_reverse = is_reverse\n        self.time_major = time_major\n\n    def forward(self,\n                inputs,\n                initial_states=None,\n                sequence_length=None,\n                **kwargs):\n        final_outputs, final_states = paddle.fluid.layers.rnn(\n            self.cell,\n            inputs,\n            initial_states=initial_states,\n            sequence_length=sequence_length,\n            time_major=self.time_major,\n            is_reverse=self.is_reverse,\n            **kwargs)\n        return final_outputs, final_states\n\n\nclass BiRNN(Layer):\n    r\"\"\"\n    Wrapper for bidirectional RNN, which builds a bidirectional RNN given the \n    forward rnn cell and backward rnn cell. A BiRNN applies forward RNN and \n    backward RNN with corresponding cells separately and concatenates the outputs \n    along the last axis.\n\n    Parameters:\n        cell_fw (RNNCellBase): An RNNCellBase instance used for forward RNN.\n        cell_bw (RNNCellBase): An RNNCellBase instance used for backward RNN.\n        time_major (bool): Whether the first dimension of the input means the\n            time steps. Defaults to False.\n\n    Inputs:\n        - **inputs** (Tensor): the input sequences of both RNN. If time_major is True, the shape is `[time_steps, batch_size, input_size]`, else the shape is `[batch_size, time_steps, input_size]`, where input_size is the input size of both cells.\n        - **initial_states** (list|tuple, optional): A tuple/list of the initial states of the forward cell and backward cell. If not provided, `cell.get_initial_states` would be called to produce the initial states for each cell. Defaults to None.\n        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. 
In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.\n        - **kwargs**: Additional keyword arguments. Arguments passed to `forward` for each cell.\n\n    Outputs:\n        - **outputs** (Tensor): the outputs of the bidirectional RNN. It is the concatenation of the outputs from the forward RNN and backward RNN along the last axis. If time major is True, the shape is `[time_steps, batch_size, size]`, else the shape is `[batch_size, time_steps, size]`, where size is `cell_fw.hidden_size + cell_bw.hidden_size`.\n        - **final_states** (tuple): A tuple of the final states of the forward cell and backward cell. \n\n    Notes:\n        This class is a low-level API for wrapping rnn cells into a BiRNN \n        network. Users should take care of the states of the cells. \n        If `initial_states` is passed to the `forward` method, make sure that \n        it satisfies the requirements of the cells.\n\n    Examples:\n\n        .. code-block:: python\n\n            import paddle\n\n            cell_fw = paddle.nn.LSTMCell(16, 32)\n            cell_bw = paddle.nn.LSTMCell(16, 32)\n            rnn = paddle.nn.BiRNN(cell_fw, cell_bw)\n\n            inputs = paddle.rand((2, 23, 16))\n            outputs, final_states = rnn(inputs)\n\n            print(outputs.shape)\n            print(final_states[0][0].shape,len(final_states),len(final_states[0]))\n\n            #[2,23,64]\n            #[2,32] 2 2\n\n    \"\"\"\n\n    def __init__(self, cell_fw, cell_bw, time_major=False):\n        super(BiRNN, self).__init__()\n        self.cell_fw = cell_fw\n        self.cell_bw = cell_bw\n        if cell_fw.input_size != cell_bw.input_size:\n            raise ValueError(\"input size of forward cell({}) does not equal \"\n                             \"that of backward cell({})\".format(\n                                 cell_fw.input_size, cell_bw.input_size))\n        for cell in [self.cell_fw, self.cell_bw]:\n            if not hasattr(cell, \"call\"):\n                # for non-dygraph mode, `rnn` api uses cell.call\n                cell.call = cell.forward\n        self.time_major = time_major\n\n    def forward(self,\n                inputs,\n                initial_states=None,\n                sequence_length=None,\n                **kwargs):\n        if isinstance(initial_states, (list, tuple)):\n            assert len(initial_states) == 2, \\\n                \"length of initial_states should be 2 when it is a list/tuple\"\n\n        outputs, final_states = paddle.fluid.layers.birnn(\n            self.cell_fw, self.cell_bw, inputs, initial_states, sequence_length,\n            self.time_major, **kwargs)\n        return outputs, final_states\n\n\nclass RNNBase(LayerList):\n    r\"\"\"\n    RNNBase class for RNN networks. 
It provides `forward`, `flatten_parameters`\n and other common methods for SimpleRNN, LSTM and GRU.\n \"\"\"\n\n def __init__(self,\n mode,\n input_size,\n hidden_size,\n num_layers=1,\n direction=\"forward\",\n time_major=False,\n dropout=0.,\n weight_ih_attr=None,\n weight_hh_attr=None,\n bias_ih_attr=None,\n bias_hh_attr=None):\n super(RNNBase, self).__init__()\n bidirectional_list = [\"bidirectional\", \"bidirect\"]\n self.mode = mode\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.dropout = dropout\n self.num_directions = 2 if direction in bidirectional_list else 1\n self.time_major = time_major\n self.num_layers = num_layers\n self.state_components = 2 if mode == \"LSTM\" else 1\n\n kwargs = {\n \"weight_ih_attr\": weight_ih_attr,\n \"weight_hh_attr\": weight_hh_attr,\n \"bias_ih_attr\": bias_ih_attr,\n \"bias_hh_attr\": bias_hh_attr\n }\n\n if mode == \"LSTM\":\n rnn_cls = LSTMCell\n elif mode == \"GRU\":\n rnn_cls = GRUCell\n else:\n rnn_cls = SimpleRNNCell\n kwargs[\"activation\"] = self.activation\n\n if direction in [\"forward\"]:\n is_reverse = False\n cell = rnn_cls(input_size, hidden_size, **kwargs)\n self.append(RNN(cell, is_reverse, time_major))\n for i in range(1, num_layers):\n cell = rnn_cls(hidden_size, hidden_size, **kwargs)\n self.append(RNN(cell, is_reverse, time_major))\n elif direction in bidirectional_list:\n cell_fw = rnn_cls(input_size, hidden_size, **kwargs)\n cell_bw = rnn_cls(input_size, hidden_size, **kwargs)\n self.append(BiRNN(cell_fw, cell_bw, time_major))\n for i in range(1, num_layers):\n cell_fw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)\n cell_bw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)\n self.append(BiRNN(cell_fw, cell_bw, time_major))\n else:\n raise ValueError(\n \"direction should be forward or bidirect (or bidirectional), \"\n \"received direction = {}\".format(direction))\n\n self.could_use_cudnn = True\n self.could_use_cudnn &= len(self.parameters()) == num_layers * 4 * (\n 2 if direction in bidirectional_list else 1)\n\n # Expose params as RNN's attribute, which can make it compatible when\n # replacing small ops composed rnn with cpp rnn kernel.\n # Moreover, `jit.to_static` assumes params are added by current layer\n # and wouldn't include sublayer's params in current layer, which also\n # requires these params are added to current layer for `jit.save`.\n param_names = []\n for layer in range(self.num_layers):\n for direction in range(self.num_directions):\n suffix = '_reverse' if direction == 1 else ''\n param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])\n if bias_ih_attr != False: param_names.append('bias_ih_l{}{}')\n if bias_hh_attr != False: param_names.append('bias_hh_l{}{}')\n param_names = [x.format(layer, suffix) for x in param_names]\n for name, param in zip(param_names, self.parameters()):\n setattr(self, name, param)\n\n self.flatten_parameters()\n\n def flatten_parameters(self):\n \"\"\"\n Resets parameter data pointer to address in continuous memory block for\n cudnn usage.\n \"\"\"\n if self.could_use_cudnn:\n # layer.parameters() is depth first and ordered\n # for i in layer: for j in direct: w_ih, w_hh, b_ih, b_hh\n # need to reorganize to cudnn param layout:\n # all bias following all weights\n params = self.parameters(include_sublayers=False)\n shape = [np.prod(param.shape) for param in params]\n self._all_weights = [None] * len(params)\n for i, param in enumerate(params):\n offset = 0 if i % 4 < 2 else (2 * self.num_layers *\n self.num_directions)\n layer_idx = i // 4\n 
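# cudnn packs all weight tensors ahead of all bias tensors; params\n                # arrive as (w_ih, w_hh, b_ih, b_hh) per (layer, direction), so\n                # compute the slot of param i in the reordered flat list\n                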
self._all_weights[offset + layer_idx * 2 + i % 2] = param\n            # Wrap using a list to avoid being registered into params and saved;\n            # maybe need a better way to handle this later. Use `create_parameter`\n            # to add both to main_program and startup_program for static-graph.\n            # Use the Constant initializer to avoid affecting the random generator.\n            self._flat_weight = [\n                self.create_parameter(\n                    shape=[np.sum(shape)],\n                    dtype=params[0].dtype,\n                    default_initializer=I.Constant(0.0))\n            ]\n            # the dropout state may also be hidden to avoid saving;\n            # should the dropout state be persistable for static-graph?\n            self._dropout_state = self.create_variable(\n                dtype=fluid.core.VarDesc.VarType.UINT8)\n            if fluid.framework.in_dygraph_mode():\n                with paddle.no_grad():\n                    _C_ops.coalesce_tensor(self._all_weights, self._all_weights,\n                                           self._flat_weight[0], \"copy_data\",\n                                           True, \"use_align\", False, \"dtype\",\n                                           params[0].dtype)\n                return\n            # for static-graph, append coalesce_tensor into startup program\n            with fluid.program_guard(fluid.default_startup_program(),\n                                     fluid.default_startup_program()):\n                with paddle.no_grad():\n                    self._helper.append_op(\n                        type=\"coalesce_tensor\",\n                        inputs={\"Input\": self._all_weights},\n                        outputs={\n                            \"Output\": self._all_weights,\n                            \"FusedOutput\": self._flat_weight\n                        },\n                        attrs={\n                            \"copy_data\": True,\n                            \"use_align\": False,\n                            \"dtype\": params[0].dtype\n                        })\n\n    def _cudnn_impl(self, inputs, initial_states, sequence_length):\n        if not self.time_major:\n            inputs = paddle.tensor.transpose(inputs, [1, 0, 2])\n\n        if fluid.framework.in_dygraph_mode():\n            _, _, out, state = _C_ops.rnn(\n                inputs, initial_states, self._all_weights, sequence_length,\n                self._dropout_state, self.state_components, 'dropout_prob',\n                self.dropout, 'is_bidirec', self.num_directions == 2,\n                'input_size', self.input_size, 'hidden_size', self.hidden_size,\n                'num_layers', self.num_layers, 'mode', self.mode, 'is_test',\n                not self.training)\n        else:\n            out = self._helper.create_variable_for_type_inference(inputs.dtype)\n            state = [\n                self._helper.create_variable_for_type_inference(inputs.dtype)\n                for i in range(self.state_components)\n            ]\n            reserve = self._helper.create_variable_for_type_inference(\n                dtype=fluid.core.VarDesc.VarType.UINT8, stop_gradient=True)\n\n            inputs = {\n                'Input': inputs,\n                'WeightList': self._all_weights,\n                'PreState': initial_states,\n                'SequenceLength': sequence_length\n            }\n            attrs = {\n                'dropout_prob': self.dropout,\n                'is_bidirec': self.num_directions == 2,\n                'input_size': self.input_size,\n                'hidden_size': self.hidden_size,\n                'num_layers': self.num_layers,\n                'mode': self.mode,\n                'is_test': not self.training\n            }\n\n            outputs = {\n                'Out': out,\n                'State': state,\n                'Reserve': reserve,\n                'DropoutState': self._dropout_state,\n            }\n\n            self._helper.append_op(\n                type=\"rnn\", inputs=inputs, outputs=outputs, attrs=attrs)\n\n        out = paddle.tensor.transpose(out,\n                                      [1, 0, 2]) if not self.time_major else out\n        return out, tuple(state) if len(state) > 1 else state[0]\n\n    def forward(self, inputs, initial_states=None, sequence_length=None):\n        batch_index = 1 if self.time_major else 0\n        dtype = inputs.dtype\n        if initial_states is None:\n            state_shape = (self.num_layers * self.num_directions, -1,\n                           self.hidden_size)\n            initial_states = tuple([\n                paddle.fluid.layers.fill_constant_batch_size_like(\n                    inputs, state_shape, dtype, 0, batch_index, 1)\n                for _ in range(self.state_components)\n            ])\n        else:\n            initial_states = [initial_states] if isinstance(\n                initial_states, paddle.static.Variable) else initial_states\n\n        if self.could_use_cudnn and 
(\n                not paddle.device.is_compiled_with_rocm() or\n                sequence_length is None):\n            # Add CPU kernel and dispatch in backend later\n            return self._cudnn_impl(inputs, initial_states, sequence_length)\n\n        states = split_states(initial_states, self.num_directions == 2,\n                              self.state_components)\n        final_states = []\n\n        for i, rnn_layer in enumerate(self):\n            if i > 0:\n                inputs = F.dropout(\n                    inputs,\n                    self.dropout,\n                    training=self.training,\n                    mode=\"upscale_in_train\")\n            outputs, final_state = rnn_layer(inputs, states[i], sequence_length)\n            final_states.append(final_state)\n            inputs = outputs\n\n        final_states = concat_states(final_states, self.num_directions == 2,\n                                     self.state_components)\n        return outputs, final_states\n\n    def extra_repr(self):\n        main_str = '{input_size}, {hidden_size}'\n        if self.num_layers != 1:\n            main_str += ', num_layers={num_layers}'\n        if self.time_major != False:\n            main_str += ', time_major={time_major}'\n        if self.dropout != 0:\n            main_str += ', dropout={dropout}'\n        return main_str.format(**self.__dict__)\n\n\nclass SimpleRNN(RNNBase):\n    r\"\"\"\n    Multilayer Elman network (SimpleRNN). It takes input sequences and initial \n    states as inputs, and returns the output sequences and the final states.\n\n    Each layer inside the SimpleRNN maps the input sequences and initial states \n    to the output sequences and final states in the following manner: at each \n    step, it takes step inputs(:math:`x_{t}`) and previous \n    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`)\n    and new states(:math:`h_{t}`).\n\n    .. math::\n\n        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})\n\n        y_{t} & = h_{t}\n    \n    where :math:`act` is the :attr:`activation` function.\n\n    Using keyword arguments to construct is recommended.\n\n    Parameters:\n        input_size (int): The input size for the first layer's cell.\n        hidden_size (int): The hidden size for each layer's cell.\n        num_layers (int, optional): Number of layers. Defaults to 1.\n        direction (str, optional): The direction of the network. It can be \"forward\"\n            or \"bidirect\"(or \"bidirectional\"). When \"bidirect\", the way to merge\n            outputs of forward and backward is concatenating. Defaults to \"forward\".\n        time_major (bool, optional): Whether the first dimension of the input means the\n            time steps. Defaults to False.\n        dropout (float, optional): The dropout probability. Dropout is applied to the \n            input of each layer except for the first layer. Defaults to 0.\n        activation (str, optional): The activation in each SimpleRNN cell. It can be \n            `tanh` or `relu`. Defaults to `tanh`.\n        weight_ih_attr (ParamAttr, optional): The parameter attribute for \n            `weight_ih` of each cell. Defaults to None.\n        weight_hh_attr (ParamAttr, optional): The parameter attribute for \n            `weight_hh` of each cell. Defaults to None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_ih` of each cell. Defaults to None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_hh` of each cell. Defaults to None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Inputs:\n        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`.\n        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. 
If initial_state is not given, zero initial states are used.\n - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.\n\n Returns:\n\n - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" else 1.\n \n - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.\n\n Variables:\n - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.\n - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.\n - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.\n - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n rnn = paddle.nn.SimpleRNN(16, 32, 2)\n\n x = paddle.randn((4, 23, 16))\n prev_h = paddle.randn((2, 4, 32))\n y, h = rnn(x, prev_h)\n\n print(y.shape)\n print(h.shape)\n\n #[4,23,32]\n #[2,4,32]\n\n \"\"\"\n\n def __init__(self,\n input_size,\n hidden_size,\n num_layers=1,\n direction=\"forward\",\n time_major=False,\n dropout=0.,\n activation=\"tanh\",\n weight_ih_attr=None,\n weight_hh_attr=None,\n bias_ih_attr=None,\n bias_hh_attr=None,\n name=None):\n if activation == \"tanh\":\n mode = \"RNN_TANH\"\n elif activation == \"relu\":\n mode = \"RNN_RELU\"\n else:\n raise ValueError(\"Unknown activation '{}'\".format(activation))\n self.activation = activation\n super(SimpleRNN, self).__init__(\n mode, input_size, hidden_size, num_layers, direction, time_major,\n dropout, weight_ih_attr, weight_hh_attr, bias_ih_attr, bias_hh_attr)\n\n\nclass LSTM(RNNBase):\n r\"\"\"\n Multilayer LSTM. It takes a sequence and an initial state as inputs, and \n returns the output sequences and the final states.\n\n Each layer inside the LSTM maps the input sequences and initial states \n to the output sequences and final states in the following manner: at each \n step, it takes step inputs(:math:`x_{t}`) and previous \n states(:math:`h_{t-1}, c_{t-1}`) as inputs, and returns step \n outputs(:math:`y_{t}`) and new states(:math:`h_{t}, c_{t}`).\n\n .. 
math::\n\n        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})\n\n        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})\n\n        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})\n\n        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})\n\n        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}\n\n        h_{t} & = o_{t} * \tanh(c_{t})\n\n        y_{t} & = h_{t}\n\n    where :math:`\sigma` is the sigmoid function, and * is the elementwise \n    multiplication operator.\n\n    Using keyword arguments to construct is recommended.\n\n    Parameters:\n        input_size (int): The input size for the first layer's cell.\n        hidden_size (int): The hidden size for each layer's cell.\n        num_layers (int, optional): Number of layers. Defaults to 1.\n        direction (str, optional): The direction of the network. It can be \"forward\"\n            or \"bidirect\"(or \"bidirectional\"). When \"bidirect\", the way to merge\n            outputs of forward and backward is concatenating. Defaults to \"forward\".\n        time_major (bool, optional): Whether the first dimension of the input \n            means the time steps. Defaults to False.\n        dropout (float, optional): The dropout probability. Dropout is applied \n            to the input of each layer except for the first layer. Defaults to 0.\n        weight_ih_attr (ParamAttr, optional): The parameter attribute for \n            `weight_ih` of each cell. Default: None.\n        weight_hh_attr (ParamAttr, optional): The parameter attribute for \n            `weight_hh` of each cell. Default: None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_ih` of each cell. Default: None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_hh` of each cell. Default: None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Inputs:\n        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`.\n        - **initial_states** (list|tuple, optional): the initial state, a list/tuple of (h, c), the shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_state is not given, zero initial states are used.\n        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.\n\n    Returns:\n\n        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`. If `time_major` is False, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" else 1.\n        \n        - **final_states** (tuple): the final state, a tuple of two tensors, h and c. The shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.\n\n    Variables:\n        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. 
Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.\n        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.\n        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.\n        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.\n\n    Examples:\n    \n        .. code-block:: python\n\n            import paddle\n\n            rnn = paddle.nn.LSTM(16, 32, 2)\n\n            x = paddle.randn((4, 23, 16))\n            prev_h = paddle.randn((2, 4, 32))\n            prev_c = paddle.randn((2, 4, 32))\n            y, (h, c) = rnn(x, (prev_h, prev_c))\n\n            print(y.shape)\n            print(h.shape)\n            print(c.shape)\n\n            #[4,23,32]\n            #[2,4,32]\n            #[2,4,32]\n\n    \"\"\"\n\n    def __init__(self,\n                 input_size,\n                 hidden_size,\n                 num_layers=1,\n                 direction=\"forward\",\n                 time_major=False,\n                 dropout=0.,\n                 weight_ih_attr=None,\n                 weight_hh_attr=None,\n                 bias_ih_attr=None,\n                 bias_hh_attr=None,\n                 name=None):\n        super(LSTM, self).__init__(\n            \"LSTM\", input_size, hidden_size, num_layers, direction, time_major,\n            dropout, weight_ih_attr, weight_hh_attr, bias_ih_attr, bias_hh_attr)\n\n\nclass GRU(RNNBase):\n    r\"\"\"\n    Multilayer GRU. It takes input sequences and initial states as inputs, and \n    returns the output sequences and the final states.\n\n    Each layer inside the GRU maps the input sequences and initial states \n    to the output sequences and final states in the following manner: at each \n    step, it takes step inputs(:math:`x_{t}`) and previous \n    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`) \n    and new states(:math:`h_{t}`).\n\n    .. math::\n\n        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})\n\n        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})\n\n        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))\n\n        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}\n\n        y_{t} & = h_{t}\n\n    where :math:`\sigma` is the sigmoid function, and * is the elementwise \n    multiplication operator.\n\n    Using keyword arguments to construct is recommended.\n\n    Parameters:\n        input_size (int): The input size for the first layer's cell.\n        hidden_size (int): The hidden size for each layer's cell.\n        num_layers (int, optional): Number of layers. Defaults to 1.\n        direction (str, optional): The direction of the network. It can be \"forward\"\n            or \"bidirect\"(or \"bidirectional\"). When \"bidirect\", the way to merge\n            outputs of forward and backward is concatenating. Defaults to \"forward\".\n        time_major (bool, optional): Whether the first dimension of the input \n            means the time steps. Defaults to False.\n        dropout (float, optional): The dropout probability. Dropout is applied \n            to the input of each layer except for the first layer. Defaults to 0.\n        weight_ih_attr (ParamAttr, optional): The parameter attribute for \n            `weight_ih` of each cell. Default: None.\n        weight_hh_attr (ParamAttr, optional): The parameter attribute for \n            `weight_hh` of each cell. Default: None.\n        bias_ih_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_ih` of each cell. Default: None.\n        bias_hh_attr (ParamAttr, optional): The parameter attribute for the \n            `bias_hh` of each cell. Default: None.\n        name (str, optional): Name for the operation (optional, default is \n            None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Inputs:\n        - **inputs** (Tensor): the input sequence. 
If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`.\n        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_state is not given, zero initial states are used. Defaults to None.\n        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.\n\n    Returns:\n\n        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" else 1.\n        \n        - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is \"bidirectional\" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.\n\n    Variables:\n        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.\n        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.\n        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.\n        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.\n\n    Examples:\n\n        .. code-block:: python\n\n            import paddle\n\n            rnn = paddle.nn.GRU(16, 32, 2)\n\n            x = paddle.randn((4, 23, 16))\n            prev_h = paddle.randn((2, 4, 32))\n            y, h = rnn(x, prev_h)\n\n            print(y.shape)\n            print(h.shape)\n\n            #[4,23,32]\n            #[2,4,32]\n\n    \"\"\"\n\n    def __init__(self,\n                 input_size,\n                 hidden_size,\n                 num_layers=1,\n                 direction=\"forward\",\n                 time_major=False,\n                 dropout=0.,\n                 weight_ih_attr=None,\n                 weight_hh_attr=None,\n                 bias_ih_attr=None,\n                 bias_hh_attr=None,\n                 name=None):\n        super(GRU, self).__init__(\n            \"GRU\", input_size, hidden_size, num_layers, direction, time_major,\n            dropout, weight_ih_attr, weight_hh_attr, bias_ih_attr, bias_hh_attr)\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import core\nfrom paddle.fluid import Program, program_guard\nfrom paddle.fluid.framework import _test_eager_guard\n\n\nclass TestDiagV2Op(OpTest):\n def setUp(self):\n self.op_type = \"diag_v2\"\n self.x = np.random.rand(10, 10)\n self.offset = 0\n self.padding_value = 0.0\n self.out = np.diag(self.x, self.offset)\n\n self.init_config()\n self.inputs = {'X': self.x}\n self.attrs = {\n 'offset': self.offset,\n 'padding_value': self.padding_value\n }\n self.outputs = {'Out': self.out}\n\n def test_check_output(self):\n paddle.enable_static()\n self.check_output(check_eager=True)\n\n def init_config(self):\n pass\n\n\nclass TestDiagV2OpCase1(TestDiagV2Op):\n def init_config(self):\n self.offset = 1\n self.out = np.diag(self.x, self.offset)\n\n\nclass TestDiagV2OpCase2(TestDiagV2Op):\n def init_config(self):\n self.offset = -1\n self.out = np.diag(self.x, self.offset)\n\n\nclass TestDiagV2OpCase3(TestDiagV2Op):\n def init_config(self):\n self.x = np.random.randint(-10, 10, size=(10, 10))\n self.out = np.diag(self.x, self.offset)\n\n\nclass TestDiagV2OpCase4(TestDiagV2Op):\n def init_config(self):\n self.x = np.random.rand(100)\n self.padding_value = 8\n n = self.x.size\n self.out = self.padding_value * np.ones((n, n)) + np.diag(\n self.x, self.offset) - np.diag(self.padding_value * np.ones(n))\n\n\nclass TestDiagV2Error(unittest.TestCase):\n def test_errors(self):\n paddle.enable_static()\n with program_guard(Program(), Program()):\n\n def test_diag_v2_type():\n x = [1, 2, 3]\n output = paddle.diag(x)\n\n self.assertRaises(TypeError, test_diag_v2_type)\n\n x = paddle.static.data('data', [3, 3])\n self.assertRaises(TypeError, paddle.diag, x, offset=2.5)\n\n self.assertRaises(TypeError, paddle.diag, x, padding_value=[9])\n\n x = paddle.static.data('data2', [3, 3, 3])\n self.assertRaises(ValueError, paddle.diag, x)\n\n\nclass TestDiagV2API(unittest.TestCase):\n def setUp(self):\n self.input_np = np.random.random(size=(10, 10)).astype(np.float32)\n self.expected0 = np.diag(self.input_np)\n self.expected1 = np.diag(self.input_np, k=1)\n self.expected2 = np.diag(self.input_np, k=-1)\n\n self.input_np2 = np.random.rand(100)\n self.offset = 0\n self.padding_value = 8\n n = self.input_np2.size\n self.expected3 = self.padding_value * np.ones(\n (n, n)) + np.diag(self.input_np2, self.offset) - np.diag(\n self.padding_value * np.ones(n))\n\n self.input_np3 = np.random.randint(-10, 10, size=(100)).astype(np.int64)\n self.padding_value = 8.0\n n = self.input_np3.size\n self.expected4 = self.padding_value * np.ones(\n (n, n)) + np.diag(self.input_np3, self.offset) - np.diag(\n self.padding_value * np.ones(n))\n\n self.padding_value = -8\n self.expected5 = self.padding_value * np.ones(\n (n, n)) + np.diag(self.input_np3, self.offset) - np.diag(\n self.padding_value * 
np.ones(n))\n\n self.input_np4 = np.random.random(size=(2000, 2000)).astype(np.float32)\n self.expected6 = np.diag(self.input_np4)\n self.expected7 = np.diag(self.input_np4, k=1)\n self.expected8 = np.diag(self.input_np4, k=-1)\n\n self.input_np5 = np.random.random(size=(2000)).astype(np.float32)\n self.expected9 = np.diag(self.input_np5)\n self.expected10 = np.diag(self.input_np5, k=1)\n self.expected11 = np.diag(self.input_np5, k=-1)\n\n self.input_np6 = np.random.random(size=(2000, 1500)).astype(np.float32)\n self.expected12 = np.diag(self.input_np6, k=-1)\n\n def run_imperative(self):\n x = paddle.to_tensor(self.input_np)\n y = paddle.diag(x)\n self.assertTrue(np.allclose(y.numpy(), self.expected0))\n\n y = paddle.diag(x, offset=1)\n self.assertTrue(np.allclose(y.numpy(), self.expected1))\n\n y = paddle.diag(x, offset=-1)\n self.assertTrue(np.allclose(y.numpy(), self.expected2))\n\n x = paddle.to_tensor(self.input_np2)\n y = paddle.diag(x, padding_value=8)\n self.assertTrue(np.allclose(y.numpy(), self.expected3))\n\n x = paddle.to_tensor(self.input_np3)\n y = paddle.diag(x, padding_value=8.0)\n self.assertTrue(np.allclose(y.numpy(), self.expected4))\n\n y = paddle.diag(x, padding_value=-8)\n self.assertTrue(np.allclose(y.numpy(), self.expected5))\n\n x = paddle.to_tensor(self.input_np4)\n y = paddle.diag(x)\n self.assertTrue(np.allclose(y.numpy(), self.expected6))\n\n y = paddle.diag(x, offset=1)\n self.assertTrue(np.allclose(y.numpy(), self.expected7))\n\n y = paddle.diag(x, offset=-1)\n self.assertTrue(np.allclose(y.numpy(), self.expected8))\n\n x = paddle.to_tensor(self.input_np5)\n y = paddle.diag(x)\n self.assertTrue(np.allclose(y.numpy(), self.expected9))\n\n y = paddle.diag(x, offset=1)\n self.assertTrue(np.allclose(y.numpy(), self.expected10))\n\n y = paddle.diag(x, offset=-1)\n self.assertTrue(np.allclose(y.numpy(), self.expected11))\n\n x = paddle.to_tensor(self.input_np6)\n y = paddle.diag(x, offset=-1)\n self.assertTrue(np.allclose(y.numpy(), self.expected12))\n\n def run_static(self, use_gpu=False):\n x = paddle.static.data(name='input', shape=[10, 10], dtype='float32')\n x2 = paddle.static.data(name='input2', shape=[100], dtype='float64')\n x3 = paddle.static.data(name='input3', shape=[100], dtype='int64')\n x4 = paddle.static.data(\n name='input4', shape=[2000, 2000], dtype='float32')\n x5 = paddle.static.data(name='input5', shape=[2000], dtype='float32')\n x6 = paddle.static.data(\n name='input6', shape=[2000, 1500], dtype='float32')\n result0 = paddle.diag(x)\n result1 = paddle.diag(x, offset=1)\n result2 = paddle.diag(x, offset=-1)\n result3 = paddle.diag(x, name='aaa')\n result4 = paddle.diag(x2, padding_value=8)\n result5 = paddle.diag(x3, padding_value=8.0)\n result6 = paddle.diag(x3, padding_value=-8)\n result7 = paddle.diag(x4)\n result8 = paddle.diag(x4, offset=1)\n result9 = paddle.diag(x4, offset=-1)\n result10 = paddle.diag(x5)\n result11 = paddle.diag(x5, offset=1)\n result12 = paddle.diag(x5, offset=-1)\n result13 = paddle.diag(x6, offset=-1)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n res0, res1, res2, res4, res5, res6, res7, res8, res9, res10, res11, res12, res13 = exe.run(\n feed={\n \"input\": self.input_np,\n \"input2\": self.input_np2,\n 'input3': self.input_np3,\n 'input4': self.input_np4,\n 'input5': self.input_np5,\n 'input6': self.input_np6\n },\n fetch_list=[\n result0, result1, result2, result4, result5, result6, result7,\n result8, result9, result10, 
result11, result12, result13\n ])\n\n self.assertTrue(np.allclose(res0, self.expected0))\n self.assertTrue(np.allclose(res1, self.expected1))\n self.assertTrue(np.allclose(res2, self.expected2))\n self.assertTrue('aaa' in result3.name)\n self.assertTrue(np.allclose(res4, self.expected3))\n self.assertTrue(np.allclose(res5, self.expected4))\n self.assertTrue(np.allclose(res6, self.expected5))\n self.assertTrue(np.allclose(res7, self.expected6))\n self.assertTrue(np.allclose(res8, self.expected7))\n self.assertTrue(np.allclose(res9, self.expected8))\n self.assertTrue(np.allclose(res10, self.expected9))\n self.assertTrue(np.allclose(res11, self.expected10))\n self.assertTrue(np.allclose(res12, self.expected11))\n self.assertTrue(np.allclose(res13, self.expected12))\n\n def test_cpu(self):\n paddle.disable_static(place=paddle.fluid.CPUPlace())\n self.run_imperative()\n with _test_eager_guard():\n self.run_imperative()\n\n paddle.enable_static()\n\n with fluid.program_guard(fluid.Program()):\n self.run_static()\n\n def test_gpu(self):\n if not fluid.core.is_compiled_with_cuda():\n return\n\n paddle.disable_static(place=paddle.fluid.CUDAPlace(0))\n self.run_imperative()\n with _test_eager_guard():\n self.run_imperative()\n paddle.enable_static()\n\n with fluid.program_guard(fluid.Program()):\n self.run_static(use_gpu=True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\nimport sys\nimport unittest\nsys.path.append(\"..\")\n\nimport paddle\n\nfrom op_test_xpu import XPUOpTest\nfrom xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper\n\npaddle.enable_static()\n\n\nclass XPUTestPriorBoxOp(XPUOpTestWrapper):\n def __init__(self):\n self.op_name = 'prior_box'\n self.use_dynamic_create_class = False\n\n class TestPriorBoxOp(XPUOpTest):\n def setUp(self):\n self.op_type = \"prior_box\"\n self.use_xpu = True\n self.dtype = self.in_type\n self.set_data()\n\n def set_data(self):\n self.init_test_params()\n self.init_test_input()\n self.init_test_output()\n self.inputs = {'Input': self.input, 'Image': self.image}\n\n self.attrs = {\n 'min_sizes': self.min_sizes,\n 'aspect_ratios': self.aspect_ratios,\n 'variances': self.variances,\n 'flip': self.flip,\n 'clip': self.clip,\n 'min_max_aspect_ratios_order': self.min_max_aspect_ratios_order,\n 'step_w': self.step_w,\n 'step_h': self.step_h,\n 'offset': self.offset\n }\n if len(self.max_sizes) > 0:\n self.attrs['max_sizes'] = self.max_sizes\n\n self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var}\n\n def test_check_output(self):\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place)\n\n def set_max_sizes(self):\n max_sizes = [5, 10]\n self.max_sizes = np.array(max_sizes).astype('float32').tolist()\n\n def set_min_max_aspect_ratios_order(self):\n self.min_max_aspect_ratios_order = False\n\n def 
init_test_params(self):\n            self.layer_w = 32\n            self.layer_h = 32\n\n            self.image_w = 40\n            self.image_h = 40\n\n            self.step_w = float(self.image_w) / float(self.layer_w)\n            self.step_h = float(self.image_h) / float(self.layer_h)\n\n            self.input_channels = 2\n            self.image_channels = 3\n            self.batch_size = 10\n\n            self.min_sizes = [2, 4]\n            self.min_sizes = np.array(self.min_sizes).astype('float32').tolist()\n            self.set_max_sizes()\n            self.aspect_ratios = [2.0, 3.0]\n            self.flip = True\n            self.set_min_max_aspect_ratios_order()\n            self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]\n            self.aspect_ratios = np.array(\n                self.aspect_ratios, dtype=np.float64).flatten()\n            self.variances = [0.1, 0.1, 0.2, 0.2]\n            self.variances = np.array(self.variances, dtype=np.float64).flatten()\n\n            self.clip = True\n            self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)\n            if len(self.max_sizes) > 0:\n                self.num_priors += len(self.max_sizes)\n            self.offset = 0.5\n\n        def init_test_input(self):\n            self.image = np.random.random(\n                (self.batch_size, self.image_channels, self.image_w,\n                 self.image_h)).astype(self.dtype)\n\n            self.input = np.random.random(\n                (self.batch_size, self.input_channels, self.layer_w,\n                 self.layer_h)).astype(self.dtype)\n\n        def init_test_output(self):\n            out_dim = (self.layer_h, self.layer_w, self.num_priors, 4)\n            out_boxes = np.zeros(out_dim).astype(self.dtype)\n            out_var = np.zeros(out_dim).astype(self.dtype)\n\n            idx = 0\n            for h in range(self.layer_h):\n                for w in range(self.layer_w):\n                    c_x = (w + self.offset) * self.step_w\n                    c_y = (h + self.offset) * self.step_h\n                    idx = 0\n                    for s in range(len(self.min_sizes)):\n                        min_size = self.min_sizes[s]\n                        if not self.min_max_aspect_ratios_order:\n                            # rest of priors\n                            for r in range(len(self.real_aspect_ratios)):\n                                ar = self.real_aspect_ratios[r]\n                                c_w = min_size * math.sqrt(ar) / 2\n                                c_h = (min_size / math.sqrt(ar)) / 2\n                                out_boxes[h, w, idx, :] = [\n                                    (c_x - c_w) / self.image_w, (c_y - c_h) /\n                                    self.image_h, (c_x + c_w) / self.image_w,\n                                    (c_y + c_h) / self.image_h\n                                ]\n                                idx += 1\n\n                            if len(self.max_sizes) > 0:\n                                max_size = self.max_sizes[s]\n                                # second prior: aspect_ratio = 1,\n                                c_w = c_h = math.sqrt(min_size * max_size) / 2\n                                out_boxes[h, w, idx, :] = [\n                                    (c_x - c_w) / self.image_w, (c_y - c_h) /\n                                    self.image_h, (c_x + c_w) / self.image_w,\n                                    (c_y + c_h) / self.image_h\n                                ]\n                                idx += 1\n                        else:\n                            c_w = c_h = min_size / 2.\n                            out_boxes[h, w, idx, :] = [\n                                (c_x - c_w) / self.image_w, (c_y - c_h) /\n                                self.image_h, (c_x + c_w) / self.image_w,\n                                (c_y + c_h) / self.image_h\n                            ]\n                            idx += 1\n                            if len(self.max_sizes) > 0:\n                                max_size = self.max_sizes[s]\n                                # second prior: aspect_ratio = 1,\n                                c_w = c_h = math.sqrt(min_size * max_size) / 2\n                                out_boxes[h, w, idx, :] = [\n                                    (c_x - c_w) / self.image_w, (c_y - c_h) /\n                                    self.image_h, (c_x + c_w) / self.image_w,\n                                    (c_y + c_h) / self.image_h\n                                ]\n                                idx += 1\n\n                            # rest of priors\n                            for r in range(len(self.real_aspect_ratios)):\n                                ar = self.real_aspect_ratios[r]\n                                if abs(ar - 1.) 
< 1e-6:\n                                continue\n                            c_w = min_size * math.sqrt(ar) / 2\n                            c_h = (min_size / math.sqrt(ar)) / 2\n                            out_boxes[h, w, idx, :] = [\n                                (c_x - c_w) / self.image_w, (c_y - c_h) /\n                                self.image_h, (c_x + c_w) / self.image_w,\n                                (c_y + c_h) / self.image_h\n                            ]\n                            idx += 1\n\n        # clip the prior's coordinates so that they lie within [0, 1]\n        if self.clip:\n            out_boxes = np.clip(out_boxes, 0.0, 1.0)\n        # set the variance.\n        out_var = np.tile(self.variances, (self.layer_h, self.layer_w,\n                                           self.num_priors, 1))\n        self.out_boxes = out_boxes.astype(self.dtype)\n        self.out_var = out_var.astype(self.dtype)\n\n    class TestPriorBoxOpWithoutMaxSize(TestPriorBoxOp):\n        def set_max_sizes(self):\n            self.max_sizes = []\n\n    class TestPriorBoxOpWithSpecifiedOutOrder(TestPriorBoxOp):\n        def set_min_max_aspect_ratios_order(self):\n            self.min_max_aspect_ratios_order = True\n\n\nsupport_types = get_xpu_op_support_types('prior_box')\nfor stype in support_types:\n    create_test_class(globals(), XPUTestPriorBoxOp, stype)\n\nif __name__ == '__main__':\n    unittest.main()\n", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\nfrom paddle.fluid.framework import _test_eager_guard\n\npaddle.enable_static()\n\n\nclass TestTruncOp(OpTest):\n    def setUp(self):\n        self.op_type = \"trunc\"\n        self.dtype = np.float64\n        self.init_dtype_type()  # let subclasses override the dtype\n        np.random.seed(2021)\n        self.inputs = {'X': np.random.random((20, 20)).astype(self.dtype)}\n        self.outputs = {'Out': (np.trunc(self.inputs['X']))}\n\n    def init_dtype_type(self):\n        self.dtype = np.float64\n\n    def test_check_output(self):\n        self.check_output(check_eager=True)\n\n    def test_check_grad(self):\n        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5, check_eager=True)\n\n\nclass TestFloatTruncOp(TestTruncOp):\n    def init_dtype_type(self):\n        self.dtype = np.float32\n\n\nclass TestIntTruncOp(TestTruncOp):\n    def init_dtype_type(self):\n        self.dtype = np.int32\n\n\nclass TestTruncAPI(unittest.TestCase):\n    def setUp(self):\n        self.shape = [20, 20]\n        self.x = np.random.random((20, 20)).astype(np.float32)\n        self.place = paddle.CPUPlace()\n\n    def test_api_static(self):\n        paddle.enable_static()\n        with paddle.static.program_guard(paddle.static.Program()):\n            x = paddle.fluid.data('X', self.shape)\n            out = paddle.trunc(x)\n            exe = paddle.static.Executor(self.place)\n            res = exe.run(feed={'X': self.x}, fetch_list=[out])\n        out_ref = np.trunc(self.x)\n        for out in res:\n            self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True)\n\n    def test_api_dygraph(self):\n        paddle.disable_static(self.place)\n        x_tensor = paddle.to_tensor(self.x)\n        out = paddle.trunc(x_tensor)\n        out_ref = np.trunc(self.x)\n        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)\n        paddle.enable_static()\n\n    def test_api_eager_dygraph(self):\n        with _test_eager_guard():\n            
self.test_api_dygraph()\n\n def test_errors(self):\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.fluid.data('X', [20, 20], 'bool')\n self.assertRaises(TypeError, paddle.trunc, x)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
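The prior-box reference output above is built cell by cell from center, step, and aspect ratio. A minimal standalone sketch of the same geometry for a single feature-map cell, with illustrative values mirroring init_test_params (a 32x32 map over a 40x40 image, min_size 2.0, aspect ratio 2.0), is:

import math

# illustrative values taken from init_test_params above
image_w = image_h = 40.0
layer_w = layer_h = 32.0
step_w, step_h = image_w / layer_w, image_h / layer_h
offset, min_size, ar = 0.5, 2.0, 2.0

# center of the (h=0, w=0) cell, then half-width/half-height for this ratio
c_x, c_y = (0 + offset) * step_w, (0 + offset) * step_h
c_w, c_h = min_size * math.sqrt(ar) / 2, (min_size / math.sqrt(ar)) / 2
box = [(c_x - c_w) / image_w, (c_y - c_h) / image_h,
       (c_x + c_w) / image_w, (c_y + c_h) / image_h]
print(box)  # normalised [xmin, ymin, xmax, ymax] for one prior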
[ [ "numpy.sum", "numpy.prod" ], [ "numpy.diag", "numpy.random.random", "numpy.allclose", "numpy.ones", "numpy.random.rand", "numpy.random.randint" ], [ "numpy.random.random", "numpy.clip", "numpy.tile", "numpy.array", "numpy.zeros" ], [ "numpy.trunc", "numpy.random.random", "numpy.allclose", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
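The trunc tests above validate paddle.trunc against numpy's reference implementation. A minimal sketch of that check, assuming paddle is installed and run in eager (dynamic-graph) mode:

import numpy as np
import paddle

paddle.disable_static()  # the sketch assumes eager mode
x = (np.random.random((4, 4)).astype(np.float32) * 4) - 2
out = paddle.trunc(paddle.to_tensor(x))  # truncates each element toward zero
assert np.allclose(out.numpy(), np.trunc(x), rtol=1e-08)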
kafan1986/Peppa-Facial-Landmark-PyTorch
[ "238063317fd31c4c21c5c43692e6a5d769970370" ]
[ "utils/augmentation.py" ]
[ "import os\nimport numpy as np\nimport cv2\nimport random\nimport math\n\n\ndef Rotate_aug(src, angle, label=None, center=None, scale=1.0):\n image = src\n (h, w) = image.shape[:2]\n if center is None:\n center = (w / 2, h / 2)\n M = cv2.getRotationMatrix2D(center, angle, scale)\n if label is None:\n for i in range(image.shape[2]):\n image[:, :, i] = cv2.warpAffine(image[:, :, i], M, (w, h), flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_CONSTANT, borderValue=[127.,127.,127.])\n return image, None\n else:\n label = label.T\n full_M = np.row_stack((M, np.asarray([0, 0, 1])))\n img_rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,\n borderValue=[127.,127.,127.])\n full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))\n label_rotated = np.dot(full_M, full_label)\n label_rotated = label_rotated[0:2, :]\n label_rotated = label_rotated.T\n return img_rotated, label_rotated\n\n\ndef Rotate_coordinate(label, rt_matrix):\n if rt_matrix.shape[0] == 2:\n rt_matrix = np.row_stack((rt_matrix, np.asarray([0, 0, 1])))\n full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))\n label_rotated = np.dot(rt_matrix, full_label)\n label_rotated = label_rotated[0:2, :]\n return label_rotated\n\n\ndef box_to_point(boxes):\n points_set = np.zeros(shape=[4 * boxes.shape[0], 2])\n for i in range(boxes.shape[0]):\n points_set[4 * i] = np.array([boxes[i][0], boxes[i][1]])\n points_set[4 * i + 1] = np.array([boxes[i][0], boxes[i][3]])\n points_set[4 * i + 2] = np.array([boxes[i][2], boxes[i][3]])\n points_set[4 * i + 3] = np.array([boxes[i][2], boxes[i][1]])\n return points_set\n\n\ndef point_to_box(points):\n boxes = []\n points = points.reshape([-1, 4, 2])\n for i in range(points.shape[0]):\n box = [np.min(points[i][:, 0]), np.min(points[i][:, 1]), np.max(points[i][:, 0]), np.max(points[i][:, 1])]\n boxes.append(box)\n return np.array(boxes)\n\n\ndef Rotate_with_box(src, angle, boxes=None, center=None, scale=1.0):\n label = box_to_point(boxes)\n image = src\n (h, w) = image.shape[:2]\n if center is None:\n center = (w / 2, h / 2)\n M = cv2.getRotationMatrix2D(center, angle, scale)\n new_size = Rotate_coordinate(np.array([[0, w, w, 0], [0, 0, h, h]]), M)\n new_h, new_w = np.max(new_size[1]) - np.min(new_size[1]), np.max(new_size[0]) - np.min(new_size[0])\n scale = min(h / new_h, w / new_w)\n M = cv2.getRotationMatrix2D(center, angle, scale)\n if boxes is None:\n for i in range(image.shape[2]):\n image[:, :, i] = cv2.warpAffine(image[:, :, i], M, (w, h), flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_CONSTANT)\n return image, None\n else:\n label = label.T\n full_M = np.row_stack((M, np.asarray([0, 0, 1])))\n img_rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)\n full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))\n label_rotated = np.dot(full_M, full_label)\n label_rotated = label_rotated[0:2, :]\n label_rotated = label_rotated.T\n boxes_rotated = point_to_box(label_rotated)\n return img_rotated, boxes_rotated\n\n\ndef Perspective_aug(src, strength, label=None):\n image = src\n pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])\n pts1 = np.random.rand(4, 2) * random.uniform(-strength, strength) + pts_base\n pts1 = pts1.astype(np.float32)\n M = cv2.getPerspectiveTransform(pts1, pts_base)\n trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))\n label_rotated = None\n if label is not None:\n label = label.T\n full_label = np.row_stack((label, 
np.ones(shape=(1, label.shape[1]))))\n        label_rotated = np.dot(M, full_label)\n        label_rotated = label_rotated / label_rotated[2, :]  # homogeneous divide: a perspective transform maps points projectively\n        label_rotated = label_rotated[0:2, :]\n        label_rotated = label_rotated.astype(np.int32)\n        label_rotated = label_rotated.T\n    return trans_img, label_rotated\n\n\ndef Affine_aug(src, strength, label=None):\n    image = src\n    pts_base = np.float32([[10, 100], [200, 50], [100, 250]])\n    pts1 = np.random.rand(3, 2) * random.uniform(-strength, strength) + pts_base\n    pts1 = pts1.astype(np.float32)\n    M = cv2.getAffineTransform(pts1, pts_base)\n    trans_img = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]), borderMode=cv2.BORDER_CONSTANT,\n                               borderValue=[127.,127.,127.])\n    label_rotated = None\n    if label is not None:\n        label = label.T\n        full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))\n        label_rotated = np.dot(M, full_label)\n        label_rotated = label_rotated.T\n    return trans_img, label_rotated\n\n\ndef Padding_aug(src, max_pattern_ratio=0.05):\n    src = src.astype(np.float32)\n    pattern = np.ones_like(src)\n    ratio = random.uniform(0, max_pattern_ratio)\n    height, width, _ = src.shape\n    if random.uniform(0, 1) > 0.5:\n        if random.uniform(0, 1) > 0.5:\n            pattern[0:int(ratio * height), :, :] = 0\n        else:\n            pattern[height - int(ratio * height):, :, :] = 0\n    else:\n        if random.uniform(0, 1) > 0.5:\n            pattern[:, 0:int(ratio * width), :] = 0\n        else:\n            pattern[:, width - int(ratio * width):, :] = 0\n    bias_pattern = (1 - pattern) * [127.,127.,127.]\n    img = src * pattern + bias_pattern\n    img = img.astype(np.uint8)\n    return img\n\n\ndef Blur_heatmaps(src, ksize=(3, 3)):\n    for i in range(src.shape[2]):\n        src[:, :, i] = cv2.GaussianBlur(src[:, :, i], ksize, 0)\n        amin, amax = src[:, :, i].min(), src[:, :, i].max()  # find the min and max values\n        if amax > 0:\n            src[:, :, i] = (src[:, :, i] - amin) / (amax - amin)  # normalise: (element - min) / (max - min)\n    return src\n\n\ndef Blur_aug(src, ksize=(3, 3)):\n    for i in range(src.shape[2]):\n        src[:, :, i] = cv2.GaussianBlur(src[:, :, i], ksize, 1.5)\n    return src\n\n\ndef Img_dropout(src, max_pattern_ratio=0.05):\n    width_ratio = random.uniform(0, max_pattern_ratio)\n    height_ratio = random.uniform(0, max_pattern_ratio)\n    width = src.shape[1]\n    height = src.shape[0]\n    block_width = width * width_ratio\n    block_height = height * height_ratio\n    width_start = int(random.uniform(0, width - block_width))\n    width_end = int(width_start + block_width)\n    height_start = int(random.uniform(0, height - block_height))\n    height_end = int(height_start + block_height)\n    src[height_start:height_end, width_start:width_end, :] = np.array([127.,127.,127.], dtype=src.dtype)\n    return src\n\n\ndef Fill_img(img_raw, target_height, target_width, label=None):\n    channel = img_raw.shape[2]\n    raw_height = img_raw.shape[0]\n    raw_width = img_raw.shape[1]\n    if raw_width / raw_height >= target_width / target_height:\n        shape_need = [int(target_height / target_width * raw_width), raw_width, channel]\n        img_fill = np.zeros(shape_need, dtype=img_raw.dtype) + np.array([127.,127.,127.], dtype=img_raw.dtype)\n        shift_x = (img_fill.shape[1] - raw_width) // 2\n        shift_y = (img_fill.shape[0] - raw_height) // 2\n        for i in range(channel):\n            img_fill[shift_y:raw_height + shift_y, shift_x:raw_width + shift_x, i] = img_raw[:, :, i]\n    else:\n        shape_need = [raw_height, int(target_width / target_height * raw_height), channel]\n        img_fill = np.zeros(shape_need, dtype=img_raw.dtype) + np.array([127.,127.,127.], dtype=img_raw.dtype)\n        shift_x = (img_fill.shape[1] - raw_width) // 2\n        shift_y = (img_fill.shape[0] - raw_height) // 2\n        for i in range(channel):\n            img_fill[shift_y:raw_height + shift_y, shift_x:raw_width + 
shift_x, i] = img_raw[:, :, i]\n    if label is None:\n        return img_fill, shift_x, shift_y\n    else:\n        label[:, 0] += shift_x\n        label[:, 1] += shift_y\n        return img_fill, label\n\n\ndef Random_crop(src, shrink):\n    h, w, _ = src.shape\n    h_shrink = int(h * shrink)\n    w_shrink = int(w * shrink)\n    bimg = cv2.copyMakeBorder(src, h_shrink, h_shrink, w_shrink, w_shrink, borderType=cv2.BORDER_CONSTANT,\n                              value=(0, 0, 0))\n    start_h = random.randint(0, 2 * h_shrink)\n    start_w = random.randint(0, 2 * w_shrink)\n    target_img = bimg[start_h:start_h + h, start_w:start_w + w, :]\n    return target_img\n\n\ndef box_in_img(img, boxes, min_overlap=0.5):\n    raw_bboxes = np.array(boxes)\n    face_area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n    h, w, _ = img.shape\n    boxes[:, 0][boxes[:, 0] <= 0] = 0\n    boxes[:, 0][boxes[:, 0] >= w] = w\n    boxes[:, 2][boxes[:, 2] <= 0] = 0\n    boxes[:, 2][boxes[:, 2] >= w] = w\n    boxes[:, 1][boxes[:, 1] <= 0] = 0\n    boxes[:, 1][boxes[:, 1] >= h] = h\n    boxes[:, 3][boxes[:, 3] <= 0] = 0\n    boxes[:, 3][boxes[:, 3] >= h] = h\n    boxes_in = []\n    for i in range(boxes.shape[0]):\n        box = boxes[i]\n        if ((box[3] - box[1]) * (box[2] - box[0])) / face_area[i] > min_overlap:\n            boxes_in.append(boxes[i])\n    boxes_in = np.array(boxes_in)\n    return boxes_in\n\n\ndef Random_scale_withbbox(image, bboxes, target_shape, jitter=0.5):\n    hi, wi, _ = image.shape\n    while True:\n        if len(bboxes) == 0:\n            print('Random_scale_withbbox received an empty bbox list')\n        bboxes_ = np.array(bboxes)\n        crop_h = int(hi * random.uniform(0.2, 1))\n        crop_w = int(wi * random.uniform(0.2, 1))\n        start_h = random.randint(0, hi - crop_h)\n        start_w = random.randint(0, wi - crop_w)\n        croped = image[start_h:start_h + crop_h, start_w:start_w + crop_w, :]\n        bboxes_[:, 0] = bboxes_[:, 0] - start_w\n        bboxes_[:, 1] = bboxes_[:, 1] - start_h\n        bboxes_[:, 2] = bboxes_[:, 2] - start_w\n        bboxes_[:, 3] = bboxes_[:, 3] - start_h\n        bboxes_fix = box_in_img(croped, bboxes_)\n        if len(bboxes_fix) > 0:\n            break\n    h, w = target_shape\n    croped_h, croped_w, _ = croped.shape\n    croped_h_w_ratio = croped_h / croped_w\n    rescale_h = int(h * random.uniform(0.5, 1))\n    rescale_w = int(rescale_h / (random.uniform(0.7, 1.3) * croped_h_w_ratio))\n    rescale_w = np.clip(rescale_w, 0, w)\n    image = cv2.resize(croped, (rescale_w, rescale_h))\n    new_image = np.zeros(shape=[h, w, 3], dtype=np.uint8)\n    dx = int(random.randint(0, w - rescale_w))\n    dy = int(random.randint(0, h - rescale_h))\n    new_image[dy:dy + rescale_h, dx:dx + rescale_w, :] = image\n    bboxes_fix[:, 0] = bboxes_fix[:, 0] * rescale_w / croped_w + dx\n    bboxes_fix[:, 1] = bboxes_fix[:, 1] * rescale_h / croped_h + dy\n    bboxes_fix[:, 2] = bboxes_fix[:, 2] * rescale_w / croped_w + dx\n    bboxes_fix[:, 3] = bboxes_fix[:, 3] * rescale_h / croped_h + dy\n    return new_image, bboxes_fix\n\n\ndef Random_flip(im, boxes):\n    im_lr = np.fliplr(im).copy()\n    h, w, _ = im.shape\n    xmin = w - boxes[:, 2]\n    xmax = w - boxes[:, 0]\n    boxes[:, 0] = xmin\n    boxes[:, 2] = xmax\n    return im_lr, boxes\n\n\ndef Mirror(src, label=None, symmetry=None):\n    img = cv2.flip(src, 1)\n    if label is None:\n        return img, label\n\n    width = img.shape[1]\n    cod = []\n    allc = []\n    for i in range(label.shape[0]):\n        x, y = label[i][0], label[i][1]\n        if x >= 0:\n            x = width - 1 - x\n        cod.append((x, y))\n    for (q, w) in symmetry:\n        cod[q], cod[w] = cod[w], cod[q]\n    for i in range(label.shape[0]):\n        allc.append(cod[i][0])\n        allc.append(cod[i][1])\n    label = np.array(allc).reshape(label.shape[0], 2)\n    return img, label\n\n\ndef produce_heat_maps(label, map_size, stride, sigma):\n    def produce_heat_map(center, 
map_size, stride, sigma):\n grid_y = map_size[0] // stride\n grid_x = map_size[1] // stride\n start = stride / 2.0 - 0.5\n y_range = [i for i in range(grid_y)]\n x_range = [i for i in range(grid_x)]\n xx, yy = np.meshgrid(x_range, y_range)\n xx = xx * stride + start\n yy = yy * stride + start\n d2 = (xx - center[0]) ** 2 + (yy - center[1]) ** 2\n exponent = d2 / 2.0 / sigma / sigma\n heatmap = np.exp(-exponent)\n am = np.amax(heatmap)\n if am > 0:\n heatmap /= am / 255.\n return heatmap\n\n all_keypoints = label\n point_num = all_keypoints.shape[0]\n heatmaps_this_img = np.zeros([map_size[0] // stride, map_size[1] // stride, point_num])\n for k in range(point_num):\n heatmap = produce_heat_map([all_keypoints[k][0], all_keypoints[k][1]], map_size, stride, sigma)\n heatmaps_this_img[:, :, k] = heatmap\n return heatmaps_this_img\n\n\ndef visualize_heatmap_target(heatmap):\n map_size = heatmap.shape[0:2]\n frame_num = heatmap.shape[2]\n heat_ = np.zeros([map_size[0], map_size[1]])\n for i in range(frame_num):\n heat_ = heat_ + heatmap[:, :, i]\n cv2.namedWindow('heat_map', 0)\n cv2.imshow('heat_map', heat_)\n cv2.waitKey(0)\n\n\ndef produce_heatmaps_with_bbox(image, label, h_out, w_out, num_klass, ksize=9, sigma=0):\n heatmap = np.zeros(shape=[h_out, w_out, num_klass])\n h, w, _ = image.shape\n for single_box in label:\n if single_box[4] >= 0:\n center = [(single_box[0] + single_box[2]) / 2 / w, (single_box[1] + single_box[3]) / 2 / h] ###0-1\n heatmap[round(center[1] * h_out), round(center[0] * w_out), int(single_box[4])] = 1.\n heatmap = cv2.GaussianBlur(heatmap, (ksize, ksize), sigma)\n am = np.amax(heatmap)\n if am > 0:\n heatmap /= am / 255.\n heatmap = np.expand_dims(heatmap, -1)\n return heatmap\n\n\ndef produce_heatmaps_with_keypoint(image, label, h_out, w_out, num_klass, ksize=7, sigma=0):\n heatmap = np.zeros(shape=[h_out, w_out, num_klass])\n h, w, _ = image.shape\n for i in range(label.shape[0]):\n single_point = label[i]\n if single_point[0] > 0 and single_point[1] > 0:\n heatmap[int(single_point[1] * (h_out - 1)), int(single_point[0] * (w_out - 1)), i] = 1.\n heatmap = cv2.GaussianBlur(heatmap, (ksize, ksize), sigma)\n am = np.amax(heatmap)\n if am > 0:\n heatmap /= am / 255.\n return heatmap\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.dot", "numpy.amax", "numpy.expand_dims", "numpy.ones_like", "numpy.meshgrid", "numpy.clip", "numpy.min", "numpy.fliplr", "numpy.asarray", "numpy.ones", "numpy.max", "numpy.random.rand", "numpy.float32", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
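A minimal usage sketch for the rotation augmentation above, with a synthetic image and a hypothetical five-point landmark array (the import path follows the repository's utils/augmentation.py layout, which is an assumption):

import numpy as np
from utils.augmentation import Rotate_aug  # assumed import path

# synthetic grey 256x256 BGR image and five hypothetical (x, y) landmarks
img = np.full((256, 256, 3), 127, dtype=np.uint8)
landmarks = np.array([[100., 100.], [150., 100.], [125., 130.],
                      [110., 160.], [140., 160.]])

rotated_img, rotated_landmarks = Rotate_aug(img, angle=15, label=landmarks)
# rotated_landmarks keeps the (5, 2) shape, mapped by the same affine matrix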
kanguyn/Deep_Learning_Spring2018
[ "90b65b2aa15ca19de570a3b99976906ed5533f67" ]
[ "Lab1-ANN-handwritten-digit-recognition/NeuralNetwork.py" ]
[ "import time\nimport random\nimport numpy as np\nfrom utils import *\nfrom transfer_functions import * \n\n\nclass NeuralNetwork(object):\n    \n    def __init__(self, input_layer_size, hidden_layer_size, output_layer_size, transfer_f=sigmoid, transfer_df=dsigmoid):\n        \"\"\"\n        input_layer_size: number of input neurons\n        hidden_layer_size: number of hidden neurons\n        output_layer_size: number of output neurons\n        iterations: number of iterations\n        learning_rate: initial learning rate\n        \"\"\"\n\n        # initialize transfer functions\n        self.transfer_f = transfer_f\n        self.transfer_df = transfer_df\n\n        # initialize layer sizes\n        self.input_layer_size = input_layer_size+1  # +1 for the bias node in the input Layer\n        self.hidden_layer_size = hidden_layer_size+1  # +1 for the bias node in the hidden layer \n        self.output_layer_size = output_layer_size\n\n        # initialize arrays for activations\n        self.u_hidden = np.zeros((1, self.hidden_layer_size-1))\n        self.u_output = np.zeros((1, self.output_layer_size))\n\n        # initialize arrays for outputs\n        self.o_input = np.ones((1, self.input_layer_size))\n        self.o_hidden = np.ones((1, self.hidden_layer_size))\n        self.o_output = np.ones((1, self.output_layer_size))\n\n        # initialize arrays for partial derivatives according to activations\n        self.dE_du_hidden = np.zeros((1, self.hidden_layer_size-1))\n        self.dE_du_output = np.zeros((1, self.output_layer_size))\n\n        # create randomized weights Yann LeCun method in 1988's paper (Default values)\n        input_range = 1.0 / self.input_layer_size ** (1/2)\n        self.W_input_to_hidden = np.random.normal(loc = 0, scale = input_range, size =(self.input_layer_size, self.hidden_layer_size-1))\n        self.W_hidden_to_output = np.random.uniform(size = (self.hidden_layer_size, self.output_layer_size)) / np.sqrt(self.hidden_layer_size)\n\n    def weights_init(self,wi=None,wo=None):\n        input_range = 1.0 / self.input_layer_size ** (1/2)\n        if wi is not None:\n            self.W_input_to_hidden = wi  # weights between input and hidden layers\n        else:\n            self.W_input_to_hidden = np.random.normal(loc = 0, scale = input_range, size =(self.input_layer_size, self.hidden_layer_size-1))\n        if wo is not None:\n            self.W_hidden_to_output = wo  # weights between hidden and output layers\n        else:\n            self.W_hidden_to_output = np.random.uniform(size = (self.hidden_layer_size, self.output_layer_size)) / np.sqrt(self.hidden_layer_size)\n\n    def train(self, data, validation_data, iterations=50, learning_rate=5.0, verbose=False):\n        start_time = time.time()\n        training_accuracies = []\n        validation_accuracies = []\n        errors = []\n        inputs = data[0]\n        targets = data[1]\n        best_val_acc = 100*self.predict(validation_data)/len(validation_data[0])\n        best_i2h_W = self.W_input_to_hidden.copy()\n        best_h2o_W = self.W_hidden_to_output.copy()\n        for it in range(iterations):\n            self.feedforward(inputs)\n            self.backpropagate(targets, learning_rate=learning_rate)\n            error = targets - self.o_output\n            error *= error\n            training_accuracies.append(100*self.predict(data)/len(data[0]))\n            validation_accuracies.append(100*self.predict(validation_data)/len(validation_data[0]))\n            if validation_accuracies[-1] > best_val_acc:\n                best_val_acc = validation_accuracies[-1]  # track the new best so later comparisons are against it\n                best_i2h_W = self.W_input_to_hidden.copy()\n                best_h2o_W = self.W_hidden_to_output.copy()\n            if verbose:\n                print(\"[Iteration %2d/%2d]  -Training_Accuracy: %2.2f %%  -Validation_Accuracy: %2.2f %% -time: %2.2f \" %(it+1, iterations,\n                        training_accuracies[-1], validation_accuracies[-1], time.time() - start_time))\n                print(\"    - MSE:\", np.sum(error)/len(targets))\n        print(\"Training time:\", time.time()-start_time)\n        # keep the weights that achieved the best validation accuracy\n        self.W_input_to_hidden = best_i2h_W\n        self.W_hidden_to_output = best_h2o_W\n        plot_train_val(range(1, iterations+1), training_accuracies, validation_accuracies, \"Accuracy\")\n    \n    def train_xe(self, data, validation_data, iterations=50, learning_rate=5.0, verbose=False):\n        start_time = time.time()\n        training_accuracies = []\n        validation_accuracies = []\n        errors = []\n        xes = []\n        inputs = data[0]\n        targets = data[1]\n        best_val_acc = 100*self.predict(validation_data)/len(validation_data[0])\n        best_i2h_W = self.W_input_to_hidden.copy()\n        best_h2o_W = self.W_hidden_to_output.copy()\n        for it in range(iterations):\n            self.feedforward_xe(inputs)\n            self.backpropagate_xe(targets, learning_rate=learning_rate)\n            xe = targets*np.log(self.o_output)*(-1)\n            error = targets - self.o_output\n            error *= error\n            training_accuracies.append(100*self.predict(data)/len(data[0]))\n            validation_accuracies.append(100*self.predict(validation_data)/len(validation_data[0]))\n            if validation_accuracies[-1] > best_val_acc:\n                best_val_acc = validation_accuracies[-1]  # track the new best so later comparisons are against it\n                best_i2h_W = self.W_input_to_hidden.copy()\n                best_h2o_W = self.W_hidden_to_output.copy()\n            if verbose:\n                print(\"[Iteration %2d/%2d]  -Training_Accuracy: %2.2f %%  -Validation_Accuracy: %2.2f %% -time: %2.2f \" %(it+1, iterations,\n                        training_accuracies[-1], validation_accuracies[-1], time.time() - start_time))\n                print(\"    - MSE:\", np.sum(error)/len(targets))\n                print(\"    - X-Entropy:\", np.sum(xe)/len(targets))\n        print(\"Training time:\", time.time()-start_time)\n        self.W_input_to_hidden = best_i2h_W\n        self.W_hidden_to_output = best_h2o_W\n        plot_train_val(range(1, iterations+1), training_accuracies, validation_accuracies, \"Accuracy\")\n\n    def predict(self, test_data):\n        \"\"\" Evaluate performance by counting how many examples in test_data are correctly \n           evaluated. \"\"\"\n        self.feedforward(test_data[0])\n        answer = np.argmax(test_data[1], axis=1)\n        prediction = np.argmax(self.o_output, axis=1)\n        count = len(test_data[0]) - np.count_nonzero(answer - prediction)\n        return count \n\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.ones", "numpy.random.normal", "numpy.argmax", "numpy.count_nonzero", "numpy.random.uniform", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
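A minimal sketch of the weight initialisation used in the constructor above, with illustrative MNIST-like layer sizes (784 inputs and 100 hidden units, plus one bias node each, 10 outputs):

import numpy as np

input_layer_size, hidden_layer_size, output_layer_size = 785, 101, 10

# LeCun-style scaling: normal weights scaled by 1/sqrt(fan-in)
input_range = 1.0 / input_layer_size ** (1 / 2)
W_input_to_hidden = np.random.normal(
    loc=0, scale=input_range, size=(input_layer_size, hidden_layer_size - 1))
W_hidden_to_output = np.random.uniform(
    size=(hidden_layer_size, output_layer_size)) / np.sqrt(hidden_layer_size)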
joshbriegal/roto
[ "58694285932e101286e407bc521b2fa80e4eed47", "58694285932e101286e407bc521b2fa80e4eed47" ]
[ "src/roto/methods/lombscargle.py", "src/roto/methods/gacf.py" ]
[ "import logging\nfrom typing import Optional\n\nimport numpy as np\nimport progressbar\nfrom astropy.timeseries import LombScargle\nfrom matplotlib.axes import Axes\n\nfrom roto.methods.periodfinder import PeriodFinder, Periodogram, PeriodResult\n\nlogger = logging.getLogger(__name__)\n\n\nclass LombScarglePeriodFinder(PeriodFinder):\n \"\"\"LombScargle method to find periods.\n Conforms to PeriodFinder interface.\n \"\"\"\n\n def __init__(\n self,\n timeseries: np.ndarray,\n flux: np.ndarray,\n flux_errors: Optional[np.ndarray] = None,\n min_ratio_of_maximum_peak_size: float = 0.2,\n samples_per_peak: int = 3,\n time_units: str = \"days\",\n flux_units: str = \"relative flux units\",\n fit_mean: Optional[bool] = True,\n center_data: Optional[bool] = True,\n nterms: Optional[bool] = 1,\n normalization: Optional[bool] = \"standard\",\n sliding: Optional[bool] = True,\n ):\n \"\"\"\n Args:\n timeseries (np.ndarray): array like time series.\n flux (np.ndarray): array like flux values\n flux_errors (Optional[np.ndarray], optional): array like errors on flux values. Defaults to None.\n fit_mean (Optional[bool]): if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage.\n center_data (Optional[bool]): if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False.\n nterms (Optional[bool]): number of terms to use in the Fourier fit. {‘standard’, ‘model’, ‘log’, ‘psd’},\n normalization (Optional[bool]): Normalization to use for the periodogram.\n sliding (Optional[bool]): Use a sliding window to generate an error on the period.\n \"\"\"\n super().__init__(\n timeseries,\n flux,\n flux_errors,\n min_ratio_of_maximum_peak_size,\n samples_per_peak,\n time_units,\n flux_units,\n )\n\n self._lombscargle = LombScargle(\n self.timeseries,\n self.flux,\n dy=self.flux_errors,\n fit_mean=fit_mean,\n center_data=center_data,\n nterms=nterms,\n normalization=normalization,\n )\n\n self.sliding = sliding\n if self.sliding:\n self.ls_kwargs = {\n \"fit_mean\": fit_mean,\n \"center_data\": center_data,\n \"nterms\": nterms,\n \"normalization\": normalization,\n }\n\n def __call__(self, **kwargs) -> PeriodResult:\n \"\"\"Call the PeriodFinder object to return a PeriodResult object.\n If sliding, will run first run the standard period finder to find a period,\n and then generate a set of PeriodResults using a sliding window over periods.\n\n Returns:\n PeriodResult: PeriodResult contains period, error and method information.\n \"\"\"\n period_result = super().__call__(**kwargs)\n\n if not self.sliding:\n return period_result\n\n return self._sliding_ls_periodogram(period_result, **kwargs)\n\n def _sliding_ls_periodogram(\n self,\n period_result_estimate: PeriodResult,\n n_periods: int = 5,\n sliding_aggregation: str = \"median\",\n max_sliding_windows: int = 100,\n **autopower_kwargs,\n ) -> PeriodResult:\n \"\"\"Generate a set of PeriodResults using a sliding window over n_periods.\n\n Args:\n period_result_estimate (PeriodResult): First estimate period result\n n_periods (int, optional): Number of complete periods to consider in each window. Defaults to 5.\n sliding_aggregation (str, optional): How to aggregate the outputted periods. Defaults to \"median\". One of [\"mean\", \"median\"].\n max_sliding_windows (int, optional): Max number of sliding windows to consider. 
Defaults to 100.\n If period is too short, will cap the number of windows at this value.\n\n Raises:\n ValueError: If incorrect method given.\n\n Returns:\n PeriodResult: Single PeriodResult with errors calculated using spread across window calculations.\n \"\"\"\n\n methods = [\"mean\", \"median\"]\n if sliding_aggregation not in methods:\n raise ValueError(\n f\"method must be on of {methods}, not {sliding_aggregation}\"\n )\n\n period_estimate = period_result_estimate.period\n\n periods = []\n epoch = self.timeseries.min()\n time_tolerance = np.diff(\n self.timeseries\n ).min() # allow a small tolerance when calculating last window\n number_of_windows = (\n int(\n (\n (self.timeseries.max() + time_tolerance)\n - (period_estimate * n_periods)\n - epoch\n )\n / period_estimate\n )\n + 1\n )\n\n if number_of_windows < 3:\n logger.warning(\n \"Sliding window too large to generate good estimate, returning regular lombscargle\"\n )\n return period_result_estimate\n\n if number_of_windows > max_sliding_windows:\n logger.warning(\n \"Attempting to calculate too many sliding windows, reducing to %d\"\n % max_sliding_windows\n )\n number_of_windows = max_sliding_windows\n n_periods = ((self.timeseries.max() - epoch) / period_estimate) - (\n number_of_windows - 1\n )\n\n count = 0\n with progressbar.ProgressBar(\n max_value=number_of_windows,\n widgets=[\n \"Sliding LombScargle Window: \",\n progressbar.Counter(),\n \" windows (\",\n progressbar.Timer(),\n \")\",\n ],\n ) as bar:\n while epoch <= (self.timeseries.max() + time_tolerance) - (\n period_estimate * n_periods\n ):\n idxs = np.logical_and(\n self.timeseries >= epoch,\n self.timeseries < epoch + (period_estimate * n_periods),\n )\n\n if len(self.timeseries[idxs]) == 0:\n logger.debug(\"Empty slice %d, continuing\" % count)\n epoch += period_estimate\n count += 1\n bar.update(count)\n continue\n\n ls_periodfinder = LombScarglePeriodFinder(\n self.timeseries[idxs],\n self.flux[idxs],\n self.flux_errors[idxs] if self.flux_errors is not None else None,\n **self.ls_kwargs,\n sliding=False,\n )\n\n period_result = ls_periodfinder(**autopower_kwargs)\n\n if period_result is not None:\n periods.append(period_result.period)\n\n epoch += period_estimate\n count += 1\n bar.update(count)\n\n logger.info(\"Calculated %d periods from sliding windows\" % len(periods))\n logger.debug(periods)\n\n try:\n if sliding_aggregation == \"median\":\n percentiles = np.percentile(periods, [10, 50, 90])\n ave_period = percentiles[1]\n std_period = percentiles[2] - percentiles[0]\n elif sliding_aggregation == \"mean\":\n ave_period = np.nanmean(periods)\n std_period = np.nanstd(periods)\n except (IndexError, ValueError) as err:\n logger.error(\n \"Unable to calculate %s, returning initial guess\" % sliding_aggregation\n )\n logger.error(err, exc_info=True)\n return period_result_estimate\n\n return PeriodResult(\n ave_period, std_period, std_period, method=self.__class__.__name__\n )\n\n def calculate_periodogram(self, **kwargs) -> Periodogram:\n \"\"\"Calculate LS Periodogram of data\n\n Args:\n method (str, optional): [description]. Defaults to \"auto\".\n method_kwds ([type], optional): [description]. Defaults to None.\n normalization ([type], optional): [description]. Defaults to None.\n samples_per_peak (int, optional): [description]. Defaults to 5.\n nyquist_factor (int, optional): [description]. Defaults to 5.\n minimum_frequency ([type], optional): [description]. Defaults to None.\n maximum_frequency ([type], optional): [description]. 
Defaults to None.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: The frequency and Lomb-Scargle power\n \"\"\"\n\n method = kwargs.get(\"method\", \"auto\")\n method_kwds = kwargs.get(\"method_kwds\", None)\n normalization = kwargs.get(\"normalization\", None)\n samples_per_peak = kwargs.get(\"samples_per_peak\", 5)\n nyquist_factor = kwargs.get(\"nyquist_factor\", 5)\n minimum_frequency = kwargs.get(\"minimum_frequency\", None)\n maximum_frequency = kwargs.get(\"maximum_frequency\", None)\n\n if maximum_frequency is None:\n # set max frequency to nyquist limit to prevent small spurious periods.\n min_timestamp_diff = np.min(np.diff(self.timeseries))\n maximum_frequency = 1.0 / (nyquist_factor * min_timestamp_diff)\n\n return Periodogram(\n *self._lombscargle.autopower(\n method=method,\n method_kwds=method_kwds,\n normalization=normalization,\n samples_per_peak=samples_per_peak,\n nyquist_factor=nyquist_factor,\n minimum_frequency=minimum_frequency,\n maximum_frequency=maximum_frequency,\n )\n )\n\n def plot(\n self, ax: Axes, period: PeriodResult, colour: Optional[str] = \"orange\"\n ) -> Axes:\n \"\"\"Given a figure and an axis plot the interesting output of the object.\n\n Args:\n ax ([type]): Matplotlib axis\n period (PeriodResult): Outputted period to plot around\n \"\"\"\n ax = self.plot_periodogram(ax, period, colour=colour)\n ax.set_title(\"Lomb Scargle Periodogram\")\n return ax\n", "from typing import Optional, Tuple\n\nimport numpy as np\nfrom astropy.convolution import Gaussian1DKernel, convolve\nfrom gacf import GACF\nfrom matplotlib.axes import Axes\nfrom scipy.stats import median_abs_deviation\n\nfrom roto.methods.fft import FFTPeriodFinder\nfrom roto.methods.periodfinder import PeriodFinder, PeriodResult\n\n\nclass GACFPeriodFinder(PeriodFinder):\n \"\"\"Generalised Autocorrelation Function (G-ACF) method to find periods.\n Conforms to PeriodFinder interface.\n \"\"\"\n\n def __init__(\n self,\n timeseries: np.ndarray,\n flux: np.ndarray,\n flux_errors: Optional[np.ndarray] = None,\n min_ratio_of_maximum_peak_size: float = 0.2,\n samples_per_peak: int = 3,\n time_units: str = \"days\",\n flux_units: str = \"relative flux units\",\n ):\n \"\"\"\n Args:\n timeseries (np.ndarray): array like time series.\n flux (np.ndarray): array like flux values\n flux_errors (Optional[np.ndarray], optional): array like errors on flux values. Defaults to None.\n \"\"\"\n super().__init__(\n timeseries,\n flux,\n flux_errors,\n min_ratio_of_maximum_peak_size,\n samples_per_peak,\n time_units,\n flux_units,\n )\n self._gacf = GACF(self.timeseries, self.flux, self.flux_errors)\n self.lag_timeseries = None\n self.correlations = None\n\n def calculate_periodogram(self, **kwargs) -> None:\n \"\"\"A \"periodogram\" does not exist for an ACF\n Returns:\n None\n \"\"\"\n return None\n\n def calculate_autocorrelation(self, **kwargs) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Calculate G-ACF of data.\n It is recommended to leave selection_function and weight_function as default for speed.\n\n Args:\n min_lag (float, optional): min lag in time_units. Defaults to None.\n max_lag (float, optional): max lag in time_units. Defaults to None.\n lag_resolution (float, optional): lag resolution in time_units. Defaults to None.\n alpha (float, optional): weight function characteristic length scale, default is t.median_time. Defaults to None.\n selection_function (str, optional): 'fast' or 'natural' - see paper for more details. 
Defaults to \"natural\".\n weight_function: (str, optional) 'gaussian' or 'fractional' see paper for more details. Defaults to \"fractional\".\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: G-ACF lag timeseries and correlations\n \"\"\"\n\n min_lag = kwargs.get(\"min_lag\", 0)\n max_lag = kwargs.get(\"max_lag\", None)\n lag_resolution = kwargs.get(\"lag_resolution\", None)\n selection_function = kwargs.get(\"selection_function\", \"natural\")\n weight_function = kwargs.get(\"weight_function\", \"fractional\")\n alpha = kwargs.get(\"alpha\", None)\n\n lag_timeseries, correlations = self._gacf.autocorrelation(\n min_lag=min_lag,\n max_lag=max_lag,\n lag_resolution=lag_resolution,\n selection_function=selection_function,\n weight_function=weight_function,\n alpha=alpha,\n )\n\n return np.array(lag_timeseries), np.array(correlations)\n\n def __call__(self, gacf_method=\"fft\", **kwargs) -> PeriodResult:\n \"\"\"Overrides parent call method to allow 2-stage period extraction.\n\n Args:\n method (str, optional): Method used to get final period values. Defaults to \"fft\".\n - \"fft\" will use an FFT on the G-ACF\n - \"peaks\" will find peaks within the G-ACF itself.\n\n Returns:\n PeriodResult: [description]\n \"\"\"\n\n self.lag_timeseries, self.correlations = self.calculate_autocorrelation(\n **kwargs\n )\n if gacf_method == \"fft\":\n fft = FFTPeriodFinder(self.lag_timeseries, self.correlations)\n fft_period = fft(**kwargs)\n return PeriodResult(\n period=fft_period.period,\n neg_error=fft_period.neg_error,\n pos_error=fft_period.pos_error,\n method=self.__class__.__name__,\n )\n elif gacf_method == \"peaks\":\n return self.find_acf_peaks(self.lag_timeseries, self.correlations)\n\n def find_acf_peaks(\n self, lag_timeseries: np.ndarray, correlation: np.ndarray\n ) -> PeriodResult:\n \"\"\"Method taken from McQuillan 2013:\n Convolve ACF with Gaussian Kernel\n Identify peaks in ACF\n Select peak associated with mean rotation period\n Evaluate uncertainty on error\n\n Args:\n lag_timeseries (np.ndarray): Lag time series. 
Must be positive side only.\n correlation (np.ndarray): Correlations\n\n Returns:\n PeriodResult: [description]\n \"\"\"\n gaussian_fwhm = lag_timeseries[18] - lag_timeseries[0]\n gauss_kernel = Gaussian1DKernel(\n gaussian_fwhm, x_size=(np.ceil(gaussian_fwhm * (57 / 18)) // 2 * 2 + 1)\n )\n smoothed_correlations = convolve(correlation, gauss_kernel)\n\n acf_peak_indexes = self.calculate_peak_indexes(\n smoothed_correlations, sort=False\n )\n\n # Remove zero point as not a real peak\n acf_peak_indexes = np.delete(acf_peak_indexes, 0)\n\n if len(acf_peak_indexes) <= 1:\n # just one peak, use width of Gaussian as stdev\n # find left min:\n central_index = acf_peak_indexes[0]\n left_idx = acf_peak_indexes[0]\n value = smoothed_correlations[left_idx]\n while value > 0.5 * smoothed_correlations[central_index]:\n try:\n value = smoothed_correlations[left_idx]\n left_idx -= 1\n except IndexError:\n left_idx = None\n break\n # find right min:\n right_idx = acf_peak_indexes[0]\n value = smoothed_correlations[right_idx]\n while value > 0.5 * smoothed_correlations[central_index]:\n try:\n value = smoothed_correlations[right_idx]\n right_idx += 1\n except IndexError:\n right_idx = None\n break\n sigma_p = 0\n if left_idx and right_idx:\n sigma_p = lag_timeseries[right_idx] - lag_timeseries[left_idx]\n\n return PeriodResult(\n period=lag_timeseries[acf_peak_indexes[0]],\n neg_error=sigma_p,\n pos_error=sigma_p,\n method=self.__class__.__name__,\n )\n\n peak_lags = lag_timeseries[acf_peak_indexes]\n local_heights = np.zeros(len(acf_peak_indexes))\n\n for i, peak_idx in enumerate(acf_peak_indexes):\n\n # find left min:\n left_idx = peak_idx\n diff = smoothed_correlations[left_idx] - smoothed_correlations[left_idx - 1]\n while diff > 0:\n try:\n diff = (\n smoothed_correlations[left_idx]\n - smoothed_correlations[left_idx - 1]\n )\n left_idx -= 1\n except IndexError:\n left_idx = None\n break\n if left_idx:\n left_height = correlation[peak_idx] - correlation[left_idx]\n\n # find right min:\n right_idx = peak_idx\n diff = (\n smoothed_correlations[right_idx] - smoothed_correlations[right_idx + 1]\n )\n while diff > 0:\n try:\n diff = (\n smoothed_correlations[right_idx]\n - smoothed_correlations[right_idx + 1]\n )\n right_idx += 1\n except IndexError:\n right_idx = None\n break\n if right_idx:\n right_height = correlation[peak_idx] - correlation[right_idx]\n if left_height and right_height:\n local_heights[i] = (left_height + right_height) / 2\n elif right_height:\n local_heights[i] = right_height\n elif left_height:\n local_heights[i] = left_height\n else:\n local_heights[i] = np.nan\n\n first_lag = peak_lags[0]\n second_lag = peak_lags[1]\n p_start = 0\n\n if not (2 * first_lag * 0.8) <= second_lag <= (2 * first_lag * 1.2):\n if local_heights[1] > local_heights[0]:\n p_start = 1\n\n valid_peaks = [peak_lags[p_start]]\n valid_peak_indexes = [p_start]\n gap = 0\n peak_number = 2\n for i in range(1, len(peak_lags)):\n if (i + p_start) >= len(peak_lags):\n break\n if len(valid_peaks) >= 10:\n break\n if i + p_start - 1 >= 0:\n gap = peak_lags[i + p_start] - peak_lags[i + p_start - 1]\n gap_ratio = gap / peak_lags[p_start]\n\n if (\n (peak_lags[p_start] * 0.8 * peak_number)\n <= peak_lags[i + p_start]\n <= (peak_lags[p_start] * 1.2 * peak_number)\n ):\n if gap_ratio > 0.3:\n valid_peaks.append(peak_lags[i + p_start] / peak_number)\n valid_peak_indexes.append(i + p_start)\n peak_number += 1\n\n # use median / MAD estimate from multiple peaks.\n mad = median_abs_deviation(valid_peaks)\n sigma_p = 1.483 * mad / 
np.sqrt(len(valid_peaks) - 1)\n med_p = np.median(valid_peaks)\n\n return PeriodResult(\n period=med_p,\n neg_error=sigma_p,\n pos_error=sigma_p,\n method=self.__class__.__name__,\n )\n\n def plot(\n self, ax: Axes, period: PeriodResult, colour: Optional[str] = \"orange\"\n ) -> Axes:\n \"\"\"Given a figure and an axis plot the interesting output of the object.\n\n Args:\n ax (Axes): Matplotlib axis\n period (PeriodResult): Outputted period to plot around\n \"\"\"\n if (self.lag_timeseries is None) or (self.correlations is None):\n self()\n\n ax.scatter(self.lag_timeseries, self.correlations, s=1, color=\"k\")\n\n ax.axvline(period.period, color=colour, lw=1)\n ax.axvspan(\n period.period - period.neg_error,\n period.period + period.pos_error,\n color=colour,\n alpha=0.5,\n )\n\n ax.set_xlim([0, min(5 * period.period, self.lag_timeseries[-1])])\n\n ax.set_xlabel(f\"Lag time / {self.time_units}\")\n ax.set_ylabel(\"G-ACF Power\")\n ax.set_title(\"G-ACF\")\n\n return ax\n" ]
[ [ "numpy.percentile", "numpy.diff", "numpy.nanmean", "numpy.nanstd", "numpy.logical_and" ], [ "numpy.median", "scipy.stats.median_abs_deviation", "numpy.ceil", "numpy.delete", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] } ]
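The sliding-window logic in _sliding_ls_periodogram above reduces the per-window period estimates to a single value and spread; a minimal sketch of its "median" aggregation branch, on hypothetical window results:

import numpy as np

periods = np.array([3.05, 2.98, 3.12, 3.01, 3.07])  # hypothetical per-window periods
p10, p50, p90 = np.percentile(periods, [10, 50, 90])
ave_period, std_period = p50, p90 - p10  # period and its 10th-90th percentile spread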
HobnobMancer/cazy_webscraper
[ "3f74492f46db2093f7e6cd91fffcb8347694e54e" ]
[ "tests/test_crawler_get_html_pages.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# (c) University of St Andrews 2020-2021\n# (c) University of Strathclyde 2020-2021\n# (c) James Hutton Institute 2020-2021\n#\n# Author:\n# Emma E. M. Hobbs\n#\n# Contact\n# [email protected]\n#\n# Emma E. M. Hobbs,\n# Biomolecular Sciences Building,\n# University of St Andrews,\n# North Haugh Campus,\n# St Andrews,\n# KY16 9ST\n# Scotland,\n# UK\n#\n# The MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Tests the script get_cazy_pages which creates a local library of HTML pages from CAZy.\n\nThese tests are intended to be run from the root of the repository using:\npytest -v\n\"\"\"\n\nimport pytest\n\nimport pandas as pd\n\nfrom argparse import Namespace\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\n\nfrom scraper import cazy_webscraper, crawler, sql, utilities\nfrom scraper.crawler import Family\nfrom scraper.crawler.cazy_html_pages import get_cazy_pages\nfrom scraper.utilities import file_io, parse_configuration, parsers\n\n\n@pytest.fixture\ndef input_dir(test_dir):\n    path_ = test_dir / \"test_inputs\" / \"test_inputs_crawler\" / \"get_cazy_pages\"\n    return path_\n\n\n@pytest.fixture\ndef output_dir(test_dir):\n    path_ = test_dir / \"test_outputs\"\n    return path_\n\n\n@pytest.fixture\ndef html_dir(output_dir):\n    path_ = output_dir / \"test_outputs_get_cazy_pages\" / \"html_pages\"\n    return path_\n\n\n@pytest.fixture\ndef logs_dir(output_dir):\n    path_ = output_dir / \"test_outputs_get_cazy_pages\" / \"test_logs\"\n    return path_\n\n\n@pytest.fixture\ndef input_pages(test_dir):\n    path_ = test_dir / \"test_inputs\" / \"test_inputs_crawler\" / \"scrape_all_inputs\" / \"test_get_pagination_urls\"\n    return path_\n\n\n@pytest.fixture\ndef pag_dir(test_dir):\n    path_ = test_dir / \"test_inputs\" / \"test_inputs_crawler\" / \"scrape_by_kingdom_inputs\"\n    return path_\n\n\n@pytest.fixture\ndef args(logs_dir):\n    argsdict = {\n        \"args\": Namespace(\n            subfamilies=True,\n            retries=2,\n            timeout=5,\n            output=logs_dir,\n        )\n    }\n    return argsdict\n\n\n@pytest.fixture\ndef args_pages(html_dir):\n    argsdict = {\n        \"args\": Namespace(\n            subfamilies=True,\n            retries=2,\n            timeout=5,\n            output=html_dir,\n        )\n    }\n    return argsdict\n\n\n@pytest.fixture\ndef start_time():\n    start_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")  # used in terminating message\n    start_time = pd.to_datetime(start_time)\n    return start_time\n\n\n@pytest.fixture\ndef protein_gen():\n    result_list = [\n        
{\"url\": None, \"format\": None, \"sql\": None, \"error\": None},\n        {\"url\": \"www.cazy.org/GH1.html\", \"error\": \"error message\"},\n        {\"url\": None, \"error\": \"error message\"},\n        {\"url\": \"www.cazy.org/GH3.html\", \"error\": \"error message\"},\n    ]\n    return (_ for _ in result_list)\n\n\n@pytest.fixture\ndef protein_gen_success():\n    result_list = [\n        {\"url\": None, \"format\": None, \"sql\": None, \"error\": None},\n        {\"url\": None, \"format\": None, \"sql\": None, \"error\": None},\n        {\"url\": None, \"format\": None, \"sql\": None, \"error\": None},\n    ]\n    return (_ for _ in result_list)\n\n\n@pytest.fixture\ndef fam_url():\n    url = \"http://www.cazy.org/GH14_all.html\"\n    return url\n\n\n@pytest.fixture\ndef fam_template(fam_url):\n    fam = Family(\"famName\", \"CAZyClass\", fam_url)\n    return fam\n\n\n# test get_cazy_pages\n\n\ndef test_pages_first_class_parse_no_fam_urls(\n    cazy_dictionary, cazy_home_url, logs_dir, args, start_time, monkeypatch,\n):\n    \"\"\"Test get_cazy_pages when parsing the CAZy class for the first time \n    and no fam urls are retrieved.\"\"\"\n\n    file_io.make_output_directory(logs_dir, True, False)\n\n    def mock_get_classes(*args, **kwargs):\n        class1 = crawler.CazyClass(\"test_class\", \"test_class_url.html\", 0)\n        return [class1]\n\n    def mock_get_families(*args, **kwargs):\n        return None, \"test error message\", [\"test_url1\", \"test_url2\"]\n\n    monkeypatch.setattr(crawler, \"get_cazy_classes\", mock_get_classes)\n    monkeypatch.setattr(crawler, \"get_cazy_family_urls\", mock_get_families)\n\n    with pytest.raises(SystemExit) as pytest_wrapped_e:\n        get_cazy_pages.get_cazy_pages(\n            args=args[\"args\"],\n            cazy_home=cazy_home_url,\n            time_stamp=\"time_stamp\",\n            excluded_classes=None,\n            cazy_dict=cazy_dictionary,\n            config_dict=None,\n            kingdoms=None,\n            start_time=start_time,\n        )\n    assert pytest_wrapped_e.type == SystemExit\n\n    file_io.make_output_directory(logs_dir, True, False)\n\n\ndef test_pages_reparse_class_parse_not_kingdom_no_config(\n    cazy_dictionary, cazy_home_url, logs_dir, args, start_time, monkeypatch,\n):\n    \"\"\"Test get_cazy_pages when reparsing the CAZy class\"\"\"\n\n    file_io.make_output_directory(logs_dir, True, False)\n\n    fam1 = crawler.Family(\"test_fam\", \"test_class\", \"test_url\")\n\n    def mock_get_classes(*args, **kwargs):\n        class1 = crawler.CazyClass(\n            name=\"test_class\",\n            url=\"test_class_url.html\",\n            tries=0,\n            failed_families={fam1: 0}\n        )\n        return [class1]\n\n    def mock_get_families(*args, **kwargs):\n        return [fam1], \"error message\", [\"in\", \"cor\", \"rect\", \"urls\"]\n\n    def mock_parse_family(*args, **kwargs):\n        return fam1, True, [\"fail1\", \"fail2\"], [\"format error\"]\n\n    monkeypatch.setattr(crawler, \"get_cazy_classes\", mock_get_classes)\n    monkeypatch.setattr(crawler, \"get_cazy_family_urls\", mock_get_families)\n    monkeypatch.setattr(get_cazy_pages, \"parse_all_family\", mock_parse_family)\n\n    with pytest.raises(SystemExit) as pytest_wrapped_e:\n        get_cazy_pages.get_cazy_pages(\n            args=args[\"args\"],\n            cazy_home=cazy_home_url,\n            time_stamp=\"time_stamp\",\n            excluded_classes=None,\n            cazy_dict=cazy_dictionary,\n            config_dict=None,\n            kingdoms='all',\n            start_time=start_time,\n        )\n    assert pytest_wrapped_e.type == SystemExit\n\n    file_io.make_output_directory(logs_dir, True, False)\n\n\ndef test_pages_reparse_class_parse_kingdom_no_config(\n    cazy_dictionary, cazy_home_url, logs_dir, args, start_time, monkeypatch,\n):\n    \"\"\"Test get_cazy_pages when reparsing the CAZy class\"\"\"\n\n    file_io.make_output_directory(logs_dir, True, 
False)\n\n fam1 = crawler.Family(\"test_fam\", \"test_class\", \"test_url\")\n\n def mock_get_classes(*args, **kwargs):\n class1 = crawler.CazyClass(\n name=\"test_class\",\n url=\"test_class_url.html\",\n tries=0,\n failed_families={fam1: 0}\n )\n return [class1]\n\n def mock_get_families(*args, **kwargs):\n return [fam1], \"error message\", [\"in\", \"cor\", \"rect\", \"urls\"]\n\n def mock_parse_family(*args, **kwargs):\n return fam1, True, [\"fail1\", \"fail2\"], [\"format error\"]\n\n monkeypatch.setattr(crawler, \"get_cazy_classes\", mock_get_classes)\n monkeypatch.setattr(crawler, \"get_cazy_family_urls\", mock_get_families)\n monkeypatch.setattr(get_cazy_pages, \"parse_family_by_kingdom\", mock_parse_family)\n\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n get_cazy_pages.get_cazy_pages(\n args=args[\"args\"],\n cazy_home=cazy_home_url,\n time_stamp=\"time_stamp\",\n excluded_classes=None,\n cazy_dict=cazy_dictionary,\n config_dict=None,\n kingdoms=[\"Bacteria\"],\n start_time=start_time,\n )\n assert pytest_wrapped_e.type == SystemExit\n\n file_io.make_output_directory(logs_dir, True, False)\n\n\n# def test_pages_reparse_class_parse_not_kingdom_config(\n# cazy_dictionary, cazy_home_url, logs_dir, args, start_time, monkeypatch,\n# ):\n# \"\"\"Test get_cazy_pages when reparsing the CAZy class\"\"\"\n\n# file_io.make_output_directory(logs_dir, True, False)\n\n# fam1 = crawler.Family(\"GH3_1\", \"test_class\", \"test_url\")\n\n# config_dict = {\"Glycoside Hydrolases\": [\"GH3\"]}\n\n# def mock_get_classes(*args, **kwargs):\n# class1 = crawler.CazyClass(\n# name=\"Glycoside Hydrolases\",\n# url=\"test_class_url.html\",\n# tries=0,\n# failed_families={fam1: 0}\n# )\n# return [class1]\n\n# def mock_get_families(*args, **kwargs):\n# return [fam1], \"error message\", [\"in\", \"cor\", \"rect\", \"urls\"]\n\n# def mock_parse_family(*args, **kwargs):\n# return fam1, True, [\"fail1\", \"fail2\"], [\"format error\"]\n\n# monkeypatch.setattr(crawler, \"get_cazy_classes\", mock_get_classes)\n# monkeypatch.setattr(crawler, \"get_cazy_family_urls\", mock_get_families)\n# monkeypatch.setattr(get_cazy_pages, \"parse_family_by_kingdom\", mock_parse_family)\n\n# with pytest.raises(SystemExit) as pytest_wrapped_e:\n# get_cazy_pages.get_cazy_pages(\n# args=args[\"args\"],\n# cazy_home=cazy_home_url,\n# time_stamp=\"time_stamp\",\n# excluded_classes=None,\n# cazy_dict=cazy_dictionary,\n# config_dict=config_dict,\n# kingdoms=\"all\",\n# start_time=start_time,\n# )\n# assert pytest_wrapped_e.type == SystemExit\n\n# file_io.make_output_directory(logs_dir, True, False)\n\n\ndef test_pages_reparse_class_parse_kingdom_config(\n cazy_dictionary, cazy_home_url, logs_dir, start_time, monkeypatch,\n):\n \"\"\"Test get_cazy_pages when reparsing the CAZy class\"\"\"\n\n args = {\n \"args\": Namespace(\n subfamilies=False,\n retries=2,\n timeout=5,\n output=logs_dir,\n )\n }\n\n file_io.make_output_directory(logs_dir, True, False)\n\n fam1 = crawler.Family(\"GH3_1\", \"test_class\", \"test_url\")\n\n config_dict = {\"Glycoside Hydrolases\": [\"GH3\"]}\n\n def mock_get_classes(*args, **kwargs):\n class1 = crawler.CazyClass(\n name=\"Glycoside Hydrolases\",\n url=\"test_class_url.html\",\n tries=0,\n failed_families={fam1: 0}\n )\n return [class1]\n\n def mock_get_families(*args, **kwargs):\n return [fam1], \"error message\", [\"in\", \"cor\", \"rect\", \"urls\"]\n\n def mock_parse_family(*args, **kwargs):\n return fam1, True, [\"fail1\", \"fail2\"], [\"format error\"]\n\n monkeypatch.setattr(crawler, 
\"get_cazy_classes\", mock_get_classes)\n    monkeypatch.setattr(crawler, \"get_cazy_family_urls\", mock_get_families)\n    monkeypatch.setattr(get_cazy_pages, \"parse_family_by_kingdom\", mock_parse_family)\n\n    with pytest.raises(SystemExit) as pytest_wrapped_e:\n        get_cazy_pages.get_cazy_pages(\n            args=args[\"args\"],\n            cazy_home=cazy_home_url,\n            time_stamp=\"time_stamp\",\n            excluded_classes=None,\n            cazy_dict=cazy_dictionary,\n            config_dict=config_dict,\n            kingdoms=[\"Bacteria\"],\n            start_time=start_time,\n        )\n    assert pytest_wrapped_e.type == SystemExit\n\n    file_io.make_output_directory(logs_dir, True, False)\n\n\n# test parse_all_family()\n\n\ndef test_pages_parse_all_rescrape_first_pag(args, protein_gen, cazy_home_url, monkeypatch):\n    \"\"\"Test parse_all_family when rescraping a family and \n    need to rescrape the first pagination page.\"\"\"\n\n    fam = Family(\n        \"testFam\",\n        \"testClass\",\n        \"testURL\",\n        failed_pages={\"url.com\": [\"first_pagination\", 1]},\n    )\n\n    def mock_parse_pagination_page(*args, **kwargs):\n        return ['urls', 'urls']\n\n    def mock_get_pages(*args, **kwargs):\n        return protein_gen\n\n    monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages\", mock_parse_pagination_page)\n    monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n    get_cazy_pages.parse_all_family(\n        family=fam,\n        cazy_home=cazy_home_url,\n        args=args[\"args\"],\n    )\n\n\ndef test_pages_parse_all_format_error(args, protein_gen, cazy_home_url, monkeypatch):\n    \"\"\"Test parse_all_family when family url is not formatted correctly.\"\"\"\n\n    fam = Family(\n        \"testFam\",\n        \"testClass\",\n        \"testURL\",\n    )\n\n    def mock_parse_pagination_page(*args, **kwargs):\n        return [1, 2, 3, 4, 5, 6]\n\n    def mock_get_pages(*args, **kwargs):\n        return protein_gen\n\n    monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages\", mock_parse_pagination_page)\n    monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n    get_cazy_pages.parse_all_family(\n        family=fam,\n        cazy_home=cazy_home_url,\n        args=args[\"args\"],\n    )\n\n\ndef test_pages_parse_all_first_fam_parse(fam_url, args, protein_gen, cazy_home_url, monkeypatch):\n    \"\"\"Test parse_all_family when parsing the fam for the first time.\"\"\"\n\n    fam = Family(\n        \"testFam\",\n        \"testClass\",\n        fam_url,\n    )\n\n    def mock_parse_pagination_page(*args, **kwargs):\n        return [1, 2, 3, 4, 5, 6]\n\n    def mock_get_pages(*args, **kwargs):\n        return protein_gen\n\n    monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages\", mock_parse_pagination_page)\n    monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n    get_cazy_pages.parse_all_family(\n        family=fam,\n        cazy_home=cazy_home_url,\n        args=args[\"args\"],\n    )\n\n\n# test get_pagination_pages()\n\n\ndef test_pages_get_pag_no_page(args, cazy_home_url, fam_url, fam_template, monkeypatch):\n    \"\"\"Test get_pagination_pages when no page is returned.\"\"\"\n\n    def mock_get_page(*args, **kwargs):\n        return None, \"error message\"\n\n    monkeypatch.setattr(crawler, \"get_page\", mock_get_page)\n\n    res1, res2 = get_cazy_pages.get_pagination_pages(\n        first_pagination_url=fam_url,\n        family=fam_template,\n        cazy_home=cazy_home_url,\n        args=args[\"args\"],\n    )\n\n    assert res1 == {'url': 'http://www.cazy.org/GH14_all.html\\tCAZyClass\\tFailed to connect to first pagination page for famName, therefore could not retrieve URLs to all pagination pages\\terror message', 'format': None}\n    assert res2 is None\n\n\ndef test_pages_get_page_successful(args, cazy_home_url, fam_url, fam_template, monkeypatch):\n    \"\"\"Test 
get_pagination_pages when successful\"\"\"\n\n    def mock_get_page(*args, **kwargs):\n        return \"page\", \"error message\"\n\n    def mock_get_urls(*args, **kwargs):\n        return [\"url\"]\n\n    monkeypatch.setattr(crawler, \"get_page\", mock_get_page)\n    monkeypatch.setattr(get_cazy_pages, \"get_pagination_page_urls\", mock_get_urls)\n\n    res1 = get_cazy_pages.get_pagination_pages(\n        first_pagination_url=fam_url,\n        family=fam_template,\n        cazy_home=cazy_home_url,\n        args=args[\"args\"],\n    )\n\n    assert res1 == [\"url\"]\n\n\n# test get_pagination_page_urls()\n\n\ndef test_pages_get_urls_pag(fam_url, input_pages, cazy_home_url):\n    \"\"\"Test get_pagination_page_urls() when there is pagination.\"\"\"\n\n    test_input_path = input_pages / \"pag_page.html\"\n\n    with open(test_input_path) as fp:\n        soup = BeautifulSoup(fp, features=\"lxml\")\n\n    res = get_cazy_pages.get_pagination_page_urls(fam_url, soup, cazy_home_url, \"testFam\")\n    assert len(res) == 40\n\n\ndef test_pages_get_urls_no_pag(fam_url, input_pages, cazy_home_url):\n    \"\"\"Test get_pagination_page_urls() when there is no pagination.\"\"\"\n\n    test_input_path = input_pages / \"no_pagination_pag.html\"\n\n    with open(test_input_path) as fp:\n        soup = BeautifulSoup(fp, features=\"lxml\")\n\n    res = get_cazy_pages.get_pagination_page_urls(fam_url, soup, cazy_home_url, \"testFam\")\n    assert res == ['http://www.cazy.org/GH14_all.html']\n\n\n# test get_html_page()\n\n\ndef test_pages_get_html_pages_no_page(fam_url, args_pages, html_dir, monkeypatch):\n    \"\"\"Test get_html_pages() when no page is returned.\"\"\"\n    file_io.make_output_directory(html_dir, True, False)\n\n    def mock_get_pages(*args, **kwargs):\n        return None, \"error\"\n\n    monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n\n    get_cazy_pages.get_html_page(fam_url, \"testFam\", args_pages[\"args\"])\n\n    file_io.make_output_directory(html_dir, True, False)\n\n\ndef test_pages_get_html_pages_deleted_fam(fam_url, args_pages, html_dir, input_pages, monkeypatch):\n    \"\"\"Test get_html_pages() when no page is returned.\"\"\"\n    file_io.make_output_directory(html_dir, True, False)\n\n    test_input_path = input_pages / \"deleted_fam.html\"\n\n    with open(test_input_path) as fp:\n        soup = BeautifulSoup(fp, features=\"lxml\")\n\n    def mock_get_pages(*args, **kwargs):\n        return soup, None\n\n    monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n\n    get_cazy_pages.get_html_page(fam_url, \"testFam\", args_pages[\"args\"])\n\n    file_io.make_output_directory(html_dir, True, False)\n\n\ndef test_pages_get_html_pages_no_protein_count(fam_url, args_pages, html_dir, input_pages, monkeypatch):\n    \"\"\"Test get_html_pages() when no protein count can be retrieved.\"\"\"\n    file_io.make_output_directory(html_dir, True, False)\n\n    test_input_path = input_pages / \"no_protein_total.html\"\n\n    with open(test_input_path) as fp:\n        soup = BeautifulSoup(fp, features=\"lxml\")\n\n    def mock_get_pages(*args, **kwargs):\n        return soup, None\n\n    monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n\n    get_cazy_pages.get_html_page(fam_url, \"testFam\", args_pages[\"args\"])\n\n    file_io.make_output_directory(html_dir, True, False)\n\n\ndef test_pages_get_html_pages_empty(fam_url, args_pages, html_dir, input_pages, monkeypatch):\n    \"\"\"Test get_html_pages() when the family is empty.\"\"\"\n    file_io.make_output_directory(html_dir, True, False)\n\n    test_input_path = input_pages / \"empty_fam.html\"\n\n    with open(test_input_path) as fp:\n        soup = BeautifulSoup(fp, features=\"lxml\")\n\n    def mock_get_pages(*args, **kwargs):\n        return 
soup, None\n\n monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n\n get_cazy_pages.get_html_page(fam_url, \"testFam\", args_pages[\"args\"])\n\n file_io.make_output_directory(html_dir, True, False)\n\n\ndef test_pages_get_html_pages_no_table(fam_url, args_pages, html_dir, input_pages, monkeypatch):\n \"\"\"Test get_html_pages() when the CAZyme table is not populated.\"\"\"\n file_io.make_output_directory(html_dir, True, False)\n\n test_input_path = str(input_pages).replace(\"test_get_pagination_urls\", \"test_parsing_proteins/no_cazyme_table.html\")\n\n with open(test_input_path) as fp:\n soup = BeautifulSoup(fp, features=\"lxml\")\n\n def mock_get_pages(*args, **kwargs):\n return soup, None\n\n def mock_retry_connection(*args, **kwargs):\n return None\n\n monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n monkeypatch.setattr(get_cazy_pages, \"retry_failed_connections\", mock_retry_connection)\n\n get_cazy_pages.get_html_page(fam_url, \"testFam\", args_pages[\"args\"])\n\n file_io.make_output_directory(html_dir, True, False)\n\n\n# test retry_failed_connections()\n\n\ndef test_pages_retry_failed_connections(fam_url, args_pages, html_dir, input_pages, monkeypatch):\n \"\"\"Test retry_failed_connections()\"\"\"\n\n file_io.make_output_directory(html_dir, True, False)\n\n test_input_path = input_pages / \"pag_page.html\"\n\n with open(test_input_path) as fp:\n soup = BeautifulSoup(fp, features=\"lxml\")\n\n def mock_get_pages(*args, **kwargs):\n return soup, None\n\n monkeypatch.setattr(crawler, \"get_page\", mock_get_pages)\n\n get_cazy_pages.retry_failed_connections(\n fam_url,\n \"TestFam\",\n args_pages[\"args\"],\n \"filename.html\"\n )\n\n file_io.make_output_directory(html_dir, True, False)\n\n\n# test parse_family_by_kingdom()\n\n\ndef test_pages_kingdom_reparse_pagination(args, protein_gen, cazy_home_url, monkeypatch):\n \"\"\"Test parse_family_by_kingdom() when reparsing a family and need to reparse the pagination.\"\"\"\n\n test_fam = Family(\"famName\", \"CAZyClass\", \"http://www.cazy.org/GH14.html\")\n test_fam.failed_pages = {\"http://www.cazy.org/GH14_all.html\": 1}\n\n def mock_get_pag(*args, **kwargs):\n return [\"http://www.cazy.org/GH14_all.html\"]\n\n def mock_get_pages(*args, **kwargs):\n return protein_gen\n\n monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages_kingdom\", mock_get_pag)\n monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n get_cazy_pages.parse_family_by_kingdom(\n family=test_fam,\n cazy_home=cazy_home_url,\n args=args[\"args\"],\n kingdoms=[\"Bacteria\"],\n )\n\n\ndef test_pages_kingdom_first_parse_format_error(args, protein_gen, cazy_home_url, monkeypatch):\n \"\"\"Test parse_family_by_kingdom() when parsing a fam for the first time and \n URL is incorrectly formated.\"\"\"\n\n test_fam = Family(\"famName\", \"CAZyClass\", \"http://www.caasdasdaszy.org/GH14.html\")\n\n def mock_get_pag(*args, **kwargs):\n return [\"http://www.cazy.org/GH14_all.html\"]\n\n def mock_get_pages(*args, **kwargs):\n return protein_gen\n\n monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages_kingdom\", mock_get_pag)\n monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n get_cazy_pages.parse_family_by_kingdom(\n family=test_fam,\n cazy_home=cazy_home_url,\n args=args[\"args\"],\n kingdoms=[\"Bacteria\"],\n )\n\n\ndef test_pages_kingdom_successful(args, protein_gen_success, cazy_home_url, monkeypatch):\n \"\"\"Test parse_family_by_kingdom() when all is successful.\"\"\"\n\n test_fam = 
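# Note: the tests below monkeypatch crawler.get_page, so no live requests are sent\n# to CAZy; the \"page\" and \"error\" strings stand in for real response objects.\n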
Family(\"famName\", \"CAZyClass\", \"http://www.cazy.org/GH14.html\")\n\n def mock_get_pag(*args, **kwargs):\n return [\"http://www.cazy.org/GH14_all.html\"]\n\n def mock_get_pages(*args, **kwargs):\n return protein_gen_success\n\n monkeypatch.setattr(get_cazy_pages, \"get_pagination_pages_kingdom\", mock_get_pag)\n monkeypatch.setattr(get_cazy_pages, \"get_html_page\", mock_get_pages)\n\n get_cazy_pages.parse_family_by_kingdom(\n family=test_fam,\n cazy_home=cazy_home_url,\n args=args[\"args\"],\n kingdoms=[\"Bacteria\"],\n )\n\n\n# test get_pagination_pages_kingdom\n\n\ndef test_pages_king_pag_no_page(monkeypatch, fam_url, fam_template, cazy_home_url, args):\n \"\"\"Test get_pagination_pages_kingdom when no page is returned.\"\"\"\n\n def mock_get_page(*args, **kwargs):\n return None, \"error\"\n\n monkeypatch.setattr(crawler, \"get_page\", mock_get_page)\n\n get_cazy_pages.get_pagination_pages_kingdom(\n first_pagination_url=fam_url,\n family=fam_template,\n cazy_home=cazy_home_url,\n args=args[\"args\"],\n kingdom=\"Bacteria\"\n )\n\n\ndef test_pages_king_pag_successful(monkeypatch, fam_url, fam_template, cazy_home_url, args):\n \"\"\"Test get_pagination_pages_kingdom when successful.\"\"\"\n\n def mock_get_page(*args, **kwargs):\n return \"page\", \"error\"\n\n def mock_pag(*args, **kwargs):\n return [1, 2]\n\n monkeypatch.setattr(crawler, \"get_page\", mock_get_page)\n monkeypatch.setattr(get_cazy_pages, \"get_tax_page_urls\", mock_pag)\n\n get_cazy_pages.get_pagination_pages_kingdom(\n first_pagination_url=fam_url,\n family=fam_template,\n cazy_home=cazy_home_url,\n args=args[\"args\"],\n kingdom=\"Bacteria\"\n )\n\n\n# test get_tax_pages_urls()\n\n\ndef test_pages_get_tax_pag_no_pag(pag_dir, fam_url, cazy_home_url):\n \"\"\"Test get_tax_page_urls when there is no pagination.\"\"\"\n\n test_input_path = pag_dir / \"kb_no_pag.html\"\n\n with open(test_input_path) as fp:\n soup = BeautifulSoup(fp, features=\"lxml\")\n\n get_cazy_pages.get_tax_page_urls(fam_url, soup, cazy_home_url, \"fam\")\n\n\ndef test_pages_get_tax_pag_pag(pag_dir, fam_url, cazy_home_url):\n \"\"\"Test get_tax_page_urls when there is pagination.\"\"\"\n\n test_input_path = pag_dir / \"kb_pag_page.html\"\n\n with open(test_input_path) as fp:\n soup = BeautifulSoup(fp, features=\"lxml\")\n\n get_cazy_pages.get_tax_page_urls(fam_url, soup, cazy_home_url, \"fam\")\n" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
eto/study
[ "95f506569657c174bf2347b5240768fafedaa285" ]
[ "0104/face_camera.py" ]
[ "#!/usr/bin/env python\n# from https://qiita.com/YomamaBanana/items/4197c4f9ec26a05416ed\n\n# ②モジュールをimportする\nimport yaml\n\nimport sys\nimport argparse\nfrom loguru import logger\n\nimport cv2\nimport numpy as np\nimport mediapipe as mp\n\nimport pyvirtualcam\n\n# VirtualCameraクラス\nclass VirtualCamera():\n def __init__(self, width, height, fps) -> None:\n self.v_cam = pyvirtualcam.Camera(width=width, height=height, fps=fps)\n\n def _send(self, image):\n self.v_cam.send(image)\n self.v_cam.sleep_until_next_frame()\n\n# ③Configクラス\nclass Config():\n def __init__(self, yaml_file) -> None:\n logger.info(f\"Reading config file: {yaml_file}\")\n with open(yaml_file, \"r\") as f:\n self.yaml_cfg = yaml.safe_load(f)\n\n# ④検知器クラス\n# Mediapipeのインスタンス化\n# src/face_filter.py\nclass Detector():\n def __init__(self, thickness=1, circle_radius=1, color=(255,0,255), min_detection_confidence=0.5, min_tracking_confidence=0.5) -> None:\n # Mediapipeのインスタンス化\n logger.info(\"Mediapipe detector initiated.\")\n self.drawing = mp.solutions.drawing_utils\n self.drawing_spec = self.drawing.DrawingSpec(\n thickness=thickness, \n circle_radius=circle_radius, \n color=color)\n\n self.drawing_styles = mp.solutions.drawing_styles\n self.face_mesh = mp.solutions.face_mesh\n\n self.face_detector = self.face_mesh.FaceMesh(\n static_image_mode=False,\n refine_landmarks=True,\n min_detection_confidence=min_detection_confidence,\n min_tracking_confidence=min_tracking_confidence)\n\n self._parts_init()\n\n# 顔のバーツの指定\n def _parts_init(self) -> None:\n self.lips = list(self.face_mesh.FACEMESH_LIPS)\n self.lips = np.ravel(self.lips)\n\n self.l_eyes = list(self.face_mesh.FACEMESH_LEFT_EYE)\n self.l_eyes = np.ravel(self.l_eyes)\n\n self.r_eyes = list(self.face_mesh.FACEMESH_RIGHT_EYE)\n self.r_eyes = np.ravel(self.r_eyes)\n\n self.l_eyebrow = list(self.face_mesh.FACEMESH_LEFT_EYEBROW)\n self.l_eyebrow = np.ravel(self.l_eyebrow)\n\n self.r_eyebrow = list(self.face_mesh.FACEMESH_RIGHT_EYEBROW)\n self.r_eyebrow = np.ravel(self.r_eyebrow)\n\n# 顔検知を行う\n def __call__(self, image):\n self.img = image\n self.results = self.face_detector.process(image)\n\n# 検知した顔に対して色と重みをつけ、後処理を行う。\n def post_processing(self, mask, cfg):\n face_dict = {}\n if self.results.multi_face_landmarks:\n for face_landmarks in self.results.multi_face_landmarks:\n\n mask_lip = []\n mask_face = []\n mask_l_eyes = []\n mask_r_eyes = []\n mask_l_eyebrow = []\n mask_r_eyebrow = []\n for i in range(self.face_mesh.FACEMESH_NUM_LANDMARKS):\n\n if i in self.lips:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n mask_lip.append((x, y))\n\n elif i in self.l_eyes:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n\n mask_l_eyes.append((x, y))\n\n elif i in self.r_eyes:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n mask_r_eyes.append((x, y))\n\n elif i in self.r_eyebrow:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n mask_r_eyebrow.append((x, y))\n\n elif i in self.l_eyebrow:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n mask_l_eyebrow.append((x, y))\n\n else:\n pt1 = face_landmarks.landmark[i]\n x = int(pt1.x * self.img.shape[1])\n y = int(pt1.y * self.img.shape[0])\n mask_face.append((x, y))\n\n face_dict[\"mask_lip\"] = np.array(mask_lip)\n 
face_dict[\"mask_face\"] = np.array(mask_face)\n face_dict[\"mask_l_eyes\"] = np.array(mask_l_eyes)\n face_dict[\"mask_r_eyes\"] = np.array(mask_r_eyes)\n face_dict[\"mask_l_eyebrow\"] = np.array(mask_l_eyebrow)\n face_dict[\"mask_r_eyebrow\"] = np.array(mask_r_eyebrow)\n\n full_mask = mask.copy()\n\n for part, v in face_dict.items():\n base = mask.copy()\n convexhull = cv2.convexHull(v)\n if \"eyes\" in part:\n color = cfg[\"eyes\"][\"color\"]\n weight = cfg[\"eyes\"][\"weight\"]\n elif \"eyebrow\" in part:\n color = cfg[\"eyebrow\"][\"color\"]\n weight = cfg[\"eyebrow\"][\"weight\"]\n\n elif \"face\" in part:\n color = cfg[\"face\"][\"color\"]\n weight = cfg[\"face\"][\"weight\"]\n\n elif \"lip\" in part:\n color = cfg[\"lip\"][\"color\"]\n weight = cfg[\"lip\"][\"weight\"]\n else:\n color = (0, 0, 0)\n\n base = cv2.fillConvexPoly(base, convexhull, (color[2], color[1], color[0]))\n\n full_mask = cv2.addWeighted(full_mask, 1, base, weight, 1)\n full_mask = cv2.GaussianBlur(full_mask, (7, 7), 20)\n tmp = cv2.addWeighted(self.img, 1, full_mask, 1, 1)\n\n return tmp, full_mask\n logger.warning(\"Face not detected.\")\n return self.img, mask\n\nclass Camera():\n # 画像サイズを同調させるため、予め横軸を定義する。\n #def __init__(self, index, config, detector, width:int=1920, height:int=1080, fps:int=30) -> None:\n def __init__(self, index, config, detector, width:int=1280, height:int=720, fps:int=30) -> None:\n self.config = config\n self.detector = detector\n\n self.width = width\n self.height = height\n self.fps = fps\n\n self.start(index)\n\n def start(self, index) -> None:\n self.cap = cv2.VideoCapture(index)\n #self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n #self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n success, frame = self.cap.read()\n if not success:\n logger.error(f\"Camera not successful: video input: {index}\")\n sys.exit()\n self.mask = np.zeros((frame.shape[0], frame.shape[1], 3), dtype=np.uint8)\n\n # 仮想カメラを始動する。\n self.v_cam = VirtualCamera(width=self.width, height=self.height, fps=self.fps)\n\n def capture(self) -> None:\n logger.info(\"Catpuring images from video input... (press 'q' to exit.)\")\n while True: \n success, frame = self.cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n frame.flags.writeable = False\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n self.detector(frame)\n\n frame.flags.writeable = True\n frame, mask = self.detector.post_processing(self.mask, self.config.yaml_cfg)\n mask = cv2.flip(mask, 1)\n\n # 結果を送信する。\n self.v_cam._send(frame)\n\n # マスクを表示する。\n mask = cv2.resize(mask, dsize=(400, 320))\n cv2.imshow(\"mask\", cv2.cvtColor(mask, cv2.COLOR_BGR2RGB))\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# ⑥引数管理(argparse)\ndef arg_parser() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"yaml_file\") #yamlファイルの指定\n parser.add_argument(\"--video\", required=True, type=int) #利用するカメル(ビデオ入力)の番号\n args = parser.parse_args()\n return args\n\n# ⑦メイン関数\ndef main(yaml_file, cam_idx):\n # Config処理\n config = Config(yaml_file)\n\n # 検知器定義\n detector = Detector()\n\n # カメル\n CAM = Camera(cam_idx, config, detector)\n\n # 画像を取得し、フィルターを適用する。\n CAM.capture()\n\n# ⑧実行\n# src/face_filter.py\nif __name__ == \"__main__\":\n args = arg_parser()\n main(args.yaml_file, args.video)\n\n" ]
[ [ "numpy.ravel", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liuyu2022/game_robot
[ "d4261219ba81eee844fd19ab691f45522e0b127a" ]
[ "lib/pyscreeze/__init__.py" ]
[ "# PyScreeze\r\n\r\n\"\"\"\r\nNOTE:\r\nApparently Pillow support on Ubuntu 64-bit has several additional steps since it doesn't have JPEG/PNG support out of the box. Description here:\r\n\r\nhttps://stackoverflow.com/questions/7648200/pip-install-pil-e-tickets-1-no-jpeg-png-support\r\nhttp://ubuntuforums.org/showthread.php?t=1751455\r\n\"\"\"\r\n\r\n__version__ = '0.1.26'\r\n\r\nimport collections\r\nimport datetime\r\nimport functools\r\nimport os\r\nimport subprocess\r\nimport sys\r\nimport time\r\nimport errno\r\n\r\nfrom contextlib import contextmanager\r\n\r\ntry:\r\n from PIL import Image\r\n from PIL import ImageOps\r\n from PIL import ImageDraw\r\n if sys.platform == 'win32': # TODO - Pillow now supports ImageGrab on macOS.\r\n from PIL import ImageGrab\r\n _PILLOW_UNAVAILABLE = False\r\nexcept ImportError:\r\n # We ignore this because failures due to Pillow not being installed\r\n # should only happen when the functions that specifically depend on\r\n # Pillow are called. The main use case is when PyAutoGUI imports\r\n # PyScreeze, but Pillow isn't installed because the user is running\r\n # some platform/version of Python that Pillow doesn't support, then\r\n # importing PyAutoGUI should not automatically fail because it\r\n # imports PyScreeze.\r\n # So we have a `pass` statement here since a failure to import\r\n # Pillow shouldn't crash PyScreeze.\r\n _PILLOW_UNAVAILABLE = True\r\n\r\n\r\ntry:\r\n import cv2, numpy\r\n useOpenCV = True\r\n RUNNING_CV_2 = cv2.__version__[0] < '3'\r\nexcept ImportError:\r\n useOpenCV = False\r\n\r\nRUNNING_PYTHON_2 = sys.version_info[0] == 2\r\nif useOpenCV:\r\n if RUNNING_CV_2:\r\n LOAD_COLOR = cv2.CV_LOAD_IMAGE_COLOR\r\n LOAD_GRAYSCALE = cv2.CV_LOAD_IMAGE_GRAYSCALE\r\n else:\r\n LOAD_COLOR = cv2.IMREAD_COLOR\r\n LOAD_GRAYSCALE = cv2.IMREAD_GRAYSCALE\r\n\r\nif not RUNNING_PYTHON_2:\r\n unicode = str # On Python 3, all the isinstance(spam, (str, unicode)) calls will work the same as Python 2.\r\n\r\nif sys.platform == 'win32':\r\n # On Windows, the monitor scaling can be set to something besides normal 100%.\r\n # PyScreeze and Pillow needs to account for this to make accurate screenshots.\r\n # TODO - How does macOS and Linux handle monitor scaling?\r\n import ctypes\r\n try:\r\n ctypes.windll.user32.SetProcessDPIAware()\r\n except AttributeError:\r\n pass # Windows XP doesn't support monitor scaling, so just do nothing.\r\n\r\n\r\nGRAYSCALE_DEFAULT = False\r\n\r\n# For version 0.1.19 I changed it so that ImageNotFoundException was raised\r\n# instead of returning None. In hindsight, this change came too late, so I'm\r\n# changing it back to returning None. 
But I'm also including this option for\r\n# folks who would rather have it raise an exception.\r\nUSE_IMAGE_NOT_FOUND_EXCEPTION = False\r\n\r\nscrotExists = False\r\ntry:\r\n if sys.platform not in ('java', 'darwin', 'win32'):\r\n whichProc = subprocess.Popen(\r\n ['which', 'scrot'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n scrotExists = whichProc.wait() == 0\r\nexcept OSError as ex:\r\n if ex.errno == errno.ENOENT:\r\n # if there is no \"which\" program to find scrot, then assume there\r\n # is no scrot.\r\n pass\r\n else:\r\n raise\r\n\r\n\r\nif sys.platform == 'win32':\r\n from ctypes import windll\r\n\r\n # win32 DC(DeviceContext) Manager\r\n @contextmanager\r\n def __win32_openDC(hWnd):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n hDC = windll.user32.GetDC(hWnd)\r\n if hDC == 0: #NULL\r\n raise WindowsError(\"windll.user32.GetDC failed : return NULL\")\r\n try:\r\n yield hDC\r\n finally:\r\n if windll.user32.ReleaseDC(hWnd, hDC) == 0:\r\n raise WindowsError(\"windll.user32.ReleaseDC failed : return 0\")\r\n\r\nBox = collections.namedtuple('Box', 'left top width height')\r\nPoint = collections.namedtuple('Point', 'x y')\r\nRGB = collections.namedtuple('RGB', 'red green blue')\r\n\r\nclass PyScreezeException(Exception):\r\n pass # This is a generic exception class raised when a PyScreeze-related error happens.\r\n\r\nclass ImageNotFoundException(PyScreezeException):\r\n pass # This is an exception class raised when the locate functions fail to locate an image.\r\n\r\n\r\ndef requiresPillow(wrappedFunction):\r\n \"\"\"\r\n A decorator that marks a function as requiring Pillow to be installed.\r\n This raises PyScreezeException if Pillow wasn't imported.\r\n \"\"\"\r\n @functools.wraps(wrappedFunction)\r\n def wrapper(*args, **kwargs):\r\n if _PILLOW_UNAVAILABLE:\r\n raise PyScreezeException('The Pillow package is required to use this function.')\r\n return wrappedFunction(*args, **kwargs)\r\n return wrapper\r\n\r\ndef load_cv2(img, grayscale=None):\r\n # forward the caller's grayscale setting to the private loader\r\n return _load_cv2(img, grayscale=grayscale)\r\n\r\ndef _load_cv2(img, grayscale=None):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # load images if given filename, or convert as needed to opencv\r\n # Alpha layer just causes failures at this point, so flatten to RGB.\r\n # RGBA: load with -1 * cv2.CV_LOAD_IMAGE_COLOR to preserve alpha\r\n # to matchTemplate, need template and image to be the same wrt having alpha\r\n\r\n if grayscale is None:\r\n grayscale = GRAYSCALE_DEFAULT\r\n if isinstance(img, (str, unicode)):\r\n # The function imread loads an image from the specified file and\r\n # returns it. If the image cannot be read (because of missing\r\n # file, improper permissions, unsupported or invalid format),\r\n # the function returns an empty matrix\r\n # http://docs.opencv.org/3.0-beta/modules/imgcodecs/doc/reading_and_writing_images.html\r\n if grayscale:\r\n img_cv = cv2.imread(img, LOAD_GRAYSCALE)\r\n else:\r\n img_cv = cv2.imread(img, LOAD_COLOR)\r\n if img_cv is None:\r\n raise IOError(\"Failed to read %s because file is missing, \"\r\n \"has improper permissions, or is an \"\r\n \"unsupported or invalid format\" % img)\r\n elif isinstance(img, numpy.ndarray):\r\n # don't try to convert an already-gray image to gray\r\n if grayscale and len(img.shape) == 3: # and img.shape[2] == 3:\r\n img_cv = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n else:\r\n img_cv = img\r\n elif hasattr(img, 'convert'):\r\n # assume its a PIL.Image, convert to cv format\r\n img_array = numpy.array(img.convert('RGB'))\r\n img_cv = img_array[:, :, ::-1].copy() # -1 does RGB -> BGR\r\n if grayscale:\r\n img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)\r\n else:\r\n raise TypeError('expected an image filename, OpenCV numpy array, or PIL image')\r\n return img_cv\r\n
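\r\n# Example (hypothetical file names): list(locateAll_opencv('needle.png', 'haystack.png'))\r\n# yields a Box(left, top, width, height) for each region of the haystack image that\r\n# matches the needle image above the given confidence.\r\n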
\r\ndef locateAll_opencv(*args,**kwargs):\r\n return _locateAll_opencv(*args,**kwargs)\r\n\r\ndef _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1,\r\n confidence=0.999,threshold=False):\r\n \"\"\"\r\n TODO - rewrite this\r\n faster but more memory-intensive than pure python\r\n step 2 skips every other row and column = ~3x faster but prone to miss;\r\n to compensate, the algorithm automatically reduces the confidence\r\n threshold by 5% (which helps but will not avoid all misses).\r\n limitations:\r\n - OpenCV 3.x & python 3.x not tested\r\n - RGBA images are treated as RGB (ignores alpha channel)\r\n \"\"\"\r\n if grayscale is None:\r\n grayscale = GRAYSCALE_DEFAULT\r\n\r\n confidence = float(confidence)\r\n\r\n needleImage = _load_cv2(needleImage, grayscale)\r\n needleHeight, needleWidth = needleImage.shape[:2]\r\n haystackImage = _load_cv2(haystackImage, grayscale)\r\n if region:\r\n haystackImage = haystackImage[region[1]:region[1]+region[3],\r\n region[0]:region[0]+region[2]]\r\n else:\r\n region = (0, 0) # full image; these values used in the yield statement\r\n \r\n # if confidence == 0.5:\r\n # cv2.imwrite(\"im_save.png\", haystackImage)\r\n \r\n if threshold:\r\n # binarize the images (currently disabled)\r\n pass\r\n # ret, needleImage = cv2.threshold(needleImage, 96, 255, cv2.THRESH_BINARY_INV)\r\n # ret, haystackImage = cv2.threshold(haystackImage, 96, 255, cv2.THRESH_BINARY_INV)\r\n\r\n if (haystackImage.shape[0] < needleImage.shape[0] or\r\n haystackImage.shape[1] < needleImage.shape[1]):\r\n # avoid semi-cryptic OpenCV error below if bad size\r\n raise ValueError('needle dimension(s) exceed the haystack image or region dimensions')\r\n if step == 2:\r\n confidence *= 0.95\r\n needleImage = needleImage[::step, ::step]\r\n haystackImage = haystackImage[::step, ::step]\r\n else:\r\n step = 1\r\n\r\n # get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805\r\n result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED)\r\n match_indices = numpy.arange(result.size)[(result > confidence).flatten()]\r\n matches = numpy.unravel_index(match_indices[:limit], result.shape)\r\n\r\n if len(matches[0]) == 0:\r\n if USE_IMAGE_NOT_FOUND_EXCEPTION:\r\n raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % 
result.max())\r\n else:\r\n return\r\n\r\n # use a generator for API consistency:\r\n matchx = matches[1] * step + region[0] # vectorized\r\n matchy = matches[0] * step + region[1]\r\n for x, y in zip(matchx, matchy):\r\n yield Box(x, y, needleWidth, needleHeight)\r\n\r\n\r\n# TODO - We should consider renaming _locateAll_python to _locateAll_pillow, since Pillow is the real dependency.\r\n@requiresPillow\r\ndef _locateAll_python(needleImage, haystackImage, grayscale=None, limit=None, region=None, step=1):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # setup all the arguments\r\n if grayscale is None:\r\n grayscale = GRAYSCALE_DEFAULT\r\n\r\n needleFileObj = None\r\n if isinstance(needleImage, (str, unicode)):\r\n # 'image' is a filename, load the Image object\r\n needleFileObj = open(needleImage, 'rb')\r\n needleImage = Image.open(needleFileObj)\r\n\r\n haystackFileObj = None\r\n if isinstance(haystackImage, (str, unicode)):\r\n # 'image' is a filename, load the Image object\r\n haystackFileObj = open(haystackImage, 'rb')\r\n haystackImage = Image.open(haystackFileObj)\r\n\r\n if region is not None:\r\n haystackImage = haystackImage.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\r\n else:\r\n region = (0, 0) # set to 0 because the code always accounts for a region\r\n\r\n if grayscale: # if grayscale mode is on, convert the needle and haystack images to grayscale\r\n needleImage = ImageOps.grayscale(needleImage)\r\n haystackImage = ImageOps.grayscale(haystackImage)\r\n else:\r\n # if not using grayscale, make sure we are comparing RGB images, not RGBA images.\r\n if needleImage.mode == 'RGBA':\r\n needleImage = needleImage.convert('RGB')\r\n if haystackImage.mode == 'RGBA':\r\n haystackImage = haystackImage.convert('RGB')\r\n\r\n # setup some constants we'll be using in this function\r\n needleWidth, needleHeight = needleImage.size\r\n haystackWidth, haystackHeight = haystackImage.size\r\n\r\n needleImageData = tuple(needleImage.getdata())\r\n haystackImageData = tuple(haystackImage.getdata())\r\n\r\n needleImageRows = [needleImageData[y * needleWidth:(y+1) * needleWidth] for y in range(needleHeight)] # LEFT OFF - check this\r\n needleImageFirstRow = needleImageRows[0]\r\n\r\n assert len(needleImageFirstRow) == needleWidth, 'For some reason, the calculated width of first row of the needle image is not the same as the width of the image.'\r\n assert [len(row) for row in needleImageRows] == [needleWidth] * needleHeight, 'For some reason, the needleImageRows aren\\'t the same size as the original image.'\r\n\r\n numMatchesFound = 0\r\n\r\n # NOTE: After running tests/benchmarks.py on the following code, it seem that having a step\r\n # value greater than 1 does not give *any* significant performance improvements.\r\n # Since using a step higher than 1 makes for less accurate matches, it will be\r\n # set to 1.\r\n step = 1 # hard-code step as 1 until a way to improve it can be figured out.\r\n\r\n if step == 1:\r\n firstFindFunc = _kmp\r\n else:\r\n firstFindFunc = _steppingFind\r\n\r\n\r\n for y in range(haystackHeight): # start at the leftmost column\r\n for matchx in firstFindFunc(needleImageFirstRow, haystackImageData[y * haystackWidth:(y+1) * haystackWidth], step):\r\n foundMatch = True\r\n for searchy in range(1, needleHeight, step):\r\n haystackStart = (searchy + y) * haystackWidth + matchx\r\n if needleImageData[searchy * needleWidth:(searchy+1) * needleWidth] != haystackImageData[haystackStart:haystackStart + needleWidth]:\r\n foundMatch = False\r\n break\r\n if 
foundMatch:\r\n # Match found, report the x, y, width, height of where the matching region is in haystack.\r\n numMatchesFound += 1\r\n yield Box(matchx + region[0], y + region[1], needleWidth, needleHeight)\r\n if limit is not None and numMatchesFound >= limit:\r\n # Limit has been reached. Close file handles.\r\n if needleFileObj is not None:\r\n needleFileObj.close()\r\n if haystackFileObj is not None:\r\n haystackFileObj.close()\r\n return\r\n\r\n\r\n # There was no limit or the limit wasn't reached, but close the file handles anyway.\r\n if needleFileObj is not None:\r\n needleFileObj.close()\r\n if haystackFileObj is not None:\r\n haystackFileObj.close()\r\n\r\n if numMatchesFound == 0:\r\n if USE_IMAGE_NOT_FOUND_EXCEPTION:\r\n raise ImageNotFoundException('Could not locate the image.')\r\n else:\r\n return\r\n\r\n\r\ndef locate(needleImage, haystackImage, **kwargs):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # Note: The gymnastics in this function is because we want to make sure to exhaust the iterator so that the needle and haystack files are closed in locateAll.\r\n kwargs['limit'] = 1\r\n points = tuple(locateAll(needleImage, haystackImage, **kwargs))\r\n if len(points) > 0:\r\n return points[0]\r\n else:\r\n if USE_IMAGE_NOT_FOUND_EXCEPTION:\r\n raise ImageNotFoundException('Could not locate the image.')\r\n else:\r\n return None\r\n\r\n\r\ndef locateAllOnScreen(image, **kwargs):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n\r\n # TODO - Should this raise an exception if zero instances of the image can be found on the screen, instead of always returning a generator?\r\n screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here.\r\n retVal = locateAll(image, screenshotIm, **kwargs)\r\n try:\r\n screenshotIm.fp.close()\r\n except AttributeError:\r\n # Screenshots on Windows won't have an fp since they came from\r\n # ImageGrab, not a file. Screenshots on Linux will have fp set\r\n # to None since the file has been unlinked\r\n pass\r\n return retVal\r\n\r\n@requiresPillow\r\ndef showRegionOnScreen(region, outlineColor='red', filename='_showRegionOnScreen.png'):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # TODO - This function is useful! 
Document it!\r\n screenshotIm = screenshot()\r\n draw = ImageDraw.Draw(screenshotIm)\r\n region = (region[0], region[1], region[2] + region[0], region[3] + region[1]) # convert from (left, top, width, height) to (left, top, right, bottom)\r\n draw.rectangle(region, outline=outlineColor)\r\n screenshotIm.save(filename)\r\n\r\n\r\n@requiresPillow\r\ndef _screenshot_win32(imageFilename=None, region=None):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # TODO - Use the winapi to get a screenshot, and compare performance with ImageGrab.grab()\r\n # https://stackoverflow.com/a/3586280/1893164\r\n im = ImageGrab.grab()\r\n if region is not None:\r\n assert len(region) == 4, 'region argument must be a tuple of four ints'\r\n region = [int(x) for x in region]\r\n im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))\r\n if imageFilename is not None:\r\n im.save(imageFilename)\r\n return im\r\n\r\n\r\ndef _screenshot_osx(imageFilename=None, region=None):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # TODO - use tmp name for this file.\r\n if imageFilename is None:\r\n tmpFilename = 'screenshot%s.png' % (datetime.datetime.now().strftime('%Y-%m%d_%H-%M-%S-%f'))\r\n else:\r\n tmpFilename = imageFilename\r\n subprocess.call(['screencapture', '-x', tmpFilename])\r\n im = Image.open(tmpFilename)\r\n\r\n if region is not None:\r\n assert len(region) == 4, 'region argument must be a tuple of four ints'\r\n region = [int(x) for x in region]\r\n im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))\r\n os.unlink(tmpFilename) # delete image of entire screen to save cropped version\r\n im.save(tmpFilename)\r\n else:\r\n # force loading before unlinking, Image.open() is lazy\r\n im.load()\r\n\r\n if imageFilename is None:\r\n os.unlink(tmpFilename)\r\n return im\r\n\r\n\r\ndef _screenshot_linux(imageFilename=None, region=None):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n if not scrotExists:\r\n raise NotImplementedError('\"scrot\" must be installed to use screenshot functions in Linux. Run: sudo apt-get install scrot')\r\n if imageFilename is None:\r\n tmpFilename = '.screenshot%s.png' % (datetime.datetime.now().strftime('%Y-%m%d_%H-%M-%S-%f'))\r\n else:\r\n tmpFilename = imageFilename\r\n if scrotExists:\r\n subprocess.call(['scrot', '-z', tmpFilename])\r\n im = Image.open(tmpFilename)\r\n\r\n if region is not None:\r\n assert len(region) == 4, 'region argument must be a tuple of four ints'\r\n region = [int(x) for x in region]\r\n im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))\r\n os.unlink(tmpFilename) # delete image of entire screen to save cropped version\r\n im.save(tmpFilename)\r\n else:\r\n # force loading before unlinking, Image.open() is lazy\r\n im.load()\r\n\r\n if imageFilename is None:\r\n os.unlink(tmpFilename)\r\n return im\r\n else:\r\n raise Exception('The scrot program must be installed to take a screenshot with PyScreeze on Linux. 
Run: sudo apt-get install scrot')\r\n\r\n\r\n\r\ndef _kmp(needle, haystack, _dummy): # Knuth-Morris-Pratt search algorithm implementation (to be used by screen capture)\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n # build table of shift amounts\r\n shifts = [1] * (len(needle) + 1)\r\n shift = 1\r\n for pos in range(len(needle)):\r\n while shift <= pos and needle[pos] != needle[pos-shift]:\r\n shift += shifts[pos-shift]\r\n shifts[pos+1] = shift\r\n\r\n # do the actual search\r\n startPos = 0\r\n matchLen = 0\r\n for c in haystack:\r\n while matchLen == len(needle) or \\\r\n matchLen >= 0 and needle[matchLen] != c:\r\n startPos += shifts[matchLen]\r\n matchLen -= shifts[matchLen]\r\n matchLen += 1\r\n if matchLen == len(needle):\r\n yield startPos\r\n\r\n\r\ndef _steppingFind(needle, haystack, step):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n for startPos in range(0, len(haystack) - len(needle) + 1):\r\n foundMatch = True\r\n for pos in range(0, len(needle), step):\r\n if haystack[startPos + pos] != needle[pos]:\r\n foundMatch = False\r\n break\r\n if foundMatch:\r\n yield startPos\r\n\r\ndef pixelMatchesColor(x, y, expectedRGBColor, tolerance=0):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n pix = pixel(x, y)\r\n if len(pix) == 3 or len(expectedRGBColor) == 3: #RGB mode\r\n r, g, b = pix[:3]\r\n exR, exG, exB = expectedRGBColor[:3]\r\n return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance)\r\n elif len(pix) == 4 and len(expectedRGBColor) == 4: #RGBA mode\r\n r, g, b, a = pix\r\n exR, exG, exB, exA = expectedRGBColor\r\n return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance) and (abs(a - exA) <= tolerance)\r\n else:\r\n assert False, 'Color mode was expected to be length 3 (RGB) or 4 (RGBA), but pixel is length %s and expectedRGBColor is length %s' % (len(pix), len(expectedRGBColor))\r\n\r\ndef pixel(x, y):\r\n \"\"\"\r\n TODO\r\n \"\"\"\r\n if sys.platform == 'win32':\r\n # On Windows, calling GetDC() and GetPixel() is twice as fast as using our screenshot() function.\r\n with __win32_openDC(0) as hdc: # handle will be released automatically\r\n color = windll.gdi32.GetPixel(hdc, x, y)\r\n if color < 0:\r\n raise WindowsError(\"windll.gdi32.GetPixel failed : return {}\".format(color))\r\n # color is in the format 0xbbggrr https://msdn.microsoft.com/en-us/library/windows/desktop/dd183449(v=vs.85).aspx\r\n bbggrr = \"{:0>6x}\".format(color) # bbggrr => 'bbggrr' (hex)\r\n b, g, r = (int(bbggrr[i:i+2], 16) for i in range(0, 6, 2))\r\n return (r, g, b)\r\n else:\r\n # Need to select only the first three values of the color in\r\n # case the returned pixel has an alpha channel\r\n return RGB(*(screenshot().getpixel((x, y))[:3]))\r\n\r\n\r\n# set the screenshot() function based on the platform running this module\r\nif sys.platform.startswith('java'):\r\n raise NotImplementedError('Jython is not yet supported by PyScreeze.')\r\nelif sys.platform == 'darwin':\r\n screenshot = _screenshot_osx\r\nelif sys.platform == 'win32':\r\n screenshot = _screenshot_win32\r\nelse: # TODO - Make this more specific. 
\"Anything else\" does not necessarily mean \"Linux\".\r\n screenshot = _screenshot_linux\r\n\r\ngrab = screenshot # for compatibility with Pillow/PIL's ImageGrab module.\r\n\r\n# set the locateAll function to use opencv if possible; python 3 needs opencv 3.0+\r\n# TODO - Should this raise an exception if zero instances of the image can be found on the screen, instead of always returning a generator?\r\nif useOpenCV:\r\n locateAll = _locateAll_opencv\r\n if not RUNNING_PYTHON_2 and cv2.__version__ < '3':\r\n locateAll = _locateAll_python\r\nelse:\r\n locateAll = _locateAll_python\r\n" ]
[ [ "numpy.arange", "numpy.unravel_index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
avivajpeyi/gw_pe_judge
[ "151d597fdd6128a278e1d4cff65d3e6776e1fa83", "151d597fdd6128a278e1d4cff65d3e6776e1fa83" ]
[ "deep_gw_pe_followup/plotting/ci.py", "tests/test_prob_calculators.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\ndef plot_ci(x, y, ax, label='', alpha=0.7, zorder=-10):\n cols = ['#EE7550', '#F19463', '#F6B176']\n ci = bin_by(x, y)\n # plot the 3rd stdv\n ax.fill_between(ci.x, ci['5th'], ci['95th'], alpha=alpha, color=cols[2], zorder=zorder - 3)\n ax.fill_between(ci.x, ci['10th'], ci['90th'], alpha=alpha, color=cols[1], zorder=zorder - 2)\n ax.fill_between(ci.x, ci['25th'], ci['75th'], alpha=alpha, color=cols[0], zorder=zorder - 1)\n # plt the line\n ax.plot(ci.x, ci['median'], color=cols[0], label=label, zorder=zorder)\n\n\ndef bin_by(x, y, nbins=30, bins=None):\n \"\"\"\n Divide the x axis into sections and return groups of y based on its x value\n \"\"\"\n if bins is None:\n bins = np.linspace(x.min(), x.max(), nbins)\n\n bin_space = (bins[-1] - bins[0]) / (len(bins) - 1) / 2\n\n indicies = np.digitize(x, bins + bin_space)\n\n output = []\n for i in range(0, len(bins)):\n output.append(y[indicies == i])\n #\n # prepare a dataframe with cols: median; mean; 1up, 1dn, 2up, 2dn, 3up, 3dn\n df_names = ['mean', 'median', '5th', '95th', '10th', '90th', '25th', '75th']\n df = pd.DataFrame(columns=df_names)\n to_delete = []\n # for each bin, determine the std ranges\n for y_set in output:\n if y_set.size > 0:\n av = y_set.mean()\n intervals = np.percentile(y_set, q=[50, 5, 95, 10, 90, 25, 75])\n res = [av] + list(intervals)\n df = df.append(pd.DataFrame([res], columns=df_names))\n else:\n # just in case there are no elements in the bin\n to_delete.append(len(df) + 1 + len(to_delete))\n\n # add x values\n bins = np.delete(bins, to_delete)\n df['x'] = bins\n\n return df\n", "import multiprocessing\nimport os\nimport unittest\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom bilby.core.prior import DeltaFunction, PriorDict, Uniform\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\nfrom deep_gw_pe_followup.sample_cacher.cacher import (load_probabilities,\n store_probabilities)\nfrom deep_gw_pe_followup.restricted_prior.conversions import calc_xeff\nfrom deep_gw_pe_followup.plotting import plot_ci, plot_probs\nfrom deep_gw_pe_followup.restricted_prior.prior import RestrictedPrior\nfrom deep_gw_pe_followup.restricted_prior.prob_calculators import (\n get_p_cos1_given_xeff_q_a1,\n get_p_cos2_given_xeff_q_a1_cos1)\n\nnum_cores = multiprocessing.cpu_count()\n\nplt.style.use(\n 'https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/55fab35b1c811dc3c0fdc6d169d67607e2c3edfc/publication.mplstyle')\n\nCLEAN_AFTER = False\n\n\nclass TestProbCalculators(unittest.TestCase):\n\n def setUp(self):\n self.outdir = \"./out_restricted_prior\"\n os.makedirs(self.outdir, exist_ok=True)\n\n def tearDown(self):\n import shutil\n if os.path.exists(self.outdir) and CLEAN_AFTER:\n shutil.rmtree(self.outdir)\n\n def test_pa1(self):\n self.p_a1_computer(xeff=0.3, q=0.9, fname=f'{self.outdir}/pa1_test1.png')\n self.p_a1_computer(xeff=-0.4, q=0.8, fname=f'{self.outdir}/pa1_test2.png')\n\n def test_pcos1_given_a1_xeff_q(self):\n self.p_cos1_a1_computer(xeff=0.3, q=0.9, fname=f'{self.outdir}/pcos1a1_test1.png')\n\n def test_pcos2_given_cos1_a1_xeff_q(self):\n self.p_cos2_computer(xeff=0.3, q=0.9, cos1=0.4, a1=0.2, fname=f'{self.outdir}/pcos2_test1.png')\n\n def p_cos2_computer(self, xeff, q, cos1, a1, fname='pcos2_test.png'):\n dc2 = 0.01\n cos2s = np.arange(-1, 1, dc2)\n p_c2 = np.array(\n [get_p_cos2_given_xeff_q_a1_cos1(xeff=xeff, q=q, a1=a1, cos1=cos1, cos2=cos2) for cos2 in cos2s])\n p_c2 = p_c2 / np.sum(p_c2) / 
dc2\n plt.plot(cos2s, p_c2)\n plt.xlabel('cos2')\n plt.ylabel('p(cos2)')\n plt.title(f\"xeff={xeff},q={q}, a1={a1},cos1={cos1}\")\n plt.savefig(fname)\n\n def p_cos1_a1_computer(self, xeff=0.3, q=0.9, fname='pa1_test.png'):\n mc_integral_n = int(5e4)\n dcos1, da1 = 0.01, 0.005\n a1s = np.arange(0, 1, da1)\n cos1s = np.arange(-1, 1, dcos1)\n\n samples_fname = f\"{self.outdir}/cached_pcos1a1.h5\"\n if os.path.isfile(samples_fname):\n data = load_probabilities(samples_fname)\n else:\n data = dict(a1=np.array([]), cos1=np.array([]), p=np.array([]))\n for a1 in tqdm(a1s):\n p_cos1_for_a1 = Parallel(n_jobs=num_cores)(\n delayed(get_p_cos1_given_xeff_q_a1)(cos1, a1, xeff, q, mc_integral_n) for cos1 in cos1s)\n data['a1'] = np.append(data['a1'], np.array([a1 for _ in cos1s]))\n data['cos1'] = np.append(data['cos1'], cos1s)\n data['p'] = np.append(data['p'], p_cos1_for_a1)\n data = pd.DataFrame(data)\n store_probabilities(data, f\"{self.outdir}/cached_pcos1a1.h5\")\n\n s = self.sample_uniform_dist(int(1e6), q, xeff)\n fig, axes = plot_probs(data.a1, data.cos1, data.p, plabel='p', xlabel='a1', ylabel='cos1', fname=fname)\n for ax in axes:\n ax.plot(s.a1, s.cos1, ',w')\n ax.set_xlim(0, 1)\n ax.set_ylim(-1, 1)\n fig.tight_layout()\n fig.savefig(fname)\n fig, axes = plot_probs(data.a1, data.cos1, np.log(data.p), plabel='lnp', xlabel='a1', ylabel='cos1',\n fname=fname.replace(\".png\", \"_lnp.png\"))\n for ax in axes:\n ax.plot(s.a1, s.cos1, ',w')\n ax.set_xlim(0, 1)\n ax.set_ylim(-1, 1)\n fig.tight_layout()\n fig.savefig(fname.replace(\".png\", \"_lnp.png\"))\n\n def p_a1_computer(self, xeff=0.3, q=0.9, fname='pa1_test.png', n=int(1e5)):\n\n mc_integral_n = int(5e4)\n dcos1, da1 = 0.01, 0.005\n a1s = np.arange(0, 1, da1)\n cos1s = np.arange(-1, 1, dcos1)\n\n p = RestrictedPrior(dictionary=dict(q=q, xeff=xeff), build_cache=False).get_a1_prior()\n p_a1 = p.prob(a1s)\n\n # rejection-sampled distribution\n data = dict(x=np.array([]), y=np.array([]))\n for i in range(10):\n tolerance = 0.01\n s = self.sample_uniform_dist(n, q, xeff, xeff_tol=tolerance)\n s = s[np.abs(s['xeff'] - xeff) <= tolerance]\n p_a1_rej, bins = np.histogram(s.a1, bins=a1s, density=True)\n p_a1_rej = p_a1_rej / np.sum(p_a1_rej) / da1\n a1_cents = 0.5 * (bins[1:] + bins[:-1])\n data['x'] = np.append(data['x'], a1_cents)\n data['y'] = np.append(data['y'], p_a1_rej)\n\n fig, ax = plt.subplots(1, 1)\n\n plot_ci(data['x'], data['y'], ax, label=\"Rejection Sampling\", alpha=0.3)\n ax.plot(a1s, p_a1, label=\"Analytical\", color='tab:blue')\n ax.set_xlabel('$a_1$')\n ax.set_ylabel(r'$p(a_1|\\chi_{\\rm eff},q)$')\n ax.set_title(r\"$\\chi_{\\rm eff} = \" + str(xeff) + \", q=\" + str(q) + \"$\")\n ax.set_xlim(0, 1)\n ax.legend()\n plt.tight_layout()\n fig.savefig(fname)\n\n def sample_uniform_dist(self, n, q, xeff, xeff_tol=0.01):\n s = pd.DataFrame(PriorDict(dict(\n a1=Uniform(0, 1),\n a2=Uniform(0, 1),\n cos1=Uniform(-1, 1),\n cos2=Uniform(-1, 1),\n q=DeltaFunction(q),\n )).sample(n))\n s['xeff'] = calc_xeff(**s.to_dict('list'))\n s = s[np.abs(s['xeff'] - xeff) <= xeff_tol]\n return s\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.digitize", "numpy.percentile", "numpy.delete", "pandas.DataFrame" ], [ "numpy.log", "matplotlib.pyplot.tight_layout", "numpy.sum", "numpy.abs", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.append", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.histogram", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
seva100/grid-artefacts-mwe
[ "c6914370ff6603dcf3fe0a9cb72221aa9dd9ebc1" ]
[ "train.py" ]
[ "import torch\nimport torch.nn as nn\nfrom backbone.model_resnet import ResNet_50\n\n# from tensorboardX import SummaryWriter\n# import wandb\n# from tqdm import tqdm\nimport time\nimport os\nfrom glob import glob\n\nif __name__ == '__main__':\n # os.makedirs(os.path.join(MODEL_ROOT, EXP_NAME), exist_ok=True)\n # os.makedirs(LOG_ROOT, exist_ok=True)\n\n MODEL_ROOT = '.'\n EXP_NAME = '123'\n DEVICE = 'cuda:0'\n GPU_ID = [0]\n\n os.makedirs(os.path.join(MODEL_ROOT, EXP_NAME), exist_ok=True)\n\n # wandb.init(project=PROJECT_NAME, config=cfg)\n\n #======= model & loss & optimizer =======#\n BACKBONE = ResNet_50([112, 112])\n\n BACKBONE = nn.DataParallel(BACKBONE, device_ids = GPU_ID)\n BACKBONE = BACKBONE.to(DEVICE)\n \n for i in range(1000):\n torch.save(BACKBONE.module.state_dict(), os.path.join(MODEL_ROOT, \"Backbone_checkpoint.pth\"))\n time.sleep(100)\n" ]
[ [ "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jsk389/lightkurve
[ "2fe031708f4c241b61796ccdacf658717b2ffa44" ]
[ "lightkurve/tests/test_targetpixelfile.py" ]
[ "from __future__ import division, print_function\n\nimport os\nfrom astropy.utils.data import get_pkg_data_filename\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\nimport tempfile\n\nfrom ..targetpixelfile import KeplerTargetPixelFile, KeplerTargetPixelFileFactory\nfrom ..targetpixelfile import TessTargetPixelFile\n\n\nfilename_tpf_all_zeros = get_pkg_data_filename(\"data/test-tpf-all-zeros.fits\")\nfilename_tpf_one_center = get_pkg_data_filename(\"data/test-tpf-non-zero-center.fits\")\nTABBY_Q8 = (\"https://archive.stsci.edu/missions/kepler/lightcurves\"\n \"/0084/008462852/kplr008462852-2011073133259_llc.fits\")\nTABBY_TPF = (\"https://archive.stsci.edu/missions/kepler/target_pixel_files\"\n \"/0084/008462852/kplr008462852-2011073133259_lpd-targ.fits.gz\")\nTESS_SIM = (\"https://archive.stsci.edu/missions/tess/ete-6/tid/00/000\"\n \"/004/176/tess2019128220341-0000000417699452-0016-s_tp.fits\")\n\[email protected]_data\ndef test_load_bad_file():\n '''Test if a light curve can be opened without exception.'''\n with pytest.raises(ValueError) as exc:\n tpf = KeplerTargetPixelFile(TABBY_Q8)\n assert('is this a target pixel file?' in exc.value.args[0])\n with pytest.raises(ValueError) as exc:\n tpf = TessTargetPixelFile(TABBY_Q8)\n assert('is this a target pixel file?' in exc.value.args[0])\n\n\ndef test_tpf_shapes():\n \"\"\"Are the data array shapes of the TargetPixelFile object consistent?\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n assert tpf.quality_mask.shape == tpf.hdu[1].data['TIME'].shape\n assert tpf.flux.shape == tpf.flux_err.shape\n\n\ndef test_tpf_plot():\n \"\"\"Sanity check to verify that tpf plotting works\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_one_center),\n TessTargetPixelFile(filename_tpf_one_center)]:\n ax = tpf.plot()\n tpf.plot(aperture_mask=tpf.pipeline_mask)\n tpf.plot(aperture_mask='all')\n tpf.plot(frame=5)\n with pytest.raises(ValueError):\n tpf.plot(frame=999999)\n tpf.plot(cadenceno=125250)\n with pytest.raises(ValueError):\n tpf.plot(cadenceno=999)\n tpf.plot(bkg=True)\n tpf.plot(scale=\"sqrt\")\n tpf.plot(scale=\"log\")\n with pytest.raises(ValueError):\n tpf.plot(scale=\"blabla\")\n plt.close('all')\n\n\ndef test_tpf_zeros():\n \"\"\"Does the LightCurve of a zero-flux TPF make sense?\"\"\"\n tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask=None)\n lc = tpf.to_lightcurve()\n # If you don't mask out bad data, time contains NaNs\n assert np.any(lc.time != tpf.time) # Using the property that NaN does not equal NaN\n # When you do mask out bad data everything should work.\n assert (tpf.astropy_time.jd == 0).any()\n tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask='hard')\n lc = tpf.to_lightcurve()\n assert len(lc.time) == len(lc.flux)\n assert np.all(lc.time == tpf.time)\n assert np.all(lc.flux == 0)\n # The default QUALITY bitmask should have removed all NaNs in the TIME\n assert ~np.any(np.isnan(tpf.time))\n\n\ndef test_tpf_ones():\n \"\"\"Does the LightCurve of a one-flux TPF make sense?\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_one_center),\n TessTargetPixelFile(filename_tpf_one_center)]:\n lc = tpf.to_lightcurve(aperture_mask='all')\n assert np.all(lc.flux == 1)\n assert np.all((lc.centroid_col < tpf.column+tpf.shape[1]).all()\n * (lc.centroid_col > tpf.column).all())\n assert np.all((lc.centroid_row < tpf.row+tpf.shape[2]).all()\n * (lc.centroid_row > 
tpf.row).all())\n\n\[email protected](\"quality_bitmask,answer\", [(None, 1290), ('none', 1290),\n ('default', 1233),\n ('hard', 1101), ('hardest', 1101),\n (1, 1290), (100, 1278), (2096639, 1101)])\ndef test_bitmasking(quality_bitmask, answer):\n '''Test whether the bitmasking behaves like it should'''\n tpf = KeplerTargetPixelFile(filename_tpf_one_center, quality_bitmask=quality_bitmask)\n lc = tpf.to_lightcurve()\n flux = lc.flux\n assert len(flux) == answer\n\n\ndef test_wcs():\n \"\"\"Test the wcs property.\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_one_center),\n TessTargetPixelFile(filename_tpf_one_center)]:\n w = tpf.wcs\n ra, dec = tpf.get_coordinates()\n assert ra.shape == tpf.shape\n assert dec.shape == tpf.shape\n assert type(w).__name__ == 'WCS'\n\n\ndef test_wcs_tabby():\n '''Test the centroids from Tabby's star against simbad values'''\n tpf = KeplerTargetPixelFile(TABBY_TPF)\n w = tpf.wcs\n ra, dec = tpf.get_coordinates(0)\n col, row = tpf.centroids()\n col -= tpf.column\n row -= tpf.row\n y, x = int(np.round(col[0])), int(np.round(row[1]))\n # Compare with RA and Dec from Simbad\n assert np.isclose(ra[x, y], 301.5643971, 1e-4)\n assert np.isclose(dec[x, y], 44.4568869, 1e-4)\n\n\ndef test_astropy_time():\n '''Test the lc.date() function'''\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n astropy_time = tpf.astropy_time\n assert astropy_time.scale == 'tdb'\n assert len(astropy_time.iso) == len(tpf.time)\n\n\ndef test_properties():\n \"\"\"Test the short-hand properties.\"\"\"\n tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)\n assert(tpf.channel == tpf.hdu[0].header['CHANNEL'])\n assert(tpf.module == tpf.hdu[0].header['MODULE'])\n assert(tpf.output == tpf.hdu[0].header['OUTPUT'])\n assert(tpf.ra == tpf.hdu[0].header['RA_OBJ'])\n assert(tpf.dec == tpf.hdu[0].header['DEC_OBJ'])\n assert_array_equal(tpf.flux, tpf.hdu[1].data['FLUX'][tpf.quality_mask])\n assert_array_equal(tpf.flux_err, tpf.hdu[1].data['FLUX_ERR'][tpf.quality_mask])\n assert_array_equal(tpf.flux_bkg, tpf.hdu[1].data['FLUX_BKG'][tpf.quality_mask])\n assert_array_equal(tpf.flux_bkg_err, tpf.hdu[1].data['FLUX_BKG_ERR'][tpf.quality_mask])\n assert_array_equal(tpf.quality, tpf.hdu[1].data['QUALITY'][tpf.quality_mask])\n assert(tpf.campaign == tpf.hdu[0].header['CAMPAIGN'])\n assert(tpf.quarter is None)\n\n\ndef test_repr():\n \"\"\"Do __str__ and __repr__ work?\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n str(tpf)\n repr(tpf)\n\n\ndef test_to_lightcurve():\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n tpf.to_lightcurve()\n tpf.to_lightcurve(aperture_mask=None)\n tpf.to_lightcurve(aperture_mask='all')\n lc = tpf.to_lightcurve(aperture_mask='pipeline')\n assert lc.astropy_time.scale == 'tdb'\n\n\ndef test_bkg_lightcurve():\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n lc = tpf.get_bkg_lightcurve()\n lc = tpf.get_bkg_lightcurve(aperture_mask=None)\n lc = tpf.get_bkg_lightcurve(aperture_mask='all')\n assert lc.astropy_time.scale == 'tdb'\n assert lc.flux.shape == lc.flux_err.shape\n assert len(lc.time) == len(lc.flux)\n\n\ndef test_aperture_photometry():\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n tpf.aperture_photometry()\n tpf.aperture_photometry(aperture_mask=None)\n 
tpf.aperture_photometry(aperture_mask='all')\n tpf.aperture_photometry(aperture_mask='pipeline')\n\ndef test_tpf_to_fits():\n \"\"\"Can we write a TPF back to a fits file?\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_all_zeros),\n TessTargetPixelFile(filename_tpf_all_zeros)]:\n # `delete=False` is necessary to enable writing to the file on Windows\n # but it means we have to clean up the tmp file ourselves\n tmp = tempfile.NamedTemporaryFile(delete=False)\n try:\n tpf.to_fits(tmp.name)\n finally:\n tmp.close()\n os.remove(tmp.name)\n\n\ndef test_tpf_factory():\n \"\"\"Can we create TPFs using KeplerTargetPixelFileFactory?\"\"\"\n factory = KeplerTargetPixelFileFactory(n_cadences=10, n_rows=6, n_cols=8)\n flux_0 = np.ones((6, 8))\n factory.add_cadence(frameno=0, flux=flux_0,\n header={'TSTART': 0, 'TSTOP': 10})\n flux_9 = 3 * np.ones((6, 8))\n factory.add_cadence(frameno=9, flux=flux_9,\n header={'TSTART': 90, 'TSTOP': 100})\n tpf = factory.get_tpf()\n assert_array_equal(tpf.flux[0], flux_0)\n assert_array_equal(tpf.flux[9], flux_9)\n assert(tpf.time[0] == 5)\n assert(tpf.time[9] == 95)\n\n\ndef test_tpf_from_images():\n \"\"\"Basic tests of tpf.from_fits_images()\"\"\"\n from astropy.io.fits import ImageHDU\n images = [ImageHDU(data=np.ones((5, 5))) for i in range(5)]\n tpf = KeplerTargetPixelFile.from_fits_images(images, size=(3, 3))\n\n\ndef test_properties2(capfd):\n '''Test if the describe function produces an output.\n The output is 1870 characters at the moment, but we might add more properties.'''\n tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)\n tpf.properties()\n out, err = capfd.readouterr()\n assert len(out) > 1000\n\n\ndef test_interact():\n \"\"\"Test the Jupyter notebook interact() widget.\"\"\"\n for tpf in [KeplerTargetPixelFile(filename_tpf_one_center),\n TessTargetPixelFile(filename_tpf_one_center)]:\n tpf.interact()\n tpf.interact(lc=tpf.to_lightcurve(aperture_mask='all'))\n\n\ndef test_from_archive_should_accept_path():\n \"\"\"If a path is accidentally passed to `from_archive` it should still just work.\"\"\"\n KeplerTargetPixelFile.from_archive(filename_tpf_all_zeros)\n\ndef test_from_fits():\n \"\"\"Does the tpf.from_fits() method work like the constructor?\"\"\"\n tpf = KeplerTargetPixelFile.from_fits(filename_tpf_one_center)\n assert isinstance(tpf, KeplerTargetPixelFile)\n assert tpf.keplerid == KeplerTargetPixelFile(filename_tpf_one_center).keplerid\n assert tpf.keplerid == tpf.targetid\n # Execute the same test for TESS\n tpf = TessTargetPixelFile.from_fits(filename_tpf_one_center)\n assert isinstance(tpf, TessTargetPixelFile)\n assert tpf.ticid == TessTargetPixelFile(filename_tpf_one_center).ticid\n assert tpf.ticid == tpf.targetid\n\n\ndef test_get_models():\n \"\"\"Can we obtain PRF and TPF models?\"\"\"\n tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask=None)\n tpf.get_model()\n tpf.get_prf_model()\n\n\n#@pytest.mark.remote_data\ndef test_tess_simulation():\n \"\"\"Can we read simulated TESS data?\"\"\"\n tpf = TessTargetPixelFile(TESS_SIM)\n assert tpf.mission == 'TESS'\n assert tpf.astropy_time.scale == 'tdb'\n assert tpf.flux.shape == tpf.flux_err.shape\n tpf.wcs\n col, row = tpf.centroids()\n" ]
[ [ "numpy.isnan", "numpy.ones", "numpy.all", "numpy.testing.assert_array_equal", "numpy.round", "numpy.any", "matplotlib.pyplot.close", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leejaeyong7/OpenSfM
[ "893354cdd2f131782407d7d0be88c8fe92911b6f", "893354cdd2f131782407d7d0be88c8fe92911b6f" ]
[ "opensfm/test/test_reconstruction_resect.py", "opensfm/feature_loading.py" ]
[ "import numpy as np\nimport networkx as nx\n\nfrom opensfm import reconstruction\nfrom opensfm import multiview\nfrom opensfm import config\nfrom opensfm import types\nfrom opensfm.test import data_generation\n\n\ndef test_corresponding_tracks():\n t1 = {1: {\"feature_id\": 1}}\n t2 = {1: {\"feature_id\": 2}}\n\n correspondences = reconstruction.corresponding_tracks(t1, t2)\n assert len(correspondences) == 0\n\n t1 = {1: {\"feature_id\": 3}}\n t2 = {2: {\"feature_id\": 3}}\n\n correspondences = reconstruction.corresponding_tracks(t1, t2)\n assert len(correspondences) == 1\n assert correspondences[0] == (1, 2)\n\n t1 = {1: {\"feature_id\": 3}, 2: {\"feature_id\": 4}}\n t2 = {1: {\"feature_id\": 4}, 2: {\"feature_id\": 5}}\n\n correspondences = reconstruction.corresponding_tracks(t1, t2)\n assert len(correspondences) == 1\n assert correspondences[0] == (2, 1)\n\n t1 = {1: {\"feature_id\": 5}, 2: {\"feature_id\": 6}}\n t2 = {3: {\"feature_id\": 5}, 4: {\"feature_id\": 6}}\n\n correspondences = reconstruction.corresponding_tracks(t1, t2)\n correspondences.sort(key=lambda c: c[0] + c[1])\n assert len(correspondences) == 2\n assert correspondences[0] == (1, 3)\n assert correspondences[1] == (2, 4)\n\n\ndef synthetic_reconstruction():\n cube_dataset = data_generation.CubeDataset(10, 100, 0.001, 0.3)\n synthetic_reconstruction = types.Reconstruction()\n for shot in cube_dataset.shots.values():\n synthetic_reconstruction.add_shot(shot)\n for camera in cube_dataset.cameras.values():\n synthetic_reconstruction.add_camera(camera)\n for point_id, point in cube_dataset.points.items():\n point_type = types.Point()\n point_type.coordinates = point\n point_type.id = point_id\n synthetic_reconstruction.add_point(point_type)\n return synthetic_reconstruction, cube_dataset.tracks\n\n\ndef copy_cluster_points(cluster, tracks, points, noise):\n for shot in cluster.shots:\n for point in tracks[shot]:\n base = points[point]\n copy = types.Point()\n copy.id = base.id\n copy.coordinates = base.coordinates+np.random.rand()*noise\n cluster.add_point(copy)\n return cluster\n\n\ndef split_synthetic_reconstruction(synthetic_reconstruction,\n synthetic_tracks,\n cluster_size,\n point_noise):\n cluster1 = types.Reconstruction()\n cluster2 = types.Reconstruction()\n cluster1.cameras = synthetic_reconstruction.cameras\n cluster2.cameras = synthetic_reconstruction.cameras\n for (i, shot) in zip(range(len(synthetic_reconstruction.shots)),\n synthetic_reconstruction.shots.values()):\n if(i >= cluster_size):\n cluster2.add_shot(shot)\n if(i <= cluster_size):\n cluster1.add_shot(shot)\n\n cluster1 = copy_cluster_points(\n cluster1, synthetic_tracks, synthetic_reconstruction.points,\n point_noise)\n cluster2 = copy_cluster_points(\n cluster2, synthetic_tracks, synthetic_reconstruction.points,\n point_noise)\n return cluster1, cluster2\n\n\ndef move_and_scale_cluster(cluster):\n scale = np.random.rand(1)\n translation = np.random.rand(3)\n for point in cluster.points.values():\n point.coordinates = scale*point.coordinates + translation\n return cluster, translation, scale\n\n\ndef test_absolute_pose_single_shot():\n \"\"\"Single-camera resection on a toy reconstruction with\n 1/1000 pixel noise and zero outliers.\"\"\"\n parameters = config.default_config()\n synthetic_data, synthetic_tracks = synthetic_reconstruction()\n\n shot_id = 'shot1'\n camera_id = 'camera1'\n metadata = types.ShotMetadata()\n camera = synthetic_data.cameras[camera_id]\n\n graph_inliers = nx.Graph()\n shot_before = synthetic_data.shots[shot_id]\n status, 
report = reconstruction.resect(synthetic_tracks, graph_inliers,\n synthetic_data, shot_id,\n camera, metadata,\n parameters['resection_threshold'],\n parameters['resection_min_inliers'])\n shot_after = synthetic_data.shots[shot_id]\n\n assert status is True\n assert report['num_inliers'] == len(graph_inliers.edges())\n assert report['num_inliers'] is len(synthetic_data.points)\n np.testing.assert_almost_equal(\n shot_before.pose.rotation, shot_after.pose.rotation, 1)\n np.testing.assert_almost_equal(\n shot_before.pose.translation, shot_after.pose.translation, 1)\n\n\ndef test_absolute_pose_generalized_shot():\n \"\"\"Whole reconstruction resection (generalized pose) on a toy\n reconstruction with 0.01 meter point noise and zero outliers.\"\"\"\n noise = 0.01\n parameters = config.default_config()\n scene, tracks = synthetic_reconstruction()\n cluster1, cluster2 = split_synthetic_reconstruction(\n scene, tracks, 3, noise)\n cluster2, translation, scale = move_and_scale_cluster(cluster2)\n\n status, T, inliers = reconstruction.\\\n resect_reconstruction(cluster1, cluster2,\n tracks, tracks,\n 2*noise,\n parameters['resection_min_inliers'])\n\n assert status is True\n s, A, b = multiview.decompose_similarity_transform(T)\n np.testing.assert_almost_equal(scale, s, 2)\n np.testing.assert_almost_equal(np.eye(3), A, 2)\n np.testing.assert_almost_equal(translation, b, 2)\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport numpy as np\nfrom repoze.lru import LRUCache\n\nfrom opensfm import features as ft\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FeatureLoader(object):\n def __init__(self):\n self.points_cache = LRUCache(1000)\n self.colors_cache = LRUCache(1000)\n self.features_cache = LRUCache(200)\n self.words_cache = LRUCache(200)\n self.masks_cache = LRUCache(1000)\n self.index_cache = LRUCache(200)\n self.masked_index_cache = LRUCache(200)\n\n def clear_cache(self):\n self.points_cache.clear()\n self.colors_cache.clear()\n self.features_cache.clear()\n self.words_cache.clear()\n self.masks_cache.clear()\n\n def load_mask(self, data, image, points=None):\n masks = self.masks_cache.get(image)\n if masks is None:\n if points is None:\n points, _ = self.load_points_colors(data, image, masked=False)\n masks = data.load_features_mask(image, points[:, :2])\n self.masks_cache.put(image, masks)\n return masks\n\n def load_points_colors(self, data, image, masked=False):\n points = self.points_cache.get(image)\n colors = self.colors_cache.get(image)\n if points is None or colors is None:\n points, _, colors = self._load_features_nocache(data, image)\n self.points_cache.put(image, points)\n self.colors_cache.put(image, colors)\n if masked:\n mask = self.load_mask(data, image, points)\n if mask is not None:\n points = points[mask]\n colors = colors[mask]\n return points, colors\n\n def load_points_features_colors(self, data, image, masked=False):\n points = self.points_cache.get(image)\n features = self.features_cache.get(image)\n colors = self.colors_cache.get(image)\n if points is None or features is None or colors is None:\n points, features, colors = self._load_features_nocache(data, image)\n self.points_cache.put(image, points)\n self.features_cache.put(image, features)\n self.colors_cache.put(image, colors)\n if masked:\n mask = self.load_mask(data, image, points)\n if mask is not None:\n points = points[mask]\n features = features[mask]\n colors = 
colors[mask]\n return points, features, colors\n\n def load_features_index(self, data, image, masked=False):\n cache = self.masked_index_cache if masked else self.index_cache\n cached = cache.get(image)\n if cached is None:\n _, features, _ = self.load_points_features_colors(data, image,\n masked)\n index = ft.build_flann_index(features, data.config)\n cache.put(image, (features, index))\n else:\n features, index = cached\n return index\n\n def load_words(self, data, image, masked):\n words = self.words_cache.get(image)\n if words is None:\n words = data.load_words(image)\n self.words_cache.put(image, words)\n if masked and words is not None:\n mask = self.load_mask(data, image)\n if mask is not None:\n words = words[mask]\n return words\n\n def _load_features_nocache(self, data, image):\n points, features, colors = data.load_features(image)\n if points is None:\n logger.error('Could not load features for image {}'.format(image))\n else:\n points = np.array(points[:, :3], dtype=float)\n return points, features, colors\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.eye", "numpy.random.rand" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhujun98/EXtra-foam
[ "680d6d7fd4afdcbc41eb8e440feac54b6cecab33", "680d6d7fd4afdcbc41eb8e440feac54b6cecab33", "680d6d7fd4afdcbc41eb8e440feac54b6cecab33" ]
[ "extra_foam/pipeline/processors/base_processor.py", "extra_foam/algorithms/miscellaneous.py", "extra_foam/gui/pyqtgraph/graphicsItems/GraphicsObject.py" ]
[ "\"\"\"\nDistributed under the terms of the BSD 3-Clause License.\n\nThe full license is in the file LICENSE, distributed with this software.\n\nAuthor: Jun Zhu <[email protected]>\nCopyright (C) European X-Ray Free-Electron Laser Facility GmbH.\nAll rights reserved.\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport math\n\nimport numpy as np\n\nfrom ..exceptions import (\n ProcessingError, SkipTrainError, UnknownParameterError\n)\nfrom ...database import MetaProxy\nfrom ...algorithms import normalize_auc\nfrom ...config import AnalysisType, config, Normalizer\n\n\nclass State(ABC):\n \"\"\"Base class of processor state.\"\"\"\n @abstractmethod\n def on_enter(self, proc):\n pass\n\n @abstractmethod\n def next(self):\n \"\"\"Return the next state.\"\"\"\n pass\n\n def update(self, proc):\n self.on_enter(proc)\n\n\nclass StateOn(State):\n \"\"\"State on.\n\n The state when the processor is ready to start.\n \"\"\"\n def on_enter(self, proc):\n handler = proc.on_handler\n if handler is not None:\n handler(proc)\n\n def next(self):\n return StateProcessing()\n\n\nclass StateProcessing(State):\n \"\"\"State processing.\n\n The state when the processor is processing data.\n \"\"\"\n def on_enter(self, proc):\n handler = proc.processing_handler\n if handler is not None:\n handler(proc)\n\n def next(self):\n return StateOn()\n\n\nclass SharedProperty:\n \"\"\"Property shared among Processors.\n\n Define a property which is shared by the current processor and its\n children processors.\n \"\"\"\n def __init__(self):\n self.name = None\n\n def __get__(self, instance, instance_type):\n if instance is None:\n return self\n\n if self.name not in instance._params:\n # initialization\n instance._params[self.name] = None\n\n return instance._params[self.name]\n\n def __set__(self, instance, value):\n instance._params[self.name] = value\n\n\nclass _RedisParserMixin:\n \"\"\"_RedisParserMixin class.\n\n Due to the performance concern, methods in this class are not suppose\n to cover all the corner cases, passing an arbitrary input may result\n in undefined behavior.\n \"\"\"\n @staticmethod\n def str2tuple(text, delimiter=\",\", handler=float):\n \"\"\"Convert a string to a tuple.\n\n The string is expected to be the result of str(tp), where tp is a\n tuple.\n\n For example:\n str2tuple('(1, 2)') -> (1.0, 2.0)\n \"\"\"\n splitted = text[1:-1].split(delimiter)\n return handler(splitted[0]), handler(splitted[1])\n\n @staticmethod\n def str2list(text, delimiter=\",\", handler=float):\n \"\"\"Convert a string to a list.\n\n The string is expected to be the result of str(lt), where lt is a\n list.\n\n For example:\n str2list('[1, 2, 3]') -> [1.0, 2.0, 3.0]\n \"\"\"\n if not text[1:-1]:\n return []\n return [handler(v) for v in text[1:-1].split(delimiter)]\n\n @staticmethod\n def str2slice(text):\n \"\"\"Convert a string to a slice object.\n\n The string is expected to the result of str(lt), where lt can be\n converted to a slice object by slice(*lt).\n\n For example:\n str2slice('[None, 2]' -> slice(None, 2)\n \"\"\"\n return slice(*[None if v.strip() == 'None' else int(v)\n for v in text[1:-1].split(',')])\n\n\nclass MetaProcessor(type):\n def __new__(mcs, name, bases, class_dict):\n for key, value in class_dict.items():\n if isinstance(value, SharedProperty):\n value.name = key\n\n cls = type.__new__(mcs, name, bases, class_dict)\n return cls\n\n\nclass _BaseProcessorMixin:\n @staticmethod\n def _normalize_fom(processed, y, normalizer, *, x=None, auc_range=None):\n \"\"\"Normalize FOM/VFOM.\n\n :param 
ProcessedData processed: processed data.\n :param numpy.ndarray y: y values.\n :param Normalizer normalizer: normalizer type.\n :param numpy.ndarray x: x values used with AUC normalizer..\n :param tuple auc_range: normalization range with AUC normalizer.\n \"\"\"\n if normalizer == Normalizer.UNDEFINED:\n return y\n\n if normalizer == Normalizer.AUC:\n # normalized by area under curve (AUC)\n normalized = normalize_auc(y, x, auc_range)\n elif normalizer == Normalizer.XGM:\n # normalized by XGM\n intensity = processed.pulse.xgm.intensity\n if intensity is None:\n raise ProcessingError(\"XGM normalizer is not available!\")\n denominator = np.mean(intensity)\n\n if denominator == 0:\n raise ProcessingError(\"XGM normalizer is zero!\")\n\n normalized = y / denominator\n elif normalizer == Normalizer.DIGITIZER:\n # normalized by DIGITIZER\n channel = processed.pulse.digitizer.ch_normalizer\n pulse_integral = processed.pulse.digitizer[channel].pulse_integral\n if pulse_integral is None:\n raise ProcessingError(\"Digitizer normalizer is not available!\")\n denominator = np.mean(pulse_integral)\n\n if denominator == 0:\n raise ProcessingError(\"Digitizer normalizer is zero!\")\n\n normalized = y / denominator\n elif normalizer == Normalizer.ROI:\n # normalized by ROI\n denominator = processed.roi.norm\n\n if denominator is None:\n raise ProcessingError(\"ROI normalizer is not available!\")\n\n if denominator == 0:\n raise ProcessingError(\"ROI normalizer is zero!\")\n\n normalized = y / denominator\n\n else:\n raise UnknownParameterError(\n f\"Unknown normalizer: {repr(normalizer)}\")\n\n return normalized\n\n @staticmethod\n def _normalize_fom_pp(processed, y_on, y_off, normalizer, *,\n x=None, auc_range=None):\n \"\"\"Normalize pump-probe FOM/VFOM.\n\n :param ProcessedData processed: processed data.\n :param numpy.ndarray y_on: pump y values.\n :param numpy.ndarray y_off: probe y values.\n :param Normalizer normalizer: normalizer type.\n :param numpy.ndarray x: x values used with AUC normalizer..\n :param tuple auc_range: normalization range with AUC normalizer.\n \"\"\"\n if normalizer == Normalizer.UNDEFINED:\n return y_on, y_off\n\n if normalizer == Normalizer.AUC:\n # normalized by AUC\n normalized_on = normalize_auc(y_on, x, auc_range)\n normalized_off = normalize_auc(y_off, x, auc_range)\n\n elif normalizer == Normalizer.XGM:\n # normalized by XGM\n denominator_on = processed.pp.on.xgm_intensity\n denominator_off = processed.pp.off.xgm_intensity\n\n if denominator_on is None or denominator_off is None:\n raise ProcessingError(\"XGM normalizer is not available!\")\n\n if denominator_on == 0:\n raise ProcessingError(\"XGM normalizer (on) is zero!\")\n\n if denominator_off == 0:\n raise ProcessingError(\"XGM normalizer (off) is zero!\")\n\n normalized_on = y_on / denominator_on\n normalized_off = y_off / denominator_off\n\n elif normalizer == Normalizer.DIGITIZER:\n # normalized by Digitizer\n denominator_on = processed.pp.on.digitizer_pulse_integral\n denominator_off = processed.pp.off.digitizer_pulse_integral\n\n if denominator_on is None or denominator_off is None:\n raise ProcessingError(\"Digitizer normalizer is not available!\")\n\n if denominator_on == 0:\n raise ProcessingError(\"Digitizer normalizer (on) is zero!\")\n\n if denominator_off == 0:\n raise ProcessingError(\"Digitizer normalizer (off) is zero!\")\n\n normalized_on = y_on / denominator_on\n normalized_off = y_off / denominator_off\n\n elif normalizer == Normalizer.ROI:\n # normalized by ROI\n denominator_on = 
processed.pp.on.roi_norm\n denominator_off = processed.pp.off.roi_norm\n\n if denominator_on is None:\n raise ProcessingError(\"ROI normalizer (on) is not available!\")\n\n if denominator_off is None:\n raise ProcessingError(\"ROI normalizer (off) is not available!\")\n\n if denominator_on == 0:\n raise ProcessingError(\"ROI normalizer (on) is zero!\")\n\n if denominator_off == 0:\n raise ProcessingError(\"ROI normalizer (off) is zero!\")\n\n normalized_on = y_on / denominator_on\n normalized_off = y_off / denominator_off\n\n else:\n raise UnknownParameterError(\n f\"Unknown normalizer: {repr(normalizer)}\")\n\n return normalized_on, normalized_off\n\n @staticmethod\n def _fetch_property_data(tid, raw, src):\n \"\"\"Fetch property data from raw data.\n\n :param int tid: train ID.\n :param dict raw: raw data.\n :param str src: source.\n\n :returns (value, error str)\n \"\"\"\n if not src:\n # not activated is not an error\n return None, \"\"\n\n try:\n return raw[src], \"\"\n except KeyError:\n return None, f\"[{tid}] '{src}' not found!\"\n\n @staticmethod\n def filter_train_by_vrange(v, vrange, src):\n \"\"\"Filter a train by train-resolved value.\n\n :param float v: value of a control data.\n :param tuple vrange: value range.\n :param str src: data source.\n \"\"\"\n if vrange is not None:\n lb, ub = vrange\n if v > ub or v < lb:\n raise SkipTrainError(f\"<{src}> value {v:.4e} is \"\n f\"out of range [{lb}, {ub}]\")\n\n @staticmethod\n def filter_pulse_by_vrange(arr, vrange, index_mask):\n \"\"\"Filter pulses in a train by pulse-resolved value.\n\n :param numpy.array arr: pulse-resolved values of data\n in a train.\n :param tuple vrange: value range.\n :param PulseIndexMask index_mask: pulse index msk\n \"\"\"\n if vrange is not None:\n lb, ub = vrange\n\n if not math.isinf(lb) and not math.isinf(ub):\n index_mask.mask_by_array((arr > ub) | (arr < lb))\n elif not math.isinf(lb):\n index_mask.mask_by_array(arr < lb)\n elif not math.isinf(ub):\n index_mask.mask_by_array(arr > ub)\n\n\nclass _BaseProcessor(_BaseProcessorMixin, _RedisParserMixin,\n metaclass=MetaProcessor):\n \"\"\"Data processor interface.\"\"\"\n\n def __init__(self):\n self._pulse_resolved = config[\"PULSE_RESOLVED\"]\n\n self._meta = MetaProxy()\n\n def _update_analysis(self, analysis_type, *, register=True):\n \"\"\"Update analysis type.\n\n :param AnalysisType analysis_type: analysis type.\n :param bool register: True for (un)register the analysis type.\n\n :return: True if the analysis type has changed and False for not.\n \"\"\"\n if not isinstance(analysis_type, AnalysisType):\n raise UnknownParameterError(\n f\"Unknown analysis type: {str(analysis_type)}\")\n\n if analysis_type != self.analysis_type:\n if register:\n # unregister the old\n if self.analysis_type is not None:\n self._meta.unregister_analysis(self.analysis_type)\n\n # register the new one\n if analysis_type != AnalysisType.UNDEFINED:\n self._meta.register_analysis(analysis_type)\n\n self.analysis_type = analysis_type\n return True\n\n return False\n\n def run_once(self, data):\n \"\"\"Composition interface.\n\n :param dict data: data which contains raw and processed data, etc.\n \"\"\"\n self.update()\n self.process(data)\n\n def update(self):\n \"\"\"Update metadata.\"\"\"\n raise NotImplementedError\n\n def process(self, data):\n \"\"\"Process data.\n\n :param dict data: data which contains raw and processed data, etc.\n \"\"\"\n raise NotImplementedError\n", "\"\"\"\nDistributed under the terms of the BSD 3-Clause License.\n\nThe full license is in 
the file LICENSE, distributed with this software.\n\nAuthor: Jun Zhu <[email protected]>\nCopyright (C) European X-Ray Free-Electron Laser Facility GmbH.\nAll rights reserved.\n\"\"\"\nimport numpy as np\n\nfrom .sampling import slice_curve\n\n\ndef normalize_auc(y, x, auc_range=None):\n \"\"\"Normalize a curve a given area under the curve (AUC).\n\n :param numpy.ndarray y: 1D array.\n :param numpy.ndarray x: 1D array.\n :param None/tuple auc_range: x range for calculating AUC.\n\n :return numpy.ndarray: the normalized y.\n\n :raise ValueError\n \"\"\"\n # if y contains only 0 (np.any() is much faster to np.count_nonzero())\n if not np.any(y):\n return np.copy(y)\n\n # get the integration\n if auc_range is None:\n integ = np.trapz(*slice_curve(y, x))\n else:\n integ = np.trapz(*slice_curve(y, x, *auc_range))\n\n if integ == 0:\n raise ValueError(\"Normalized by 0!\")\n\n return y / integ\n\n", "import abc\n\nimport numpy as np\n\nfrom ..Qt import QtGui, QtCore, QT_LIB\nif QT_LIB in ['PyQt4', 'PyQt5']:\n import sip\nfrom .GraphicsItem import GraphicsItem\n\n__all__ = ['GraphicsObject', 'PlotItem']\n\n\nclass GraphicsObject(GraphicsItem, QtGui.QGraphicsObject):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`, :class:`QtGui.QGraphicsObject`\n\n Extension of QGraphicsObject with some useful methods (provided by :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`)\n \"\"\"\n _qtBaseClass = QtGui.QGraphicsObject\n\n def __init__(self, *args, **kwargs):\n self.__inform_view_on_changes = True\n QtGui.QGraphicsObject.__init__(self, *args, **kwargs)\n self.setFlag(self.ItemSendsGeometryChanges)\n GraphicsItem.__init__(self)\n \n def itemChange(self, change, value):\n ret = QtGui.QGraphicsObject.itemChange(self, change, value)\n if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n self.parentChanged()\n try:\n inform_view_on_change = self.__inform_view_on_changes\n except AttributeError:\n # It's possible that the attribute was already collected when the itemChange happened\n # (if it was triggered during the gc of the object).\n pass\n else:\n if inform_view_on_change and change in [self.ItemPositionHasChanged, self.ItemTransformHasChanged]:\n self.informViewBoundsChanged()\n \n ## workaround for pyqt bug:\n ## http://www.riverbankcomputing.com/pipermail/pyqt/2012-August/031818.html\n if QT_LIB in ['PyQt4', 'PyQt5'] and change == self.ItemParentChange and isinstance(ret, QtGui.QGraphicsItem):\n ret = sip.cast(ret, QtGui.QGraphicsItem)\n\n return ret\n\n\nclass PlotItem(GraphicsObject):\n def __init__(self, name=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._graph = None\n\n self._name = \"\" if name is None else name\n\n self._log_x_mode = False\n self._log_y_mode = False\n\n @abc.abstractmethod\n def setData(self, *args, **kwargs):\n raise NotImplementedError\n\n def _parseInputData(self, x, y, **kwargs):\n \"\"\"Convert input to np.array and apply shape check.\"\"\"\n if isinstance(x, list):\n x = np.array(x)\n elif x is None:\n x = np.array([])\n\n if isinstance(y, list):\n y = np.array(y)\n elif y is None:\n y = np.array([])\n\n if len(x) != len(y):\n raise ValueError(\"'x' and 'y' data have different lengths!\")\n\n # do not set data unless they pass the sanity check!\n self._x, self._y = x, y\n\n @abc.abstractmethod\n def data(self):\n raise NotImplementedError\n\n def updateGraph(self):\n self._graph = None\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n @abc.abstractmethod\n def 
_prepareGraph(self):\n raise NotImplementedError\n\n def paint(self, p, *args):\n \"\"\"Override.\"\"\"\n if self._graph is None:\n self._prepareGraph()\n p.setPen(self._pen)\n p.drawPath(self._graph)\n\n def boundingRect(self):\n \"\"\"Override.\"\"\"\n if self._graph is None:\n self._prepareGraph()\n return self._graph.boundingRect()\n\n def setLogX(self, state):\n \"\"\"Set log mode for x axis.\"\"\"\n self._log_x_mode = state\n self.updateGraph()\n\n def setLogY(self, state):\n \"\"\"Set log mode for y axis.\"\"\"\n self._log_y_mode = state\n self.updateGraph()\n\n def transformedData(self):\n \"\"\"Transform and return the internal data to log scale if requested.\n\n Child class should re-implement this method if it has a\n different internal data structure.\n \"\"\"\n return (self.toLogScale(self._x) if self._log_x_mode else self._x,\n self.toLogScale(self._y) if self._log_y_mode else self._y)\n\n @staticmethod\n def toLogScale(arr, policy=None):\n \"\"\"Convert array result to logarithmic scale.\"\"\"\n ret = np.nan_to_num(arr)\n ret[ret < 0] = 0\n return np.log10(ret + 1)\n\n def name(self):\n \"\"\"An identity of the PlotItem.\n\n Used in LegendItem.\n \"\"\"\n return self._name\n\n def drawSample(self, p):\n \"\"\"Draw a sample used in LegendItem.\"\"\"\n pass\n" ]
[ [ "numpy.mean" ], [ "numpy.copy", "numpy.any" ], [ "numpy.array", "numpy.log10", "numpy.nan_to_num" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
i-pan/kaggle-rsna-pe
[ "81ca9ee45014b7ef1647000492a4ef5fbad7126f", "81ca9ee45014b7ef1647000492a4ef5fbad7126f" ]
[ "src/etl/08_create_train_df_with_pe_and_heart_probas.py", "src/etl/04_write_sample_to_jpeg.py" ]
[ "import glob\nimport os, os.path as osp\nimport pandas as pd\n\n\ndef enumerate_slices(df):\n df = df.sort_values('ImagePositionPatient_2')\n df['SliceIndex'] = list(range(len(df)))\n return df\n\n\nPE_PROBA_DIR = '../../data/train-pe-probas/'\nHEART_PROBA_DIR = '../../data/train-heart-probas/'\n\n\npe_probas = pd.concat([pd.read_csv(_) for _ in glob.glob(osp.join(PE_PROBA_DIR, '*/*.csv'))]).drop_duplicates()\nheart_probas = pd.concat([pd.read_csv(_) for _ in glob.glob(osp.join(HEART_PROBA_DIR, '*.csv'))]).reset_index(drop=True)\nheart_probas = heart_probas.iloc[heart_probas[['SeriesInstanceUID','SliceIndex']].drop_duplicates().index]\n\ndf = pd.read_csv('../../data/train/train_5fold.csv')\ndf = pd.concat([enumerate_slices(series_df) for series, series_df in df.groupby('SeriesInstanceUID')])\n\nmerge_cols = ['StudyInstanceUID', 'SeriesInstanceUID', 'SliceIndex']\ndf = df.merge(pe_probas, on=merge_cols).merge(heart_probas, on=merge_cols)\n\ndf.to_csv('../../data/train/train_5fold_with_probas.csv', index=False)", "\"\"\"\nWrite 1,000 CTs to JPEG so I can use it to manually identify heart slices.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport pydicom\nimport glob\nimport cv2\nimport os, os.path as osp\n\nfrom tqdm import tqdm\n\n\ndef window(X, WL=50, WW=350):\n lower, upper = WL-WW/2, WL+WW/2\n X = np.clip(X, lower, upper)\n X = X - np.min(X)\n X = X / np.max(X)\n X = (X*255.0).astype('uint8')\n return X\n\n\ndef save_images(sample, save_dir):\n if not osp.exists(save_dir): os.makedirs(save_dir)\n for series, _df in tqdm(sample.groupby('SeriesInstanceUID'), total=len(sample.SeriesInstanceUID.unique())):\n _df = _df.sort_values('ImagePositionPatient_2')\n dicoms = [pydicom.dcmread(osp.join(DATADIR, _)) for _ in _df.filepath.values[::4]]\n m = float(dicoms[0].RescaleSlope)\n b = float(dicoms[0].RescaleIntercept)\n arrays = [d.pixel_array*m+b for d in dicoms]\n arrays = [window(_) for _ in arrays]\n for ind, a in enumerate(arrays):\n a = cv2.resize(a, (128,128))\n _ = cv2.imwrite(osp.join(save_dir, f'{_df.iloc[0].StudyInstanceUID}_{series}_{ind:04d}_{_df.iloc[ind].SOPInstanceUID}.jpg'), a)\n\n\nDATADIR = '../../data/train/'\nSAVEDIR = '../../data/sample-1500/'\nif not osp.exists(SAVEDIR): os.makedirs(SAVEDIR)\n\ndf = pd.read_csv(osp.join(DATADIR, 'train_kfold.csv'))\n# Take 500 from normals\nnormal = df[df.negative_exam_for_pe == 1][['StudyInstanceUID', 'SeriesInstanceUID']].drop_duplicates().sample(n=500, random_state=0)\nnormal = normal.merge(df, on=['StudyInstanceUID', 'SeriesInstanceUID'])\n# Take 500 from RV/LV >= 1\nbigrv = df[df.rv_lv_ratio_gte_1 == 1][['StudyInstanceUID', 'SeriesInstanceUID']].drop_duplicates().sample(n=500, random_state=0)\nbigrv = bigrv.merge(df, on=['StudyInstanceUID', 'SeriesInstanceUID'])\n# Take 500 from +PE, RV/LV < 1\npe_normalrv = df[(df.rv_lv_ratio_lt_1 == 1) & (df.negative_exam_for_pe == 0)][['StudyInstanceUID', 'SeriesInstanceUID']].drop_duplicates().sample(n=500, random_state=0)\npe_normalrv = pe_normalrv.merge(df, on=['StudyInstanceUID', 'SeriesInstanceUID'])\n\nsave_images(normal, osp.join(SAVEDIR, 'normal'))\nsave_images(bigrv, osp.join(SAVEDIR, 'bigrv'))\nsave_images(pe_normalrv, osp.join(SAVEDIR, 'pe_normalrv'))\n" ]
[ [ "pandas.read_csv" ], [ "numpy.max", "numpy.min", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nkeim/pims
[ "3c8f7832113517a37ddd65a2803a6b17f9c33e4c", "3c8f7832113517a37ddd65a2803a6b17f9c33e4c" ]
[ "pims/tests/test_process.py", "pims/tests/test_frame.py" ]
[ "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os\nimport unittest\nimport nose\nimport numpy as np\nfrom numpy.testing import assert_equal\nfrom pims import FramesSequence, Frame\nfrom pims.process import crop\n\n\nclass RandomReader(FramesSequence):\n def __init__(self, length=10, shape=(128, 128), dtype='uint8'):\n self._len = length\n self._dtype = dtype\n self._shape = shape\n data_shape = (length,) + shape\n if np.issubdtype(self._dtype, np.float):\n self._data = np.random.random(data_shape).astype(self._dtype)\n else:\n self._data = np.random.randint(0, np.iinfo(self._dtype).max,\n data_shape).astype(self._dtype)\n\n def __len__(self):\n return self._len\n\n @property\n def frame_shape(self):\n return self._shape\n\n @property\n def pixel_type(self):\n return self._dtype\n\n def get_frame(self, i):\n return Frame(self._data[i], frame_no=i)\n\n\nclass PipelinesCommon(object):\n def test_on_frame(self):\n assert_equal(self.pipeline(self.rdr[0]), self.first_frame)\n\n def test_on_reader(self):\n assert_equal(self.pipeline(self.rdr)[0], self.first_frame)\n\n def test_on_random_frame(self):\n i = np.random.randint(0, len(self.rdr))\n assert_equal(self.pipeline(self.rdr)[i], self.pipeline(self.rdr[i]))\n\n\nclass TestCrop(PipelinesCommon, unittest.TestCase):\n def setUp(self):\n self.rdr = RandomReader(length=10, shape=(32, 33))\n self.pipeline = lambda x: crop(x, ((5, 32-26), (7, 33-27)))\n self.first_frame = self.rdr[0][5:26, 7:27]\n\n def test_attrs(self):\n proc = self.pipeline(self.rdr)\n assert_equal(self.rdr.pixel_type, proc.pixel_type)\n assert_equal(len(self.rdr), len(proc))\n assert_equal(self.rdr.frame_shape, (32, 33))\n assert_equal(proc.frame_shape, (21, 20))\n", "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nimport nose\nimport numpy as np\nfrom pims.frame import Frame\nfrom nose.tools import assert_true, assert_equal\n\n\ndef _skip_if_no_PIL():\n try:\n from PIL import Image\n except ImportError:\n raise nose.SkipTest('PIL/Pillow not installed. Skipping.')\n\n\ndef _skip_if_no_jinja2():\n try:\n import jinja2\n except ImportError:\n raise nose.SkipTest('Jinja2 not installed. 
Skipping.')\n\n\ndef test_scalar_casting():\n tt = Frame(np.ones((5, 3)), frame_no=42)\n sum1 = tt.sum()\n assert_true(np.isscalar(sum1))\n sum2 = tt.sum(keepdims=True)\n assert_equal(sum2.ndim, 2)\n assert_equal(sum2.frame_no, tt.frame_no)\n\n\ndef test_creation_md():\n md_dict = {'a': 1}\n frame_no = 42\n tt = Frame(np.ones((5, 3)), frame_no=frame_no, metadata=md_dict)\n assert_equal(tt.metadata, md_dict)\n assert_equal(tt.frame_no, frame_no)\n\n\ndef test_repr_html_():\n _skip_if_no_PIL()\n _skip_if_no_jinja2()\n # This confims a bugfix, where 16-bit images would raise\n # an error.\n Frame(10000*np.ones((50, 50), dtype=np.uint16))._repr_html_()\n\n\ndef test_copy():\n md_dict = {'a': 1}\n frame_no = 42\n tt_base = Frame(np.ones((5, 3)), frame_no=frame_no, metadata=md_dict)\n tt = Frame(tt_base)\n assert_equal(tt.metadata, md_dict)\n assert_equal(tt.frame_no, frame_no)\n\n\ndef test_copy_override_frame():\n frame_no = 42\n tt_base = Frame(np.ones((5, 3)), frame_no=frame_no)\n frame_no_2 = 123\n tt = Frame(tt_base, frame_no=frame_no_2)\n assert_equal(tt.frame_no, frame_no_2)\n\n\ndef test_copy_update_md():\n frame_no = 42\n md_dict = {'a': 1}\n md_dict2 = {'b': 1}\n md_dict3 = {'a': 2, 'c': 3}\n tt_base = Frame(np.ones((5, 3)), frame_no=frame_no, metadata=md_dict)\n\n tt = Frame(tt_base, frame_no=frame_no, metadata=md_dict2)\n target_dict = dict(md_dict)\n target_dict.update(md_dict2)\n # print(target_dict)\n # print(tt.metadata)\n assert_equal(tt.metadata, target_dict)\n\n tt2 = Frame(tt_base, frame_no=frame_no, metadata=md_dict3)\n assert_equal(tt2.metadata, md_dict3)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.issubdtype", "numpy.random.random", "numpy.iinfo" ], [ "numpy.isscalar", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ericl/ray
[ "b8669bc06c4535e1715b3a84ad193cdfa0e237f3", "b8669bc06c4535e1715b3a84ad193cdfa0e237f3" ]
[ "python/ray/tests/test_advanced_3.py", "python/ray/tests/test_memory_limits.py" ]
[ "# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport logging\nimport os\nimport setproctitle\nimport shutil\nimport sys\nimport socket\nimport subprocess\nimport tempfile\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nfrom ray import signature\nimport ray.ray_constants as ray_constants\nimport ray.cluster_utils\nimport ray.test_utils\n\nfrom ray.test_utils import RayTestTimeoutException\n\nlogger = logging.getLogger(__name__)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.node.unique_id\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.node.unique_id\n\n # This object will be local to one of the raylets. 
Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_actors(num_actors, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.actors()) >= num_actors:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.tasks()) >= num_tasks:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.actors()\n\n with pytest.raises(Exception, match=error_message):\n ray.tasks()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n assert ray.cluster_resources()[\"CPU\"] == 5\n assert ray.cluster_resources()[\"GPU\"] == 3\n assert ray.cluster_resources()[\"CustomResource\"] == 1\n\n assert ray.objects() == {}\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.tasks()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_unique_id_hex = ray.UniqueID.nil().hex()\n nil_actor_id_hex = ray.ActorID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"FunctionID\"] == nil_unique_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n class Actor:\n def __init__(self):\n pass\n\n _ = Actor.remote()\n # Wait for actor to be created\n wait_for_num_actors(1)\n\n actor_table = ray.actors()\n assert len(actor_table) == 1\n\n actor_info, = actor_table.values()\n assert actor_info[\"JobID\"] == job_id.hex()\n assert \"IPAddress\" in actor_info[\"Address\"]\n assert \"IPAddress\" in actor_info[\"OwnerAddress\"]\n assert actor_info[\"Address\"][\"Port\"] != actor_info[\"OwnerAddress\"][\"Port\"]\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n\[email protected](\n ray_constants.direct_call_enabled(),\n reason=\"object and task API not supported\")\ndef 
test_global_state_task_object_api(shutdown_only):\n ray.init()\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n nil_actor_id_hex = ray.ActorID.nil().hex()\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.tasks()\n assert len(task_table) == 1 + 1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == [\n signature.DUMMY_TYPE, 1, signature.DUMMY_TYPE, \"hi\",\n signature.DUMMY_TYPE, x_id\n ]\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n\n assert task_table[task_id] == ray.tasks(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.objects()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\n \"Timed out while waiting for object table to \"\n \"update.\")\n\n object_table = ray.objects()\n assert len(object_table) == 2\n\n assert object_table[x_id] == ray.objects(x_id)\n object_table_entry = ray.objects(result_id)\n assert object_table[result_id] == object_table_entry\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError:\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. 
Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray._get_runtime_context().current_driver_id\n\n # in worker\n @ray.remote\n def f():\n return ray._get_runtime_context().current_driver_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID.from_random()\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=int(10**8))\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.core_worker.object_exists(x_id)\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName:\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.f()\"\n\n @ray.remote\n def unique_1():\n assert \"unique_1\" in 
setproctitle.getproctitle()\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID.from_random().hex()\n temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n 
assert str(e.value).endswith(\"ray.init has been called.\")\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_put_pins_object(ray_start_object_store_memory):\n x_id = ray.put(\"HI\")\n x_copy = ray.ObjectID(x_id.binary())\n assert ray.get(x_copy) == \"HI\"\n\n # x cannot be evicted since x_id pins it\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert ray.get(x_id) == \"HI\"\n assert ray.get(x_copy) == \"HI\"\n\n # now it can be evicted since x_id pins it but x_copy does not\n del x_id\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(x_copy)\n\n # weakref put\n y_id = ray.put(\"HI\", weakref=True)\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(y_id)\n\n @ray.remote\n def check_no_buffer_ref(x):\n assert x[0].get_buffer_ref() is None\n\n z_id = ray.put(\"HI\")\n assert z_id.get_buffer_ref() is not None\n ray.get(check_no_buffer_ref.remote([z_id]))\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_redis_lru_with_set(ray_start_object_store_memory):\n x = np.zeros(8 * 10**7, dtype=np.uint8)\n x_id = ray.put(x, weakref=True)\n\n # Remove the object from the object table to simulate Redis LRU eviction.\n removed = False\n start_time = time.time()\n while time.time() < start_time + 10:\n if ray.state.state.redis_clients[0].delete(b\"OBJECT\" +\n x_id.binary()) == 1:\n removed = True\n break\n assert removed\n\n # Now evict the object from the object store.\n ray.put(x) # This should not crash.\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_ids, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor:\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n\n\ndef test_invalid_unicode_in_worker_log(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n # Wait till first worker log 
file is created.\n while True:\n log_file_paths = glob.glob(\"{}/worker*.out\".format(logs_dir))\n if len(log_file_paths) == 0:\n time.sleep(0.2)\n else:\n break\n\n with open(log_file_paths[0], \"wb\") as f:\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.flush()\n\n # Wait till the log monitor reads the file.\n time.sleep(1.0)\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\[email protected](reason=\"This test is too expensive to run.\")\ndef test_move_log_files_to_old(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n @ray.remote\n class Actor:\n def f(self):\n print(\"function f finished\")\n\n # First create a temporary actor.\n actors = [\n Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)\n ]\n ray.get([a.f.remote() for a in actors])\n\n # Make sure no log files are in the \"old\" directory before the actors\n # are killed.\n assert len(glob.glob(\"{}/old/worker*.out\".format(logs_dir))) == 0\n\n # Now kill the actors so the files get moved to logs/old/.\n [a.__ray_terminate__.remote() for a in actors]\n\n while True:\n log_file_paths = glob.glob(\"{}/old/worker*.out\".format(logs_dir))\n if len(log_file_paths) > 0:\n with open(log_file_paths[0], \"r\") as f:\n assert \"function f finished\\n\" in f.readlines()\n break\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n", "import numpy as np\nimport unittest\n\nimport ray\n\nMB = 1024 * 1024\n\nOBJECT_EVICTED = ray.exceptions.UnreconstructableError\nOBJECT_TOO_LARGE = ray.exceptions.ObjectStoreFullError\n\n\[email protected]\nclass LightActor:\n def __init__(self):\n pass\n\n def sample(self):\n return np.zeros(1 * MB, dtype=np.uint8)\n\n\[email protected]\nclass GreedyActor:\n def __init__(self):\n pass\n\n def sample(self):\n return np.zeros(20 * MB, dtype=np.uint8)\n\n\nclass TestMemoryLimits(unittest.TestCase):\n def testWithoutQuota(self):\n self.assertRaises(OBJECT_EVICTED, lambda: self._run(None, None, None))\n self.assertRaises(OBJECT_EVICTED,\n lambda: self._run(100 * MB, None, None))\n self.assertRaises(OBJECT_EVICTED,\n lambda: self._run(None, 100 * MB, None))\n\n def testQuotasProtectSelf(self):\n self._run(100 * MB, 100 * MB, None)\n\n def testQuotasProtectOthers(self):\n self._run(None, None, 100 * MB)\n\n def testQuotaTooLarge(self):\n self.assertRaisesRegexp(ray.memory_monitor.RayOutOfMemoryError,\n \".*Failed to set object_store_memory.*\",\n lambda: self._run(300 * MB, None, None))\n\n def testTooLargeAllocation(self):\n try:\n ray.init(num_cpus=1, driver_object_store_memory=100 * MB)\n ray.put(np.zeros(50 * MB, dtype=np.uint8), weakref=True)\n self.assertRaises(\n OBJECT_TOO_LARGE,\n lambda: ray.put(np.zeros(200 * MB, dtype=np.uint8)))\n finally:\n ray.shutdown()\n\n def _run(self, driver_quota, a_quota, b_quota):\n print(\"*** Testing ***\", driver_quota, a_quota, b_quota)\n try:\n ray.init(\n num_cpus=1,\n object_store_memory=300 * MB,\n driver_object_store_memory=driver_quota)\n z = ray.put(\"hi\", weakref=True)\n a = LightActor._remote(object_store_memory=a_quota)\n b = GreedyActor._remote(object_store_memory=b_quota)\n oids = [z]\n for _ in range(5):\n r_a = a.sample.remote()\n for _ in range(20):\n new_oid = b.sample.remote()\n oids.append(new_oid)\n 
ray.get(new_oid)\n oids.append(r_a)\n ray.get(r_a)\n ray.get(z)\n except Exception as e:\n print(\"Raised exception\", type(e), e)\n raise e\n finally:\n print(ray.worker.global_worker.core_worker.\n dump_object_store_memory_usage())\n ray.shutdown()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.zeros", "pandas.DataFrame" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jjschwartz/rltorch
[ "eeb2ad955f018d768db98c4a2be5da96a75579f6", "eeb2ad955f018d768db98c4a2be5da96a75579f6" ]
[ "rltorch/utils/plot_utils.py", "rltorch/algs/policy_gradients/REINFORCE/buffer.py" ]
[ "import numpy as np\n\n\ndef one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.,\n low_counts_threshold=1e-8):\n \"\"\"From openai.baselines.common.plot_util.py\n\n perform one-sided (causal) EMA (exponential moving average)\n smoothing and resampling to an even grid with n points.\n Does not do extrapolation, so we assume\n xolds[0] <= low && high <= xolds[-1]\n\n Arguments\n ---------\n xolds : array or list\n x values of data. Needs to be sorted in ascending order\n yolds : array of list\n y values of data. Has to have the same length as xolds\n low : float\n min value of the new x grid. By default equals to xolds[0]\n high : float\n max value of the new x grid. By default equals to xolds[-1]\n n : int\n number of points in new x grid\n decay_steps : float\n EMA decay factor, expressed in new x grid steps.\n low_counts_threshold: float or int\n y values with counts less than this value will be set to NaN\n\n Returns\n -------\n xs : array\n with new x grid\n ys : array\n of EMA of y at each point of the new x grid\n count_ys : array\n of EMA of y counts at each point of the new x grid\n \"\"\"\n\n low = xolds[0] if low is None else low\n high = xolds[-1] if high is None else high\n\n assert xolds[0] <= low, \\\n f'low={low} < xolds[0]={xolds[0]} - extrapolation not permitted!'\n assert xolds[-1] >= high, \\\n f'high={high} > xolds[-1]={xolds[-1]} - extrapolation not permitted!'\n assert len(xolds) == len(yolds), \\\n f'len of xolds ({len(xolds)}) and yolds ({len(yolds)}) do not match!'\n\n xolds = xolds.astype('float64')\n yolds = yolds.astype('float64')\n\n luoi = 0 # last unused old index\n sum_y = 0.\n count_y = 0.\n xnews = np.linspace(low, high, n)\n decay_period = (high - low) / (n - 1) * decay_steps\n interstep_decay = np.exp(- 1. / decay_steps)\n sum_ys = np.zeros_like(xnews)\n count_ys = np.zeros_like(xnews)\n for i in range(n):\n xnew = xnews[i]\n sum_y *= interstep_decay\n count_y *= interstep_decay\n while True:\n if luoi >= len(xolds):\n break\n xold = xolds[luoi]\n if xold <= xnew:\n decay = np.exp(- (xnew - xold) / decay_period)\n sum_y += decay * yolds[luoi]\n count_y += decay\n luoi += 1\n else:\n break\n sum_ys[i] = sum_y\n count_ys[i] = count_y\n\n ys = sum_ys / count_ys\n ys[count_ys < low_counts_threshold] = np.nan\n\n return xnews, ys, count_ys\n\n\ndef symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.,\n low_counts_threshold=1e-8):\n \"\"\"From openai.baselines.common.plot_util.py\n\n perform symmetric EMA (exponential moving average)\n smoothing and resampling to an even grid with n points.\n Does not do extrapolation, so we assume\n xolds[0] <= low && high <= xolds[-1]\n\n Arguments\n ---------\n xolds : array or list\n x values of data. Needs to be sorted in ascending order\n yolds : array of list\n y values of data. Has to have the same length as xolds\n low : float\n min value of the new x grid. By default equals to xolds[0]\n high : float\n max value of the new x grid. 
By default equals to xolds[-1]\n n : int\n number of points in new x grid\n decay_steps : float\n EMA decay factor, expressed in new x grid steps.\n low_counts_threshold: float or int\n y values with counts less than this value will be set to NaN\n\n Returns\n -------\n xs : array\n with new x grid\n ys : array\n of EMA of y at each point of the new x grid\n count_ys : array\n of EMA of y counts at each point of the new x grid\n \"\"\"\n low = xolds[0] if low is None else low\n high = xolds[-1] if high is None else high\n\n xs, ys1, count_ys1 = one_sided_ema(\n xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0\n )\n _, ys2, count_ys2 = one_sided_ema(\n -xolds[::-1], yolds[::-1], -high, -low, n, decay_steps,\n low_counts_threshold=0\n )\n ys2 = ys2[::-1]\n count_ys2 = count_ys2[::-1]\n count_ys = count_ys1 + count_ys2\n ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys\n ys[count_ys < low_counts_threshold] = np.nan\n return xs, ys, count_ys\n\n\ndef plot_xy(ax, xs, ys, label=None):\n \"\"\"Create a line plot on given axis, with stderr included\n\n Will plot an errorbar plot if len(xs) == 1\n\n Parameters\n ----------\n ax : Matplotlib.pyplot.axis\n axis to plot on\n xs : array\n of x-axis values\n ys : array\n of y-axis values\n label : str, optional\n a label for the line (default=None)\n \"\"\"\n print(f\"Plotting {label}\")\n try:\n if len(ys[0]):\n # list of lists\n y_mean = np.mean(ys, axis=0)\n y_std = np.std(ys, axis=0)\n except Exception:\n y_mean = ys\n y_std = 0\n\n if len(xs) > 1:\n ax.plot(xs, y_mean, label=label)\n ax.fill_between(xs, y_mean-y_std, y_mean+y_std, alpha=0.25)\n else:\n ax.errorbar(\n xs, y_mean, yerr=(y_mean-y_std, y_mean+y_std),\n fmt='o', label=label, capsize=10\n )\n", "import numpy as np\nimport scipy.signal\n\nimport torch\n\n\ndef discount_cumsum(x, discount):\n return scipy.signal.lfilter(\n [1], [1, float(-discount)], x[::-1], axis=0)[::-1]\n\n\nclass Buffer:\n\n def __init__(self, capacity, obs_dim, gamma=0.99, device=\"cpu\"):\n self.capacity = capacity\n self.device = device\n self.o_buf = np.zeros((capacity, *obs_dim), dtype=np.float32)\n self.a_buf = np.zeros((capacity, ), dtype=np.float32)\n self.rew_buf = np.zeros(capacity, dtype=np.float32)\n self.ret_buf = np.zeros(capacity, dtype=np.float32)\n self.logp_buf = np.zeros(capacity, dtype=np.float32)\n self.gamma = gamma\n self.ptr, self.path_start_idx = 0, 0\n\n def store(self, o, a, r, logp):\n assert self.ptr < self.capacity\n self.o_buf[self.ptr] = o\n self.a_buf[self.ptr] = a\n self.rew_buf[self.ptr] = r\n self.logp_buf[self.ptr] = logp\n self.ptr += 1\n\n def finish_path(self, last_val=0):\n \"\"\"Call this at end of trajectory \"\"\"\n path_slice = slice(self.path_start_idx, self.ptr)\n rews = np.append(self.rew_buf[path_slice], last_val)\n\n # Reward-to-go targets\n self.ret_buf[path_slice] = discount_cumsum(rews, self.gamma)[:-1]\n self.path_start_idx = self.ptr\n\n def get(self):\n \"\"\"Get all trajectories currently stored\"\"\"\n assert self.ptr == self.capacity\n self.ptr, self.path_start_idx = 0, 0\n\n data = [self.o_buf,\n self.a_buf,\n self.ret_buf,\n self.logp_buf]\n return [torch.from_numpy(v).to(self.device) for v in data]\n" ]
[ [ "numpy.linspace", "numpy.std", "numpy.zeros_like", "numpy.mean", "numpy.exp" ], [ "numpy.append", "numpy.zeros", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fourjr/advent-of-code
[ "5f7a34be4d81957c3e8ca504e095786c823042ba" ]
[ "2020/Day 20/part2.py" ]
[ "import copy\nimport math\nimport re\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import DefaultDict, Iterable, List, Set\n\nimport numpy as np\n\nfrom aoc import get_input_as, submit\n\n\ninp = get_input_as(str, sep='\\n\\n')\n\n\nclass FlipVariant(Enum):\n ORIGINAL = 0b0000\n VERTICAL = 0b0100\n HORIZONTAL = 0b1000\n\n\nclass RotateVariant(Enum):\n ORIGINAL = 0b0000\n D090 = 0b0001\n D180 = 0b0010\n D270 = 0b0011\n\n\nclass TileVariant:\n def __init__(self, variant, data, tile):\n self.variant = variant\n self.pixels: List[List[str]] = data # 2D NP ARRAY\n self.tile = tile\n self.id = f'{tile.id}.{variant}'\n\n self.top_edge = ''.join(self.pixels[0])\n self.bottom_edge = ''.join(self.pixels[len(self.pixels) - 1])\n self.left_edge = ''.join(self.pixels.transpose()[0])\n self.right_edge = ''.join(self.pixels.transpose()[-1])\n\n def remove_borders(self):\n self.pixels = self.pixels[1:-1,1:-1]\n\n def rotate90(self, k=1):\n flip_bits = self.variant >> 2\n rotate_bits = 0b0011 & self.variant\n rotate_bits += 1\n variant = (flip_bits << 2) | rotate_bits\n return TileVariant(variant, np.rot90(self.pixels, k), self.tile)\n\n def __repr__(self) -> str:\n return f'<TileVariant id={self.id} variant={bin(self.variant)} tile={self.tile.id}>'\n\n def __hash__(self) -> int:\n return hash(self.id)\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, TileVariant):\n return self.id == o.id\n return super().__eq__(o)\n\n\nclass Tile:\n def __init__(self, data):\n \"\"\"data: str\"\"\"\n self.state = None\n self.original = None\n self.next = None\n self.prev = None\n\n data = data.splitlines()\n header = data.pop(0)\n self.id = int(header[5:-1])\n self.pixels = np.array([list(x) for x in data])\n self.size = len(self.pixels[0])\n\n self.variants = {}\n for flip_v in FlipVariant:\n var = self.flip(flip_v)\n for rotate_v in RotateVariant:\n if rotate_v != RotateVariant.ORIGINAL:\n var = var.rotate90()\n self.variants[var.variant] = var\n\n def flip(self, variant):\n if variant == FlipVariant.VERTICAL:\n new_data = np.flipud(self.pixels)\n elif variant == FlipVariant.HORIZONTAL:\n new_data = np.fliplr(self.pixels)\n else:\n new_data = self.pixels\n\n return TileVariant(variant.value, new_data, self)\n\n def __repr__(self) -> str:\n return f'<Tile id={self.id} pixels={len(self.pixels)}>'\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, Tile):\n return self.id == o.id\n return super().__eq__(o)\n\n def __hash__(self) -> int:\n return hash(self.id)\n\n\nclass CameraArray:\n def __init__(self, data) -> None:\n self.head = None\n self.tail = None\n\n self.tiles = []\n prev = None\n for i in data:\n tile = Tile(i)\n self.tiles.append(tile)\n tile.prev = prev\n\n if prev is not None:\n prev.next = tile\n\n prev = tile\n\n self.head = self.tiles[0]\n self.tail = self.tiles[-1]\n\n self.size = int(math.sqrt(len(self.tiles)))\n self.all_variants: Set[TileVariant] = {x for t in self.tiles for x in t.variants.values()}\n\n def coherence_check(self) -> bool:\n tiles = list(self)\n for index, tile in enumerate(tiles):\n check_top = index - 10\n check_left = index - 1\n if index % self.size == 0:\n # extreme left\n check_left = None\n pass\n if check_top < 0:\n # top row\n check_top = None\n\n # Checking\n conditions = (\n check_left is None or tile.left_edge == self.tiles[check_left].right_edge,\n check_top is None or tile.top_edge == self.tiles[check_top].top_edge,\n )\n\n if not all(conditions):\n return False\n\n return True\n\n def __iter__(self) -> Iterable[Tile]:\n curr 
= self.head\n yield curr\n\n while curr.next:\n curr = curr.next\n if curr is self.tail:\n yield curr\n return\n\n yield curr\n\n def get_top_left_candidates(self):\n right_cands: DefaultDict[List[TileVariant]] = defaultdict(list)\n for v1 in self.all_variants:\n for v2 in self.all_variants:\n # Check for right edge match\n if v1.tile != v2.tile:\n if v1.right_edge == v2.left_edge:\n right_cands[v1].append(v2)\n\n maps = []\n tiles = []\n confirmed = [False] * len(self.tiles)\n confirmed[0] = True\n confirmed[1] = True\n confirmed[self.size] = True\n for t in self.tiles:\n tiles.append(t.variants[0b0])\n\n for v1 in right_cands.keys():\n tiles[0] = v1\n\n for v2 in self.all_variants:\n if v1.bottom_edge == v2.top_edge and v2.tile != v1.tile:\n tiles[self.size] = v2\n for r in right_cands[v1]:\n if r.tile != v2.tile:\n tiles[1] = r\n maps.append(MapVariant(tiles, confirmed, self, 0))\n\n return maps\n\n def get_answer(self):\n left_candidates = self.get_top_left_candidates()\n answers = self.call_get_next(left_candidates)\n assert len(answers) == 1\n return list(answers)[0]\n\n def call_get_next(self, possibilites):\n if not possibilites:\n raise StopIteration\n\n all_possibilities = set()\n for i in possibilites:\n all_possibilities |= i.find_next()\n\n if len(all_possibilities) == 1:\n return all_possibilities\n\n try:\n return self.call_get_next(all_possibilities)\n except StopIteration:\n return all_possibilities\n\n\nclass MapVariant:\n def __init__(self, new_map: List[TileVariant], confirmed: List[bool], original: CameraArray, curr_index: int):\n self.map = copy.copy(new_map)\n self.confirmed = confirmed[:]\n self.curr_index = curr_index\n self.original = original\n self.size = original.size\n\n ids = []\n for n, i in enumerate(self.map):\n if self.confirmed[n]:\n ids.append(i.id)\n else:\n ids.append('')\n\n self.id = '-'.join(ids)\n self.short_id = self.map[0].tile.id * self.map[self.size - 1].tile.id * self.map[self.size * (self.size - 1)].tile.id * self.map[-1].tile.id\n\n @property\n def used_tiles(self) -> List[Tile]:\n used_tiles = set()\n for n, i in enumerate(self.confirmed):\n if i:\n used_tiles.add(self.map[n].tile)\n\n return used_tiles\n\n def find_next(self):\n self.curr_index += 1\n if self.curr_index >= len(self.map):\n raise StopIteration\n\n index = self.curr_index\n curr = self.map[self.curr_index]\n\n check_bottom = index + self.size\n check_right = index + 1\n\n # special cases\n if (index + 1) % self.size != 0 and self.confirmed[check_right]:\n # force a check right\n if curr.right_edge != self.map[check_right].left_edge:\n return []\n\n if check_bottom < len(self.map) and self.confirmed[check_bottom]:\n if curr.bottom_edge != self.map[check_bottom].top_edge:\n return []\n\n # back to normal\n if (index + 1) % self.size == 0 or self.confirmed[check_right]:\n # extreme right\n check_right = None\n\n if check_bottom >= len(self.map) or self.confirmed[check_bottom]:\n # top row\n check_bottom = None\n\n right_cands = []\n if check_right:\n for v in self.original.all_variants:\n if curr.right_edge == v.left_edge and v.tile not in self.used_tiles:\n right_cands.append(v)\n\n newmaps = set()\n\n if check_bottom:\n for v in self.original.all_variants:\n if v.tile in self.used_tiles:\n continue\n\n if check_right:\n self.confirmed[check_right] = True\n for r in right_cands:\n self.map[check_right] = r\n if curr.bottom_edge == v.top_edge and r.tile != v.tile:\n self.confirmed[check_bottom] = True\n self.map[check_bottom] = v\n\n newmaps.add(MapVariant(self.map, 
self.confirmed, self.original, self.curr_index))\n self.confirmed[check_right] = False\n else:\n if curr.bottom_edge == v.top_edge:\n self.confirmed[check_bottom] = True\n self.map[check_bottom] = v\n newmaps.add(MapVariant(self.map, self.confirmed, self.original, self.curr_index))\n\n else:\n for r in right_cands:\n self.confirmed[check_right] = True\n self.map[check_right] = r\n newmaps.add(MapVariant(self.map, self.confirmed, self.original, self.curr_index))\n\n if not any((check_bottom, check_right)):\n newmaps.add(self)\n\n return newmaps\n\n def __hash__(self) -> int:\n return self.short_id\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, MapVariant):\n return self.short_id == o.short_id\n return super().__eq__(o)\n\n def __repr__(self) -> str:\n return f'<MapVariant id={self.id} short_id={self.short_id}>'\n\n def __str__(self):\n size = self.size\n fmt = ''\n rows = len(self.map[0].pixels)\n\n for tr in range(size):\n for r in range(rows):\n for m in range(size):\n fmt += ''.join(self.map[tr * size + m].pixels[r])\n fmt += '\\n'\n\n return fmt.strip()\n\n def remove_borders(self):\n for i in self.map:\n i.remove_borders()\n\n\ndef raw_to_xy(size, num):\n return num % size, num // size\n\n\ncam = CameraArray(inp)\nanswer = cam.get_answer()\nanswer.remove_borders()\n\nparsed_data = [list(x) for x in str(answer).splitlines()]\nparsed_data = np.array(parsed_data)\nsize = len(parsed_data)\n\ncounts = set()\n\nfor curr_data in (parsed_data, np.flipud(parsed_data), np.fliplr(parsed_data)):\n for rot in range(4):\n curr_data = np.rot90(curr_data)\n\n str_data = ''.join(''.join(x) for x in curr_data)\n matches = re.finditer(r'#....##....##....###', str_data)\n count = 0\n for m in matches:\n head, tail = m.span(0)\n headx, hy = raw_to_xy(size, head)\n tailx, ty = raw_to_xy(size, tail)\n if hy != ty or (hy + 1) >= size or (hy - 1) < 0:\n continue\n\n count += 1\n\n if count > 0:\n counts.add(str_data.count('#') - 15 * count)\n curr_data = np.rot90(curr_data)\n\nsubmit(min(counts))\n" ]
[ [ "numpy.flipud", "numpy.rot90", "numpy.fliplr", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
530824679/KPROI
[ "f485c83bb6401156653e1c60af92df5427aa1296" ]
[ "train.py" ]
[ "# -*- coding: utf-8 -*-\n# --------------------------------------\n# @Time : 2021/10/30 下午12:54\n# @Author : Oscar Chen\n# @Email : [email protected]\n# @File : train.py\n# @Software: PyCharm\n# Description : None\n# --------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport yaml # yaml配置模块\nimport torch\nimport logging # 日志模块\nimport argparse\nimport os.path as osp\nfrom tqdm import tqdm\nfrom pathlib import Path # 路径操作模块\nfrom torchvision import transforms\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom valid import validate\nfrom dataset.dataloader import create_train_dataloader, create_val_dataloader\nfrom utils.misc import AverageMeter, ProgressMeter\nfrom loss.loss import KeyPointsMSELoss\nfrom loss.optimizer import create_optimizer, create_lr_scheduler\nfrom models.kpnet import build_model\nfrom models.basenet import parameters_num\nfrom utils.general import to_python_float, get_latest_run, check_file, colorstr, increment_dir\nfrom utils.evaluate import calc_acc\nfrom utils.torch_utils import select_device, save_checkpoint, get_saved_state\nfrom utils.visualize import *\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef train(train_dataloader, model, optimizer, lr_scheduler, epoch, hyp, device, logger, tb_writer):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n accuracy = AverageMeter('Accuracy', ':.4e')\n\n progress = ProgressMeter(len(train_dataloader), [batch_time, data_time, losses, accuracy], prefix=\"Train - Epoch: [{}/{}]\".format(epoch, hyp['end_epoch']))\n\n criterion = KeyPointsMSELoss(hyp['use_target_weight'])\n num_iters_per_epoch = len(train_dataloader)\n\n # switch to train mode\n model.train()\n start_time = time.time()\n for batch_idx, batch_data in enumerate(tqdm(train_dataloader)):\n data_time.update(time.time() - start_time)\n inputs, targets, target_weight = batch_data\n global_step = num_iters_per_epoch * epoch + batch_idx + 1\n\n batch_size = inputs.size(0)\n targets = targets.to(device, non_blocking=True)\n target_weight = target_weight.to(device, non_blocking=True)\n #for k in targets.keys():\n # targets[k] = targets[k].to(device, non_blocking=True)\n # target_weight[k] = target_weight[k].cuda(non_blocking=True)\n inputs = inputs.to(device, non_blocking=True).float()\n outputs = model(inputs)\n\n # compute loss\n total_loss = criterion(outputs, targets, target_weight)\n\n # compute gradient and perform backpropagation\n total_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n # measure accuracy and record loss\n reduced_loss = total_loss.data\n losses.update(to_python_float(reduced_loss), batch_size)\n\n _, avg_acc, cnt, pred = calc_acc(outputs.detach().cpu().numpy(), targets.detach().cpu().numpy())\n accuracy.update(avg_acc, cnt)\n\n # measure elapsed time\n # torch.cuda.synchronize()\n batch_time.update(time.time() - start_time)\n\n #if tb_writer is not None:\n # if (global_step % hyp['ckpt_freq']) == 0:\n # tb_writer.add_scalars('Train', total_loss, global_step)\n\n if batch_idx % hyp['print_freq'] == 0:\n msg = 'Epoch: [{0}][{1}/{2}]\\t' \\\n 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\\n 'Speed {speed:.1f} samples/s\\t' \\\n 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\\n 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\\n 'Accuracy {acc.val:.3f} 
({acc.avg:.3f})'.format(\n epoch, batch_idx, num_iters_per_epoch, batch_time=batch_time,\n speed=inputs.size(0)/batch_time.val,\n data_time=data_time, loss=losses, acc=accuracy)\n print(msg)\n\n # Log message\n if logger is not None:\n if (global_step % hyp['ckpt_freq']) == 0:\n logger.info(progress.get_message(batch_idx))\n\n start_time = time.time()\n\ndef main(hyp, device, tb_writer=None):\n # create model\n if torch.cuda.is_available():\n torch.cuda.set_device(device)\n model = build_model(hyp['pretrained'], hyp['num_keypoints'], is_train=False)\n\n model.load_state_dict(torch.load(\"./saved_weights/Epoch20_Epoch1_kitti.pth\"))\n\n model = model.to(device)\n\n num_parameters = parameters_num(model)\n logger.info('number of trained parameters of the model: {}'.format(num_parameters))\n\n if hyp['resume']:\n checkpoints = hyp['resume'] if isinstance(hyp['resume'], str) else get_latest_run()\n assert os.path.isfile(checkpoints), 'ERROR: --resume checkpoint does not exist'\n model.load_state_dict(torch.load(checkpoints))\n if logger is not None:\n logger.info('Resuming training from %s' % checkpoints)\n\n # create optimizer\n optimizer = create_optimizer(hyp, model)\n lr_scheduler = create_lr_scheduler(hyp, optimizer)\n\n # Create dataloader\n logger.info(\">>> Loading dataset & getting dataloader...\")\n train_dataloader, train_sampler = create_train_dataloader(hyp)\n if logger is not None:\n logger.info('number of batches in training set: {}'.format(len(train_dataloader)))\n\n for epoch in range(hyp['start_epoch'], hyp['end_epoch']):\n lr_scheduler.step()\n if logger is not None:\n logger.info('>>> Epoch: [{}/{}]'.format(epoch, hyp['end_epoch']))\n\n # train for one epoch\n train(train_dataloader, model, optimizer, lr_scheduler, epoch, hyp, device, logger, tb_writer)\n if (epoch % hyp['ckpt_freq'] == 0):\n val_dataloader = create_val_dataloader(hyp)\n print('number of batches in val_dataloader: {}'.format(len(val_dataloader)))\n val_loss = validate(val_dataloader, model, device, hyp)\n print('val_loss: {:.4e}'.format(val_loss))\n if tb_writer is not None:\n tb_writer.add_scalar('Val_loss', val_loss, epoch)\n\n # Save checkpoint\n if ((epoch % hyp['ckpt_freq']) == 0):\n model_state_dict, utils_state_dict = get_saved_state(model, optimizer, lr_scheduler, epoch, hyp)\n save_checkpoint(hyp['ckpt_dir'], '3DOD', model_state_dict, utils_state_dict, epoch)\n\n lr_scheduler.step()\n if tb_writer is not None:\n tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], epoch)\n\n if tb_writer is not None:\n tb_writer.close()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train keypoints network')\n parser.add_argument('--config', type=str, default='./config/hyp.yaml', help='the path of the pretrained weights')\n parser.add_argument('--device', type=str, default='cpu', help='number of select device')\n opt = parser.parse_args()\n\n hyp = check_file(opt.config)\n assert len(hyp), '--hyp file must be specified'\n\n # 载入初始超参\n with open(hyp, encoding='UTF-8') as f:\n hyp = yaml.load(f, Loader=yaml.SafeLoader)\n\n device = select_device(opt.device, batch_size=hyp['batch_size'])\n\n # Logger\n logger.info(opt)\n prefix = colorstr('tensorboard: ')\n logger.info('Start Tensorboard with \"tensorboard --logdir %s\", view at http://localhost:6006/' % hyp['logs_dir'])\n\n # Tensorboard\n tb_writer = None\n hyp['logs_dir'] = increment_dir(Path(hyp['logs_dir']) / 'exp')\n tb_writer = SummaryWriter(log_dir=hyp['logs_dir'])\n\n # 调用train()函数,开始训练\n main(hyp, device, tb_writer)" ]
[ [ "torch.cuda.set_device", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidcoelho89/MLTool_python
[ "d8293ac73650028d9c8d8c101bfc3cb28a2ebb9f", "d8293ac73650028d9c8d8c101bfc3cb28a2ebb9f" ]
[ "scripts_patternRecognition/lssvm_classifier.py", "scripts_patternRecognition/data_loader.py" ]
[ "import numpy as np\nfrom numpy import dot, exp\nfrom scipy.spatial.distance import cdist\n\nclass LSSVM:\n 'Class that implements the Least-Squares Support Vector Machine.'\n \n def __init__(self, gamma=1, kernel='rbf', **kernel_params): \n \n self.gamma = gamma\n \n self.x = None\n self.y = None\n self.y_labels = None\n \n # model params\n self.alpha = None\n self.b = None\n \n self.kernel = LSSVM.get_kernel(kernel, **kernel_params)\n \n \n @staticmethod\n def get_kernel(name, **params):\n \n def linear(x_i, x_j): \n return dot(x_i, x_j.T)\n \n def poly(x_i, x_j, d=params.get('d',3)): \n return ( dot(x_i, x_j.T) + 1 )**d\n \n def rbf(x_i, x_j, sigma=params.get('sigma',1)):\n if x_i.ndim==x_i.ndim and x_i.ndim==2: # both matrices\n return exp( -cdist(x_i,x_j)**2 / sigma**2 )\n \n else: # both vectors or a vector and a matrix\n return exp( -( dot(x_i,x_i.T) + dot(x_j,x_j.T)- 2*dot(x_i,x_j) ) / sigma**2 )\n \n kernels = {'linear': linear, 'poly': poly, 'rbf': rbf}\n \n if kernels.get(name) is None: \n raise KeyError(\"Kernel '{}' is not defined, try one in the list: {}.\".format(\n name, list(kernels.keys())))\n else: return kernels[name]\n \n \n def opt_params(self, X, y_values):\n \n sigma = np.multiply( y_values*y_values.T, self.kernel(X,X) )\n\n A_cross = np.linalg.pinv(np.block([\n [0, y_values.T ],\n [y_values, sigma + self.gamma**-1 * np.eye(len(y_values))]\n ]))\n\n B = np.array([0]+[1]*len(y_values))\n\n solution = dot(A_cross, B)\n b = solution[0]\n alpha = solution[1:]\n \n return (b, alpha)\n \n \n def fit(self, X, Y, verboses=0):\n \n self.x = X\n self.y = Y\n self.y_labels = np.unique(Y, axis=0)\n\n # binary classification\n if len(self.y_labels)==2: \n\t\t\t# converting to -1/+1\n y_values = np.where(\n (Y == self.y_labels[0]).all(axis=1)\n ,-1,+1)[:,np.newaxis] # making it a column vector\n \n self.b, self.alpha = self.opt_params(X, y_values)\n \n else: # multiclass classification (ONE-VS-ALL Approach)\n n_classes = len(self.y_labels)\n self.b = np.zeros(n_classes)\n self.alpha = np.zeros((n_classes, len(Y)))\n for i in range(n_classes):\n # converting to +1 for the desired class and -1 for all other classes\n y_values = np.where(\n (Y == self.y_labels[i]).all(axis=1)\n ,+1,-1)[:,np.newaxis] # making it a column vector\n \n self.b[i], self.alpha[i] = self.opt_params(X, y_values)\n \n def predict(self, X):\n \n\t\t# X = torch.from_numpy(X).to(self.device)\n K = self.kernel(self.x, X)\n \n\t\t# binary classification\n if len(self.y_labels)==2:\n y_values = np.where(\n (self.y == self.y_labels[0]).all(axis=1),\n -1,+1)[:,np.newaxis] # making it a column vector\n\n Y = np.sign( dot( np.multiply(self.alpha, y_values.flatten()), K ) + self.b)\n \n y_pred_labels = np.where(Y==-1, self.y_labels[0], self.y_labels[1])\n \n\t\t# multiclass classification (ONE-VS-ALL Approach)\n else:\n Y = np.zeros((len(self.y_labels), len(X)))\n for i in range(len(self.y_labels)):\n y_values = np.where(\n (self.y == self.y_labels[i]).all(axis=1),\n +1, -1)[:,np.newaxis] # making it a column vector\n Y[i] = dot( np.multiply(self.alpha[i], y_values.flatten()), K ) + self.b[i] # no sign function applied\n \n predictions = np.argmax(Y, axis=0)\n y_pred_labels = np.array([self.y_labels[i] for i in predictions])\n \n return y_pred_labels\n \n######################################################################################################################\n\nimport torch\n\nclass LSSVM_GPU:\n 'Class that implements the Least-Squares Support Vector Machine on GPU.'\n \n def __init__(self, gamma=1, 
kernel='rbf', **kernel_params):\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n\t\t# Model hyperparameters \n self.gamma = gamma\n \n\t\t# Data used to build model\n self.x = None\n self.y = None\n self.y_labels = None\n \n # Model parameters\n self.alpha = None\n self.b = None\n \n self.kernel = LSSVM_GPU.get_kernel(kernel, **kernel_params) # saving kernel function\n \n \n @staticmethod\n def get_kernel(name, **params):\n \n def linear(x_i, x_j): \n return torch.mm(x_i, torch.t(x_j))\n \n def poly(x_i, x_j, d=params.get('d',3)): \n return ( torch.mm(x_i, torch.t(x_j)) + 1 )**d\n \n def rbf(x_i, x_j, sigma=params.get('sigma',1)):\n\t\t\t# both matrices\n if x_i.ndim==x_i.ndim and x_i.ndim==2:\n return torch.exp( -torch.cdist(x_i,x_j)**2 / sigma**2 )\n # both vectors or a vector and a matrix\n else:\n return torch.exp( -( torch.dot(x_i,torch.t(x_i)) + torch.dot(x_j,torch.t(x_j))- 2*torch.dot(x_i,x_j) ) / sigma**2 )\n \n kernels = {'linear': linear, 'poly': poly, 'rbf': rbf}\n \n if kernels.get(name) is None: \n raise KeyError(\"Kernel '{}' is not defined, try one in the list: {}.\".format(name, list(kernels.keys())))\n else: return kernels[name]\n \n \n def opt_params(self, X, y_values):\n sigma = ( torch.mm(y_values, torch.t(y_values)) ) * self.kernel(X,X)\n\n A_cross = torch.pinverse(torch.cat(( \n # block matrix\n torch.cat(( torch.tensor(0, dtype=X.dtype, device=self.device).view(1,1),\n torch.t(y_values)\n ),dim=1),\n torch.cat(( y_values, \n sigma + self.gamma**-1 * torch.eye(len(y_values), dtype=X.dtype, device=self.device) \n ),dim=1)\n ),dim=0))\n\n B = torch.tensor([0]+[1]*len(y_values), dtype=X.dtype, device=self.device).view(-1,1)\n\n solution = torch.mm(A_cross, B)\n b = solution[0]\n alpha = solution[1:].view(-1) # 1D array form\n \n return (b, alpha)\n \n \n def fit(self, X, Y, verboses=0):\n # converting to tensors and passing to GPU\n X = torch.from_numpy(X).to(self.device)\n Y = torch.from_numpy(Y).to(self.device)\n self.x = X\n self.y = Y\n self.y_labels = torch.unique(Y, dim=0)\n \n if len(self.y_labels)==2: # binary classification\n # converting to -1/+1\n y_values = torch.where(\n (Y == self.y_labels[0]).all(axis=1)\n ,torch.tensor(-1, dtype=X.dtype, device=self.device)\n ,torch.tensor(+1, dtype=X.dtype, device=self.device)\n ).view(-1,1) # making it a column vector\n \n self.b, self.alpha = self.opt_params(X, y_values)\n \n else: # multiclass classification\n # ONE-VS-ALL APPROACH\n n_classes = len(self.y_labels)\n self.b = torch.empty(n_classes, dtype=X.dtype, device=self.device)\n self.alpha = torch.empty(n_classes, len(Y), dtype=X.dtype, device=self.device)\n for i in range(n_classes):\n # converting to +1 for the desired class and -1 for all other classes\n y_values = torch.where(\n (Y == self.y_labels[i]).all(axis=1)\n ,torch.tensor(+1, dtype=X.dtype, device=self.device)\n ,torch.tensor(-1, dtype=X.dtype, device=self.device)\n ).view(-1,1) # making it a column vector\n \n self.b[i], self.alpha[i] = self.opt_params(X, y_values)\n\n \n def predict(self, X):\n X = torch.from_numpy(X).to(self.device)\n K = self.kernel(self.x, X)\n \n if len(self.y_labels)==2: # binary classification\n y_values = torch.where(\n (self.y == self.y_labels[0]).all(axis=1)\n ,torch.tensor(-1, dtype=X.dtype, device=self.device)\n ,torch.tensor(+1, dtype=X.dtype, device=self.device)\n )\n \n Y = torch.sign( torch.mm( (self.alpha*y_values).view(1,-1), K ) + self.b)\n \n y_pred_labels = torch.where(Y==-1, self.y_labels[0],\n self.y_labels[1]\n 
).view(-1) # convert to flat array\n \n else: # multiclass classification, ONE-VS-ALL APPROACH\n Y = torch.empty((len(self.y_labels), len(X)), dtype=X.dtype, device=self.device)\n for i in range(len(self.y_labels)):\n y_values = torch.where(\n (self.y == self.y_labels[i]).all(axis=1)\n ,torch.tensor(+1, dtype=X.dtype, device=self.device)\n ,torch.tensor(-1, dtype=X.dtype, device=self.device)\n )\n\n Y[i] = torch.mm( (self.alpha[i]*y_values).view(1,-1), K ) + self.b[i] # no sign function applied\n \n predictions = torch.argmax(Y, axis=0)\n y_pred_labels = torch.stack([self.y_labels[i] for i in predictions])\n \n return y_pred_labels", "# -*- coding: utf-8 -*-\n\"\"\"\nData Functions Module\n\n\"\"\"\n\nimport os # Get files' paths\nimport numpy as np # Work with matrices (arrays)\nimport pandas as pd # Load csv files\nimport scipy.stats as ss # interquartile attribute\n\ndef load_ecg_audio():\n \n # Absolute Path\n audioFile_path = os.path.realpath('datasets/audio.txt')\n ecgFile_path = os.path.realpath('datasets/ecg.txt')\n\n # Get files as dataframes\n audio = pd.read_fwf(audioFile_path, header=None)\n ecg = pd.read_fwf(ecgFile_path, header=None)\n\n # 100 signals of 500 samples\n signals = pd.concat([audio.T,ecg.T])\n \n # Calculate Attributes\n minimum = signals.min(axis = 1) \n maximum = signals.max(axis = 1)\n med = signals.mean(axis = 1)\n var = signals.var(axis = 1)\n std = signals.std(axis = 1)\n ske = signals.skew(axis = 1)\n kur = signals.kurtosis(axis = 1)\n iqr = signals.apply(lambda x: ss.iqr(x), axis=1)\n \n # Attributes and label Matrices\n X = np.array([minimum,maximum,med,var,std,ske,kur,iqr])\n Y = np.concatenate( (np.ones((1,50)),2*np.ones((1,50))) , axis=1 )\n \n return X.T,Y.T\n\ndef load_iris():\n \n # Absolute Path\n iris_path = os.path.realpath('datasets/iris.data.csv')\n df = pd.read_csv(iris_path) # load data in a dataframe\n \n # Attributes\n X = np.array(df.drop('classe',1)).T\n \n # Classes\n aux = pd.get_dummies(df['classe'])\n Y = np.array(aux.values)\n Y = np.argmax(Y,axis = 1) + 1\n N = len(Y)\n Y = np.multiply(np.ones((1,N)),Y)\n \n return X.T,Y.T\n \ndef load_wine():\n \n # Absolute Path\n wine_path = os.path.realpath('datasets/wine.csv')\n df = pd.read_csv(wine_path,header=None) # load data in a dataframe\n \n X = np.array(df.drop(df.columns[0], axis=1)).T\n Y = np.array(df.iloc[:,0]).T\n N = len(Y)\n Y = np.multiply(np.ones((1,N)),Y)\n \n return X.T,Y.T" ]
[ [ "numpy.dot", "torch.cdist", "torch.unique", "torch.cuda.is_available", "torch.where", "torch.t", "numpy.where", "torch.mm", "numpy.unique", "torch.from_numpy", "torch.tensor", "numpy.argmax", "torch.dot", "numpy.zeros", "torch.empty", "scipy.spatial.distance.cdist", "torch.stack", "numpy.array", "torch.argmax" ], [ "pandas.concat", "pandas.read_csv", "pandas.read_fwf", "numpy.ones", "scipy.stats.iqr", "numpy.argmax", "numpy.array", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "1.6", "1.10", "1.4", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "1.3", "1.8" ], "tensorflow": [] } ]
chongxi/vispy
[ "fa5e2eab9bb3d956f87ae68a56e342913e58a305", "3683ea1f58e43b4aa1b32a3e69656bead8a31e99", "cc17f94b0e60c7845320017e60b355245f8bafff" ]
[ "examples/basics/plotting/mpl_plot.py", "examples/demo/scene/probe_view.py", "examples/basics/visuals/dynamic_polygon.py" ]
[ "# -*- coding: utf-8 -*-\n# vispy: testskip\n# -----------------------------------------------------------------------------\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nExample demonstrating how to use vispy.pyplot, which uses mplexporter\nto convert matplotlib commands to vispy draw commands.\n\nRequires matplotlib.\n\"\"\"\n\nimport numpy as np\n\n# You can use either matplotlib or vispy to render this example:\n# import matplotlib.pyplot as plt\nimport vispy.mpl_plot as plt\n\nfrom vispy.io import read_png, load_data_file\n\nn = 200\nfreq = 10\nfs = 100.\nt = np.arange(n) / fs\ntone = np.sin(2*np.pi*freq*t)\nnoise = np.random.RandomState(0).randn(n)\nsignal = tone + noise\nmagnitude = np.abs(np.fft.fft(signal))\nfreqs = np.fft.fftfreq(n, 1. / fs)\nflim = n // 2\n\n# Signal\nfig = plt.figure()\nax = plt.subplot(311)\nax.imshow(read_png(load_data_file('pyplot/logo.png')))\n\nax = plt.subplot(312)\nax.plot(t, signal, 'k-')\n\n# Frequency content\nax = plt.subplot(313)\nidx = np.argmax(magnitude[:flim])\nax.text(freqs[idx], magnitude[idx], 'Max: %s Hz' % freqs[idx],\n verticalalignment='top')\nax.plot(freqs[:flim], magnitude[:flim], 'k-o')\n\nplt.draw()\n\n# NOTE: show() has currently been overwritten to convert to vispy format, so:\n# 1. It must be called to show the results, and\n# 2. Any plotting commands executed after this will not take effect.\n# We are working to remove this limitation.\n\nif __name__ == '__main__':\n plt.show(True)\n", "import numpy as np\nfrom vispy import app, scene, visuals, gloo\nfrom vispy.color import Color\nfrom vispy.visuals.transforms import STTransform\n\nwhite = Color(\"#ecf0f1\")\ngray = Color(\"#121212\")\nred = Color(\"#e74c3c\")\nblue = Color(\"#2980b9\")\norange = Color(\"#e88834\")\n\n\ndef star(inner=0.5, outer=1.0, n=5):\n R = np.array([inner, outer] * n)\n T = np.linspace(0, 2 * np.pi, 2 * n, endpoint=False)\n P = np.zeros((2 * n, 3))\n P[:, 0] = R * np.cos(T)\n P[:, 1] = R * np.sin(T)\n return P\n\n\ndef rec(left=-15, right=15, bottom=-25, top=25):\n P = np.zeros((4, 3))\n R = np.array([[left, bottom],\n [right, bottom],\n [right, top ],\n [left, top ]])\n P[:, :2] = R\n return P\n\n\n\nclass shank(object):\n def __init__(self, pos):\n self.pos = pos\n\nclass probe_geometry(object):\n \"\"\"docstring for probe_geometry\"\"\"\n def __init__(self, shanks):\n super(probe_geometry, self).__init__()\n self.shanks = shanks\n\n\nclass probe_view(scene.SceneCanvas):\n '''probe view\n '''\n def __init__(self):\n scene.SceneCanvas.__init__(self, keys=None, title='probe view')\n self.unfreeze()\n self.view = self.central_widget.add_view()\n self.view.camera = 'panzoom'\n self.electrode_pads = scene.visuals.Markers(parent=self.view.scene)\n self.electrode_text = scene.visuals.Text(parent=self.view.scene)\n # self.electrode_edge = scene.visuals.Line(antialias=False, method='gl', color=(1, 1, 1, 0.2), parent=self.view.scene)\n \n\n def set_data(self, mapping):\n # self.prb = probe\n self.pos = pos\n self.electrode_pads.set_data(self.pos, symbol='square', size=17)\n self.electrode_text.text = [str(i) for i in range(len(pos))] \n self.electrode_text.pos = pos\n self.electrode_text.font_size = 6\n edges = np.array([[0,1],[0,2],[0,3],[1,2],[1,3],[2,3],\n [4,5],[4,6],[4,7],[5,6],[5,7],[6,7]])\n color = np.ones((pos.shape[0], 4))\n color[:4,:] = np.array([1,0,0,1])\n self.electrode_edge = 
scene.visuals.Line(pos=pos, connect=edges, antialias=False, method='gl',\n color=color, parent=self.view.scene)\n\n self.view.camera.set_range([-100,100])\n\n\n def on_key_press(self, e):\n if e.text == 'r':\n self.view.camera.set_range([-100,100])\n\n\n def run(self):\n self.show()\n self.app.run()\n\n\nif __name__ == '__main__':\n # prb = 'bow-tie'\n prb_view = probe_view()\n nCh = 64\n y_pos = np.linspace(0,600,32).reshape(-1,1)\n x_pos = np.ones_like(y_pos) * -10\n l_shank = np.hstack((x_pos, y_pos)) \n r_shank = np.hstack((-x_pos, y_pos))\n pos = np.empty((l_shank.shape[0] + r_shank.shape[0], 2))\n pos[::2] = l_shank\n pos[1::2] = r_shank\n mapping = {}\n\n print(pos)\n prb_view.set_data(pos)\n prb_view.run()\n", "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\nDemonstration of Polygon and subclasses\n\"\"\"\n\nimport sys\nimport numpy as np\n\nfrom vispy import app, visuals\nfrom vispy.visuals import transforms\n\n# vertex positions of polygon data to draw\npos = np.array([[0, 0, 0],\n [0.25, 0.22, 0],\n [0.25, 0.5, 0],\n [0, 0.5, 0],\n [-0.25, 0.25, 0]])\n\npos = np.array([[0, 0],\n [10, 0],\n [10, 10],\n [20, 10],\n [20, 20],\n [25, 20],\n [25, 25],\n [20, 25],\n [20, 20],\n [10, 17],\n [5, 25],\n [9, 30],\n [6, 15],\n [15, 12.5],\n [0, 5]])\n\ntheta = np.linspace(0, 2*np.pi, 11)\npos = np.hstack([np.cos(theta)[:, np.newaxis],\n np.sin(theta)[:, np.newaxis]])\npos[::2] *= 0.4\npos[-1] = pos[0]\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, keys='interactive', size=(800, 800))\n global pos\n self.visuals = []\n polygon = visuals.PolygonVisual(pos=pos, color=(0.8, .2, 0, 1),\n border_color=(1, 1, 1, 1))\n polygon.transform = transforms.STTransform(scale=(200, 200),\n translate=(600, 600))\n self.visuals.append(polygon)\n\n ellipse = visuals.EllipseVisual(center=(0, 0, 0), radius=(100, 100),\n color=(0.2, 0.2, 0.8, 1),\n border_color=(1, 1, 1, 1),\n start_angle=180., span_angle=150.)\n ellipse.transform = transforms.STTransform(scale=(0.9, 1.5),\n translate=(200, 200))\n self.visuals.append(ellipse)\n\n rect = visuals.RectangleVisual(center=(600, 200, 0), height=200.,\n width=300.,\n radius=[30., 30., 0., 0.],\n color=(0.5, 0.5, 0.2, 1),\n border_color='white')\n rect.transform = transforms.NullTransform()\n self.visuals.append(rect)\n\n rpolygon = visuals.RegularPolygonVisual(center=(200., 600., 0),\n radius=160,\n color=(0.2, 0.8, 0.2, 1),\n border_color=(1, 1, 1, 1),\n sides=6)\n rpolygon.transform = transforms.NullTransform()\n self.visuals.append(rpolygon)\n\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n self.show()\n\n def on_draw(self, ev):\n self.context.set_clear_color((0, 0, 0, 1))\n self.context.set_viewport(0, 0, *self.physical_size)\n self.context.clear()\n for vis in self.visuals:\n vis.draw()\n\n def on_resize(self, event):\n # Set canvas viewport and reconfigure visual transforms to match.\n vp = (0, 0, self.physical_size[0], self.physical_size[1])\n self.context.set_viewport(*vp)\n\n for vis in self.visuals:\n vis.transforms.configure(canvas=self, viewport=vp)\n\n def on_timer(self, event):\n polygon, ellipse, rect, rpolygon = self.visuals\n r = ellipse.radius\n ellipse.radius = r[0], r[1] + np.sin(event.elapsed * 10)\n ellipse.span_angle = (ellipse.span_angle + 100. 
* event.dt) % 360\n\n c = (0.3 * (0.5 + np.sin(event.elapsed * 2 + 0)),\n 0.3 * (0.5 + np.sin(event.elapsed * 2 + np.pi * 2./3.)),\n 0.3 * (0.5 + np.sin(event.elapsed * 2 + np.pi * 4./3.)))\n polygon.color = c\n polygon.border_color = (.8, .8, .8,\n 0.5 + (0.5 * np.sin(event.elapsed*10)))\n\n rpolygon.radius = 100 + 10 * np.sin(event.elapsed * 3.1)\n rpolygon.sides = int(20 + 17 * np.sin(event.elapsed))\n\n self.update()\n\n\nif __name__ == '__main__':\n win = Canvas() \n if sys.flags.interactive != 1:\n win.app.run()\n" ]
[ [ "numpy.fft.fft", "numpy.arange", "numpy.sin", "numpy.argmax", "numpy.fft.fftfreq", "numpy.random.RandomState" ], [ "numpy.hstack", "numpy.ones_like", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.array", "numpy.cos", "numpy.linspace", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shixinlishixinli/openvino_training_extensions
[ "cd34e5ceb8ae016e14b8b43b033f82bd5e11949e", "cd34e5ceb8ae016e14b8b43b033f82bd5e11949e" ]
[ "pytorch_toolkit/nncf/examples/object_detection/models/ssd_vgg.py", "pytorch_toolkit/super_resolution/tools/infer_ie.py" ]
[ "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom examples.object_detection.layers import L2Norm\nfrom examples.object_detection.layers.modules.ssd_head import MultiOutputSequential, SSDDetectionOutput\nfrom examples.common.model_loader import load_state\n\nBASE_NUM_OUTPUTS = {\n 300: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512],\n 512: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],\n}\nEXTRAS_NUM_OUTPUTS = {\n 300: [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n 512: [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128, 'K', 256],\n}\n\nBASE_OUTPUT_INDICES = {\n 300: [12],\n 512: [12],\n}\n\nEXTRA_OUTPUT_INDICES = {\n 300: [2, 5, 7, 9],\n 512: [2, 5, 8, 11, 14],\n}\n\n\nclass SSD_VGG(nn.Module):\n def __init__(self, cfg, size, num_classes, batch_norm=False):\n super(SSD_VGG, self).__init__()\n self.config = cfg\n self.num_classes = num_classes\n self.size = size\n self.enable_batchmorm = batch_norm\n\n base_layers, base_outs, base_feats = build_vgg_ssd_layers(\n BASE_NUM_OUTPUTS[size], BASE_OUTPUT_INDICES[size], batch_norm=batch_norm\n )\n extra_layers, extra_outs, extra_feats = build_vgg_ssd_extra(\n EXTRAS_NUM_OUTPUTS[size], EXTRA_OUTPUT_INDICES[size], batch_norm=batch_norm\n )\n self.basenet = MultiOutputSequential(base_outs, base_layers)\n self.extras = MultiOutputSequential(extra_outs, extra_layers)\n\n self.detection_head = SSDDetectionOutput(base_feats + extra_feats, num_classes, cfg)\n self.L2Norm = L2Norm(512, 20, 1e-10)\n\n def forward(self, x):\n img_tensor = x[0].clone().unsqueeze(0)\n\n sources, x = self.basenet(x)\n sources[0] = self.L2Norm(sources[0])\n\n extra_sources, x = self.extras(x)\n\n return self.detection_head(sources + extra_sources, img_tensor)\n\n def load_weights(self, base_file):\n _, ext = os.path.splitext(base_file)\n if ext == '.pkl' or '.pth':\n print('Loading weights into state dict...')\n self.load_state_dict(torch.load(base_file,\n map_location=lambda storage, loc: storage))\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\ndef make_ssd_vgg_layer(input_features, output_features, kernel=3, padding=1, dilation=1, modifier=None,\n batch_norm=False):\n stride = 1\n if modifier == 'S':\n stride = 2\n padding = 1\n elif modifier == 'K':\n kernel = 4\n padding = 1\n\n layer = [nn.Conv2d(input_features, output_features, kernel_size=kernel, stride=stride, padding=padding,\n dilation=dilation)]\n if batch_norm:\n layer.append(nn.BatchNorm2d(output_features))\n layer.append(nn.ReLU(inplace=True))\n return layer\n\n\ndef build_vgg_ssd_layers(num_outputs, output_inddices, start_input_channels=3, batch_norm=False):\n vgg_layers = []\n output_num_features = []\n source_indices = []\n in_planes = start_input_channels\n modifier = None\n for i, out_planes in enumerate(num_outputs):\n if out_planes in ('M', 'C'):\n 
vgg_layers.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=1 if modifier == 'C' else 0))\n continue\n if isinstance(out_planes, str):\n modifier = out_planes\n continue\n vgg_layers.extend(make_ssd_vgg_layer(in_planes, out_planes, modifier=modifier, batch_norm=batch_norm))\n modifier = None\n in_planes = out_planes\n if i in output_inddices:\n source_indices.append(len(vgg_layers) - 1)\n output_num_features.append(out_planes)\n\n vgg_layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))\n vgg_layers.extend(make_ssd_vgg_layer(in_planes, 1024, kernel=3, padding=6, dilation=6, batch_norm=batch_norm))\n vgg_layers.extend(make_ssd_vgg_layer(1024, 1024, kernel=1, batch_norm=batch_norm))\n\n source_indices.append(len(vgg_layers) - 1)\n output_num_features.append(1024)\n return vgg_layers, source_indices, output_num_features\n\n\ndef build_vgg_ssd_extra(num_outputs, output_indices, statrt_input_channels=1024, batch_norm=False):\n extra_layers = []\n output_num_features = []\n source_indices = []\n in_planes = statrt_input_channels\n modifier = None\n kernel_sizes = (1, 3)\n for i, out_planes in enumerate(num_outputs):\n if isinstance(out_planes, str):\n modifier = out_planes\n continue\n kernel = kernel_sizes[len(extra_layers) % 2]\n extra_layers.extend(make_ssd_vgg_layer(in_planes, out_planes, modifier=modifier, kernel=kernel, padding=0,\n batch_norm=batch_norm))\n modifier = None\n in_planes = out_planes\n if i in output_indices:\n source_indices.append(len(extra_layers) - 1)\n output_num_features.append(out_planes)\n\n return extra_layers, source_indices, output_num_features\n\n\ndef build_ssd_vgg(cfg, size, num_classes, config):\n ssd_vgg = SSD_VGG(cfg, size, num_classes, batch_norm=config.get('batchnorm', False))\n print('Initializing weights...')\n\n # ssd_vgg.apply(weights_init)\n\n if config.basenet:\n print('Loading base network...')\n basenet_weights = torch.load(config.basenet)\n new_weights = {}\n for wn, wv in basenet_weights.items():\n wn = wn.replace('features.', '')\n new_weights[wn] = wv\n\n load_state(ssd_vgg.basenet, new_weights, strict=False)\n return ssd_vgg\n", "#!/usr/bin/env python3\n#\n# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nfrom argparse import ArgumentParser\nimport os\nimport warnings\nimport cv2\nimport skimage\nimport numpy as np\nfrom openvino.inference_engine import IENetwork, IEPlugin\n\n\ndef build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"--model\", type=str, required=True, help=\"Path to an .xml file with a trained model.\")\n parser.add_argument(\"--device\", help=\"Specify the target device to infer on. 
(default: %(default)s)\",\n choices=[\"CPU\", \"GPU\", \"MYRIAD\"], default=\"CPU\")\n parser.add_argument('--output_dir', default=None, help='Output debugirectory')\n parser.add_argument('input_image', help='Image')\n return parser.parse_args()\n\n\ndef load_ir_model(model_xml, device):\n model_bin = os.path.splitext(model_xml)[0] + \".bin\"\n\n # initialize plugin and read IR\n plugin = IEPlugin(device=device)\n net = IENetwork(model=model_xml, weights=model_bin)\n exec_net = plugin.load(network=net)\n\n input_blobs = net.inputs.keys()\n inputs = [(b, net.inputs[b].shape) for b in input_blobs]\n\n out_blob = next(iter(net.outputs))\n del net\n\n return exec_net, plugin, inputs, out_blob\n\n\ndef image_to_blob(image, shape):\n blob = image.copy()\n blob = blob.transpose((2, 0, 1)) # from HWC to CHW\n blob = blob.reshape(shape)\n return blob\n\n\ndef blob_to_img(blob):\n blob = blob.transpose((1, 2, 0)) # from CHW to HWC\n blob = np.clip(blob, 0.0, 1.0)\n\n # Suppression skimage warning:\n # UserWarning: Possible precision loss when converting from float32 to uint8\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n blob = skimage.img_as_ubyte(blob)\n return blob\n\ndef main():\n args = build_argparser()\n exec_net, _, inputs, out_blob = load_ir_model(args.model, args.device)\n\n # Prepare input blobs\n ih, iw = inputs[0][1][2:]\n image = cv2.imread(args.input_image)\n if image.shape[0] != ih or image.shape[1] != iw:\n image = image[0:ih, 0:iw]\n\n cubic = cv2.resize(image, (inputs[1][1][3], inputs[1][1][2]), interpolation=cv2.INTER_CUBIC)\n\n blob1 = image_to_blob(image, (inputs[0][1]))\n blob2 = image_to_blob(cubic, (inputs[1][1]))\n\n # inference\n result = exec_net.infer(inputs={inputs[0][0]: blob1, inputs[1][0]: blob2})\n\n # Postprocessing\n out_img = blob_to_img(result[out_blob][0])\n\n outpur_dir = args.output_dir if args.output_dir else os.path.dirname(args.input_image)\n out_path = os.path.join(outpur_dir, \"sr_\" + os.path.basename(args.input_image))\n cv2.imwrite(out_path, out_img)\n print(\"Saved: \", out_path)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.load", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gitoni/geopandas
[ "6163bc92ab501c571e98bde737d80c464a9f50cc", "6163bc92ab501c571e98bde737d80c464a9f50cc" ]
[ "geopandas/plotting.py", "tests/test_geocode.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nfrom six import next\nfrom six.moves import xrange\n\n\ndef plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5):\n \"\"\" Plot a single Polygon geometry \"\"\"\n from descartes.patch import PolygonPatch\n a = np.asarray(poly.exterior)\n # without Descartes, we could make a Patch of exterior\n ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha))\n ax.plot(a[:, 0], a[:, 1], color=edgecolor)\n for p in poly.interiors:\n x, y = zip(*p.coords)\n ax.plot(x, y, color=edgecolor)\n\n\ndef plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5):\n \"\"\" Can safely call with either Polygon or Multipolygon geometry\n \"\"\"\n if geom.type == 'Polygon':\n plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)\n elif geom.type == 'MultiPolygon':\n for poly in geom.geoms:\n plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)\n\n\ndef plot_linestring(ax, geom, color='black', linewidth=1):\n \"\"\" Plot a single LineString geometry \"\"\"\n a = np.array(geom)\n ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth)\n\n\ndef plot_multilinestring(ax, geom, color='red', linewidth=1):\n \"\"\" Can safely call with either LineString or MultiLineString geometry\n \"\"\"\n if geom.type == 'LineString':\n plot_linestring(ax, geom, color=color, linewidth=linewidth)\n elif geom.type == 'MultiLineString':\n for line in geom.geoms:\n plot_linestring(ax, line, color=color, linewidth=linewidth)\n\n\ndef plot_point(ax, pt, marker='o', markersize=2):\n \"\"\" Plot a single Point geometry \"\"\"\n ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0)\n\n\ndef gencolor(N, colormap='Set1'):\n \"\"\"\n Color generator intended to work with one of the ColorBrewer\n qualitative color scales.\n\n Suggested values of colormap are the following:\n\n Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3\n\n (although any matplotlib colormap will work).\n \"\"\"\n from matplotlib import cm\n # don't use more than 9 discrete colors\n n_colors = min(N, 9)\n cmap = cm.get_cmap(colormap, n_colors)\n colors = cmap(range(n_colors))\n for i in xrange(N):\n yield colors[i % n_colors]\n\n\ndef plot_series(s, colormap='Set1', axes=None, **color_kwds):\n \"\"\" Plot a GeoSeries\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n\n Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n\n colormap : str (default 'Set1')\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. 
Examples of useful discrete\n colormaps include:\n\n Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3\n\n axes : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n\n **color_kwds : dict\n Color options to be passed on to plot_polygon\n\n Returns\n -------\n\n matplotlib axes instance\n \"\"\"\n import matplotlib.pyplot as plt\n if axes is None:\n fig = plt.gcf()\n fig.add_subplot(111, aspect='equal')\n ax = plt.gca()\n else:\n ax = axes\n color = gencolor(len(s), colormap=colormap)\n for geom in s:\n if geom.type == 'Polygon' or geom.type == 'MultiPolygon':\n plot_multipolygon(ax, geom, facecolor=next(color), **color_kwds)\n elif geom.type == 'LineString' or geom.type == 'MultiLineString':\n plot_multilinestring(ax, geom, color=next(color))\n elif geom.type == 'Point':\n plot_point(ax, geom)\n plt.draw()\n return ax\n\n\ndef plot_dataframe(s, column=None, colormap=None,\n categorical=False, legend=False, axes=None,\n scheme=None, k=5,\n **color_kwds\n ):\n \"\"\" Plot a GeoDataFrame\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column. Otherwise, a categorical plot of the\n geometries in the `geometry` column will be generated.\n\n Parameters\n ----------\n\n GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n\n column : str (default None)\n The name of the column to be plotted.\n\n categorical : bool (default False)\n If False, colormap will reflect numerical values of the\n column being plotted. For non-numerical columns (or if\n column=None), this will be set to True.\n\n colormap : str (default 'Set1')\n The name of a colormap recognized by matplotlib.\n\n legend : bool (default False)\n Plot a legend (Experimental; currently for categorical\n plots only)\n\n axes : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n\n scheme : pysal.esda.mapclassify.Map_Classifier\n Choropleth classification schemes\n\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n\n **color_kwds : dict\n Color options to be passed on to plot_polygon\n\n Returns\n -------\n\n matplotlib axes instance\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n if column is None:\n return plot_series(s.geometry, colormap=colormap, axes=axes, **color_kwds)\n else:\n if s[column].dtype is np.dtype('O'):\n categorical = True\n if categorical:\n if colormap is None:\n colormap = 'Set1'\n categories = list(set(s[column].values))\n categories.sort()\n valuemap = dict([(k, v) for (v, k) in enumerate(categories)])\n values = [valuemap[k] for k in s[column]]\n else:\n values = s[column]\n if scheme is not None:\n values = __pysal_choro(values, scheme, k=k)\n cmap = norm_cmap(values, colormap, Normalize, cm)\n if axes is None:\n fig = plt.gcf()\n fig.add_subplot(111, aspect='equal')\n ax = plt.gca()\n else:\n ax = axes\n for geom, value in zip(s.geometry, values):\n if geom.type == 'Polygon' or geom.type == 'MultiPolygon':\n plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value), **color_kwds)\n elif geom.type == 'LineString' or geom.type == 'MultiLineString':\n plot_multilinestring(ax, geom, color=cmap.to_rgba(value))\n # TODO: color point geometries\n elif geom.type == 'Point':\n plot_point(ax, geom)\n if legend:\n if categorical:\n patches = []\n for value, cat in 
enumerate(categories):\n patches.append(Line2D([0], [0], linestyle=\"none\",\n marker=\"o\", alpha=color_kwds.get('alpha', 0.5),\n markersize=10, markerfacecolor=cmap.to_rgba(value)))\n ax.legend(patches, categories, numpoints=1, loc='best')\n else:\n # TODO: show a colorbar\n raise NotImplementedError\n plt.draw()\n return ax\n\n\ndef __pysal_choro(values, scheme, k=5):\n \"\"\" Wrapper for choropleth schemes from PySAL for use with plot_dataframe\n\n Parameters\n ----------\n\n values\n Series to be plotted\n\n scheme\n pysal.esda.mapclassify classificatin scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks']\n\n k\n number of classes (2 <= k <=9)\n\n Returns\n -------\n\n values\n Series with values replaced with class identifier if PySAL is available, otherwise the original values are used\n \"\"\"\n\n try:\n from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks\n schemes = {}\n schemes['equal_interval'] = Equal_Interval\n schemes['quantiles'] = Quantiles\n schemes['fisher_jenks'] = Fisher_Jenks\n s0 = scheme\n scheme = scheme.lower()\n if scheme not in schemes:\n scheme = 'quantiles'\n print('Unrecognized scheme: ', s0)\n print('Using Quantiles instead')\n if k < 2 or k > 9:\n print('Invalid k: ', k)\n print('2<=k<=9, setting k=5 (default)')\n k = 5\n binning = schemes[scheme](values, k)\n values = binning.yb\n except ImportError:\n print('PySAL not installed, setting map to default')\n\n return values\n\n\ndef norm_cmap(values, cmap, normalize, cm):\n\n \"\"\" Normalize and set colormap\n\n Parameters\n ----------\n\n values\n Series or array to be normalized\n\n cmap\n matplotlib Colormap\n\n normalize\n matplotlib.colors.Normalize\n\n cm\n matplotlib.cm\n\n Returns\n -------\n n_cmap\n mapping of normalized values to colormap (cmap)\n \"\"\"\n\n mn, mx = min(values), max(values)\n norm = normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n return n_cmap\n", "from __future__ import absolute_import\n\nimport sys\n\nfrom fiona.crs import from_epsg\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom shapely.geometry import Point\nimport geopandas as gpd\nimport nose\n\nfrom geopandas import GeoSeries\nfrom geopandas.tools import geocode, reverse_geocode\nfrom geopandas.tools.geocoding import _prepare_geocode_result\n\nfrom .util import unittest, mock, assert_geoseries_equal\n\n\ndef _skip_if_no_geopy():\n try:\n import geopy\n except ImportError:\n raise nose.SkipTest(\"Geopy not installed. Skipping tests.\")\n except SyntaxError:\n raise nose.SkipTest(\"Geopy is known to be broken on Python 3.2. 
\"\n \"Skipping tests.\")\n\n\nclass ForwardMock(mock.MagicMock):\n \"\"\"\n Mock the forward geocoding function.\n Returns the passed in address and (p, p+.5) where p increases\n at each call\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ForwardMock, self).__init__(*args, **kwargs)\n self._n = 0.0\n\n def __call__(self, *args, **kwargs):\n self.return_value = args[0], (self._n, self._n + 0.5)\n self._n += 1\n return super(ForwardMock, self).__call__(*args, **kwargs)\n\n\nclass ReverseMock(mock.MagicMock):\n \"\"\"\n Mock the reverse geocoding function.\n Returns the passed in point and 'address{p}' where p increases\n at each call\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ReverseMock, self).__init__(*args, **kwargs)\n self._n = 0\n\n def __call__(self, *args, **kwargs):\n self.return_value = 'address{0}'.format(self._n), args[0]\n self._n += 1\n return super(ReverseMock, self).__call__(*args, **kwargs)\n\n\nclass TestGeocode(unittest.TestCase):\n def setUp(self):\n _skip_if_no_geopy()\n self.locations = ['260 Broadway, New York, NY',\n '77 Massachusetts Ave, Cambridge, MA']\n self.points = [Point(-71.0597732, 42.3584308),\n Point(-77.0365305, 38.8977332)]\n\n def test_prepare_result(self):\n # Calls _prepare_result with sample results from the geocoder call\n # loop\n p0 = Point(12.3, -45.6) # Treat these as lat/lon\n p1 = Point(-23.4, 56.7)\n d = {'a': ('address0', p0.coords[0]),\n 'b': ('address1', p1.coords[0])}\n\n df = _prepare_geocode_result(d)\n assert type(df) is gpd.GeoDataFrame\n self.assertEqual(from_epsg(4326), df.crs)\n self.assertEqual(len(df), 2)\n self.assert_('address' in df)\n\n coords = df.loc['a']['geometry'].coords[0]\n test = p0.coords[0]\n # Output from the df should be lon/lat\n self.assertAlmostEqual(coords[0], test[1])\n self.assertAlmostEqual(coords[1], test[0])\n\n coords = df.loc['b']['geometry'].coords[0]\n test = p1.coords[0]\n self.assertAlmostEqual(coords[0], test[1])\n self.assertAlmostEqual(coords[1], test[0])\n\n def test_prepare_result_none(self):\n p0 = Point(12.3, -45.6) # Treat these as lat/lon\n d = {'a': ('address0', p0.coords[0]),\n 'b': (None, None)}\n\n df = _prepare_geocode_result(d)\n assert type(df) is gpd.GeoDataFrame\n self.assertEqual(from_epsg(4326), df.crs)\n self.assertEqual(len(df), 2)\n self.assert_('address' in df)\n\n row = df.loc['b']\n self.assertEqual(len(row['geometry'].coords), 0)\n self.assert_(pd.np.isnan(row['address']))\n \n def test_bad_provider_forward(self):\n with self.assertRaises(ValueError):\n geocode(['cambridge, ma'], 'badprovider')\n\n def test_bad_provider_reverse(self):\n with self.assertRaises(ValueError):\n reverse_geocode(['cambridge, ma'], 'badprovider')\n\n def test_forward(self):\n with mock.patch('geopy.geocoders.googlev3.GoogleV3.geocode',\n ForwardMock()) as m:\n g = geocode(self.locations, provider='googlev3', timeout=2)\n self.assertEqual(len(self.locations), m.call_count)\n\n n = len(self.locations)\n self.assertIsInstance(g, gpd.GeoDataFrame)\n expected = GeoSeries([Point(float(x)+0.5, float(x)) for x in range(n)],\n crs=from_epsg(4326))\n assert_geoseries_equal(expected, g['geometry'])\n tm.assert_series_equal(g['address'],\n pd.Series(self.locations, name='address'))\n\n\n def test_reverse(self):\n with mock.patch('geopy.geocoders.googlev3.GoogleV3.reverse',\n ReverseMock()) as m:\n g = reverse_geocode(self.points, provider='googlev3', timeout=2)\n self.assertEqual(len(self.points), m.call_count)\n\n self.assertIsInstance(g, gpd.GeoDataFrame)\n\n expected = 
GeoSeries(self.points, crs=from_epsg(4326))\n assert_geoseries_equal(expected, g['geometry'])\n tm.assert_series_equal(g['address'],\n pd.Series('address' + str(x) \n for x in range(len(self.points))))\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.asarray", "numpy.dtype", "matplotlib.pyplot.draw", "matplotlib.pyplot.gcf", "matplotlib.cm.ScalarMappable", "matplotlib.cm.get_cmap", "numpy.array" ], [ "pandas.Series", "pandas.np.isnan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
egreen-park/oneplus
[ "5b2e6fa3aaf3514bbc5c98ccff9b0f97418cbd2d" ]
[ "selfdrive/ntune.py" ]
[ "import os\nimport fcntl\nimport signal\nimport json\nimport numpy as np\n\nCONF_PATH = '/data/ntune/'\nCONF_LQR_FILE = '/data/ntune/lat_lqr.json'\n\nntunes = {}\n\ndef file_watch_handler(signum, frame):\n global ntunes\n for ntune in ntunes.values():\n ntune.handle()\n\nclass nTune():\n def __init__(self, CP=None, controller=None, group=None):\n\n self.invalidated = False\n self.CP = CP\n self.lqr = None\n self.group = group\n self.config = {}\n\n if \"LatControlLQR\" in str(type(controller)):\n self.lqr = controller\n self.file = CONF_LQR_FILE\n self.lqr.A = np.array([0., 1., -0.22619643, 1.21822268]).reshape((2, 2))\n self.lqr.B = np.array([-1.92006585e-04, 3.95603032e-05]).reshape((2, 1))\n self.lqr.C = np.array([1., 0.]).reshape((1, 2))\n self.lqr.K = np.array([-110., 451.]).reshape((1, 2))\n self.lqr.L = np.array([0.33, 0.318]).reshape((2, 1))\n else:\n self.file = CONF_PATH + group + \".json\"\n\n if not os.path.exists(CONF_PATH):\n os.makedirs(CONF_PATH)\n\n self.read()\n\n try:\n signal.signal(signal.SIGIO, file_watch_handler)\n fd = os.open(CONF_PATH, os.O_RDONLY)\n fcntl.fcntl(fd, fcntl.F_SETSIG, 0)\n fcntl.fcntl(fd, fcntl.F_NOTIFY, fcntl.DN_MODIFY | fcntl.DN_CREATE | fcntl.DN_MULTISHOT)\n except Exception as ex:\n print(\"exception\", ex)\n pass\n\n def handle(self):\n try:\n if os.path.getsize(self.file) > 0:\n with open(self.file, 'r') as f:\n self.config = json.load(f)\n\n if self.checkValid():\n self.write_config(self.config)\n\n self.invalidated = True\n\n except:\n pass\n\n def check(self): # called by LatControlLQR.update\n if self.invalidated:\n self.invalidated = False\n self.update()\n\n def read(self):\n success = False\n try:\n if os.path.getsize(self.file) > 0:\n with open(self.file, 'r') as f:\n self.config = json.load(f)\n\n if self.checkValid():\n self.write_config(self.config)\n self.update()\n success = True\n except:\n pass\n\n if not success:\n try:\n self.write_default()\n with open(self.file, 'r') as f:\n self.config = json.load(f)\n if self.checkValid():\n self.write_config(self.config)\n self.update()\n except:\n pass\n\n def checkValue(self, key, min_, max_, default_):\n updated = False\n\n if key not in self.config:\n self.config.update({key: default_})\n updated = True\n elif min_ > self.config[key]:\n self.config.update({key: min_})\n updated = True\n elif max_ < self.config[key]:\n self.config.update({key: max_})\n updated = True\n\n return updated\n\n def checkValid(self):\n\n if self.lqr is not None:\n return self.checkValidLQR()\n elif self.group == \"common\":\n return self.checkValidCommon()\n else:\n return self.checkValidISCC()\n\n def update(self):\n\n if self.lqr is not None:\n self.updateLQR()\n\n def checkValidCommon(self):\n updated = False\n\n if self.checkValue(\"useLiveSteerRatio\", 0., 1., 1.):\n updated = True\n\n if self.checkValue(\"steerRatio\", 10.0, 20.0, 16.5):\n updated = True\n\n if self.checkValue(\"steerActuatorDelay\", 0., 0.8, 0.1):\n updated = True\n\n if self.checkValue(\"steerRateCost\", 0.1, 1.5, 0.4):\n updated = True\n\n if self.checkValue(\"cameraOffset\", -1.0, 1.0, 0.06):\n updated = True\n\n return updated\n\n def checkValidLQR(self):\n updated = False\n\n if self.checkValue(\"scale\", 500.0, 5000.0, 1800.0):\n updated = True\n\n if self.checkValue(\"ki\", 0.0, 0.2, 0.01):\n updated = True\n\n if self.checkValue(\"dcGain\", 0.002, 0.004, 0.0028):\n updated = True\n\n if self.checkValue(\"steerLimitTimer\", 0.5, 3.0, 2.5):\n updated = True\n\n return updated\n\n def checkValidISCC(self):\n updated = False\n\n 
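# Force each SCC factor into [0.5, 1.5] (default 1.0); checkValue patches\n # self.config in place and returns True if it had to change anything.\n 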
if self.checkValue(\"sccGasFactor\", 0.5, 1.5, 1.0):\n updated = True\n\n if self.checkValue(\"sccBrakeFactor\", 0.5, 1.5, 1.0):\n updated = True\n\n if self.checkValue(\"sccCurvatureFactor\", 0.5, 1.5, 1.0):\n updated = True\n\n return updated\n\n def updateLQR(self):\n\n self.lqr.scale = float(self.config[\"scale\"])\n self.lqr.ki = float(self.config[\"ki\"])\n\n self.lqr.dc_gain = float(self.config[\"dcGain\"])\n\n self.lqr.sat_limit = float(self.config[\"steerLimitTimer\"])\n\n # Reset the LQR state so stale estimates do not leak across retunes.\n self.lqr.x_hat = np.array([[0], [0]])\n self.lqr.reset()\n\n def read_cp(self):\n\n try:\n if self.CP is not None:\n\n if self.CP.lateralTuning.which() == 'lqr' and self.lqr is not None:\n self.config[\"scale\"] = round(self.CP.lateralTuning.lqr.scale, 2)\n self.config[\"ki\"] = round(self.CP.lateralTuning.lqr.ki, 3)\n self.config[\"dcGain\"] = round(self.CP.lateralTuning.lqr.dcGain, 6)\n self.config[\"steerLimitTimer\"] = round(self.CP.steerLimitTimer, 2)\n self.config[\"steerMax\"] = round(self.CP.steerMaxV[0], 2)\n else:\n self.config[\"useLiveSteerRatio\"] = 1.\n self.config[\"steerRatio\"] = round(self.CP.steerRatio, 2)\n self.config[\"steerActuatorDelay\"] = round(self.CP.steerActuatorDelay, 2)\n self.config[\"steerRateCost\"] = round(self.CP.steerRateCost, 2)\n\n except:\n pass\n\n def write_default(self):\n\n try:\n self.read_cp()\n self.checkValid()\n self.write_config(self.config)\n except:\n pass\n\n def write_config(self, conf):\n try:\n with open(self.file, 'w') as f:\n json.dump(conf, f, indent=2, sort_keys=False)\n os.chmod(self.file, 0o666)\n except IOError:\n\n # The config directory may have been removed; recreate it and retry once.\n try:\n if not os.path.exists(CONF_PATH):\n os.makedirs(CONF_PATH)\n\n with open(self.file, 'w') as f:\n json.dump(conf, f, indent=2, sort_keys=False)\n os.chmod(self.file, 0o666)\n except:\n pass\n\ndef ntune_get(group, key):\n global ntunes\n if group not in ntunes:\n ntunes[group] = nTune(group=group)\n\n ntune = ntunes[group]\n\n # Reload from disk when the config has not been loaded yet or lacks the key.\n if ntune.config is None or key not in ntune.config:\n ntune.read()\n\n v = ntune.config[key]\n\n if v is None:\n ntune.read()\n v = ntune.config[key]\n\n return v\n\ndef ntune_common_get(key):\n return ntune_get(\"common\", key)\n\ndef ntune_common_enabled(key):\n return ntune_common_get(key) > 0.5\n\ndef ntune_scc_get(key):\n return ntune_get(\"scc\", key)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mzient/vision
[ "6eac421ca2ea1ca52f49e10d7ac191c0d6900df1" ]
[ "torchvision/models/inception.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['Inception3', 'inception_v3']\n\n\nmodel_urls = {\n # Inception v3 ported from TensorFlow\n 'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',\n}\n\n\ndef inception_v3(pretrained=False, **kwargs):\n r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http://arxiv.org/abs/1512.00567>`_.\n\n .. note::\n **Important**: In contrast to the other models the inception_v3 expects tensors with a size of\n N x 3 x 299 x 299, so ensure your images are sized accordingly.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n transform_input (bool): If True, preprocesses the input according to the method with which it\n was trained on ImageNet. Default: *False*\n \"\"\"\n if pretrained:\n if 'transform_input' not in kwargs:\n kwargs['transform_input'] = True\n model = Inception3(**kwargs)\n model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))\n return model\n\n return Inception3(**kwargs)\n\n\nclass Inception3(nn.Module):\n\n def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):\n super(Inception3, self).__init__()\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)\n self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)\n self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)\n self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)\n self.Mixed_5b = InceptionA(192, pool_features=32)\n self.Mixed_5c = InceptionA(256, pool_features=64)\n self.Mixed_5d = InceptionA(288, pool_features=64)\n self.Mixed_6a = InceptionB(288)\n self.Mixed_6b = InceptionC(768, channels_7x7=128)\n self.Mixed_6c = InceptionC(768, channels_7x7=160)\n self.Mixed_6d = InceptionC(768, channels_7x7=160)\n self.Mixed_6e = InceptionC(768, channels_7x7=192)\n if aux_logits:\n self.AuxLogits = InceptionAux(768, num_classes)\n self.Mixed_7a = InceptionD(768)\n self.Mixed_7b = InceptionE(1280)\n self.Mixed_7c = InceptionE(2048)\n self.fc = nn.Linear(2048, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m, 'stddev') else 0.1\n X = stats.truncnorm(-2, 2, scale=stddev)\n values = torch.Tensor(X.rvs(m.weight.numel()))\n values = values.view(m.weight.size())\n m.weight.data.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.transform_input:\n x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = torch.cat((x_ch0, x_ch1, x_ch2), 1)\n # N x 3 x 299 x 299\n x = self.Conv2d_1a_3x3(x)\n # N x 32 x 149 x 149\n x = self.Conv2d_2a_3x3(x)\n # N x 32 x 147 x 147\n x = self.Conv2d_2b_3x3(x)\n # N x 64 x 147 x 147\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # N x 64 x 73 x 73\n x = self.Conv2d_3b_1x1(x)\n # N x 80 x 73 x 73\n x = self.Conv2d_4a_3x3(x)\n # N x 192 x 71 x 71\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # N x 192 x 35 x 35\n x = self.Mixed_5b(x)\n # N x 256 x 35 x 35\n x = 
self.Mixed_5c(x)\n # N x 288 x 35 x 35\n x = self.Mixed_5d(x)\n # N x 288 x 35 x 35\n x = self.Mixed_6a(x)\n # N x 768 x 17 x 17\n x = self.Mixed_6b(x)\n # N x 768 x 17 x 17\n x = self.Mixed_6c(x)\n # N x 768 x 17 x 17\n x = self.Mixed_6d(x)\n # N x 768 x 17 x 17\n x = self.Mixed_6e(x)\n # N x 768 x 17 x 17\n if self.training and self.aux_logits:\n aux = self.AuxLogits(x)\n # N x 768 x 17 x 17\n x = self.Mixed_7a(x)\n # N x 1280 x 8 x 8\n x = self.Mixed_7b(x)\n # N x 2048 x 8 x 8\n x = self.Mixed_7c(x)\n # N x 2048 x 8 x 8\n # Adaptive average pooling\n x = F.adaptive_avg_pool2d(x, (1, 1))\n # N x 2048 x 1 x 1\n x = F.dropout(x, training=self.training)\n # N x 2048 x 1 x 1\n x = x.view(x.size(0), -1)\n # N x 2048\n x = self.fc(x)\n # N x 1000 (num_classes)\n if self.training and self.aux_logits:\n return x, aux\n return x\n\n\nclass InceptionA(nn.Module):\n\n def __init__(self, in_channels, pool_features):\n super(InceptionA, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)\n\n self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)\n self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)\n\n self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch5x5 = self.branch5x5_1(x)\n branch5x5 = self.branch5x5_2(branch5x5)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionB(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionB, self).__init__()\n self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3(x)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n\n outputs = [branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionC(nn.Module):\n\n def __init__(self, in_channels, channels_7x7):\n super(InceptionC, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)\n\n c7 = channels_7x7\n self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))\n\n self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))\n\n self.branch_pool = BasicConv2d(in_channels, 192, 
kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n\n branch7x7dbl = self.branch7x7dbl_1(x)\n branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionD(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionD, self).__init__()\n self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)\n\n self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = self.branch3x3_2(branch3x3)\n\n branch7x7x3 = self.branch7x7x3_1(x)\n branch7x7x3 = self.branch7x7x3_2(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_3(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_4(branch7x7x3)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n outputs = [branch3x3, branch7x7x3, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionE(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionE, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)\n\n self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)\n self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)\n self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3),\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = torch.cat(branch3x3dbl, 1)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)\n self.conv1 = BasicConv2d(128, 768, kernel_size=5)\n self.conv1.stddev = 0.01\n self.fc = nn.Linear(768, num_classes)\n self.fc.stddev = 0.001\n\n def forward(self, x):\n # N x 768 x 17 x 17\n x = F.avg_pool2d(x, kernel_size=5, stride=3)\n # N x 768 x 5 x 5\n x = self.conv0(x)\n # N x 128 x 5 x 5\n x = 
self.conv1(x)\n # N x 768 x 1 x 1\n # Adaptive average pooling\n x = F.adaptive_avg_pool2d(x, (1, 1))\n # N x 768 x 1 x 1\n x = x.view(x.size(0), -1)\n # N x 768\n x = self.fc(x)\n # N x 1000\n return x\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n" ]
[ [ "torch.nn.functional.dropout", "torch.cat", "torch.utils.model_zoo.load_url", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "scipy.stats.truncnorm", "torch.nn.init.constant_", "torch.unsqueeze", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.functional.max_pool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]