Dataset columns:
repo_name: string (length 6–130)
hexsha: list
file_path: list
code: list
apis: list
possible_versions: list
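Each record below pairs a repository snapshot with its source files, the Python APIs those files call, and the library versions the calls appear compatible with; file_path, code, apis, and possible_versions are parallel lists with one entry per file. As a minimal sketch of how such records might be iterated (assuming they are stored as JSON Lines with these field names; the file name api_usage.jsonl and the loading code are illustrative assumptions, not part of the dataset):

import json

# Hypothetical file name: the dump above does not specify how the records are stored.
DATASET_PATH = "api_usage.jsonl"


def iter_records(path):
    """Yield one dict per repository snapshot (one JSON object per line)."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)


if __name__ == "__main__":
    for record in iter_records(DATASET_PATH):
        # file_path, apis, and possible_versions are parallel lists:
        # one entry per source file in the repository snapshot.
        for file_path, apis, versions in zip(
            record["file_path"], record["apis"], record["possible_versions"]
        ):
            print(record["repo_name"], file_path, apis, versions)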
AndreSlavescu/OSSDC-VisionAI-Core
[ "8f4942c91aa8164401fbca192180d6f064a18e39" ]
[ "race-ossdc-org_webrtc_processing.py" ]
[ "import argparse\nimport asyncio\nimport logging\nimport os\nimport random\nimport numpy as np\nimport cv2\nfrom av import VideoFrame\nimport traceback \n\nfrom aiortc import (\n RTCIceCandidate,\n RTCPeerConnection,\n RTCSessionDescription,\n RTCConfiguration,\n RTCIceCandidate,\n RTCIceServer,\n VideoStreamTrack,\n MediaStreamTrack,\n)\nfrom aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder\nfrom signaling_race import BYE, RaceOssdcSignaling, object_from_string, object_to_string, sendSubscribeMessage,sendUnSubscribeMessage, roomName, sendMessage,droneRoomName,sio\n\nimport sys\nimport argparse\n\nimport time\nimport json\n\nimport socket\n\nimport youtube_dl\n\nimport subprocess\n\ndebug=True\n\ndef debug_print(*argv):\n if(debug):\n print(*argv)\n\n#To monitor the output from video processing run on your PC this command:\n#ffplay -fflags nobuffer -f mjpeg tcp://0.0.0.0:45654?listen\nip = 'localhost' #replace with your PC IP where ffplay runs\nip = None #comment to activate above IP\n\nclientsocket = None\nif ip is not None:\n try:\n clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n clientsocket.settimeout(5)\n clientsocket.connect((ip,45654)) #the target ip is where the ffplay is listening\n except Exception as e:\n debug_print(e)\n clientsocket = None\n\n\nclass VideoTransformTrack(MediaStreamTrack):\n \"\"\"\n A video stream track that transforms frames from an another track.\n \"\"\"\n\n kind = \"video\"\n\n def __init__(self, track, transform, signaling, model):\n super().__init__() # don't forget this!\n self.transform = transform\n self.track = track\n self.scale = 1\n self.skipFrames = True\n# self.skipFrames = False #use it for Youtube streaming\n self.processing_model = model\n self.prevTime = time.time()\n self.starttime1 = time.time()\n self.colors = \"Not computed\"\n self.signaling = signaling\n self.frameProcessedCount = 0\n self.frameCount = 0\n self.prevFrameCount = 0\n self.realFPS = 0\n \n async def recv(self):\n global clientsocket\n frame = await self.track.recv()\n self.frameCount=self.frameCount+1\n new_frame = frame\n\n # Consume all available frames.\n # If we don't do this, we'll bloat indefinitely.\n if self.skipFrames:\n while not self.track._queue.empty():\n frame = await self.track.recv() \n self.frameCount=self.frameCount+1\n \n self.frameProcessedCount=self.frameProcessedCount+1\n\n timer = cv2.getTickCount()\n\n try:\n \n img = frame.to_ndarray(format=\"bgr24\")\n\n rows, cols, _ = img.shape\n #debug_print('before',img.shape)\n\n if self.scale!=1:\n img = cv2.resize(img,(cols//self.scale, rows//self.scale))\n rows, cols, _ = img.shape\n\n h, w, _ = img.shape\n rows, cols, _ = img.shape\n\n y = h//3\n x = w//3\n\n # img = img[y+200:y+200+y, x:x+x]\n # rows, cols, _ = img.shape \n # h, w, _ = img.shape\n\n # y = h//3\n # x = w//3\n\n\n #img = cv2.pyrDown(img)\n trakingPoints,img = video_processing_module.process_image(self.transform,self.processing_model,img)\n\n y1 = y+25\n\n if 1==2: # for robot control - works with MiDaS for now\n crop_img = img[y:y+y, x:x+x]\n\n r, c, _ = crop_img.shape\n\n\n if 1==1: #robot control\n avg_color_per_row = np.average(crop_img, axis=0)\n avg_color = np.average(avg_color_per_row, axis=0)\n #debug_print(avg_color)\n\n pix_total = 1\n color_B = avg_color[0]\n color_G = avg_color[1]\n color_R = avg_color[2]\n color_N = 1\n self.colors = ['Blue: {:.2f}'.format(color_B/pix_total), 'Green: {:.2f}'.format(color_G/pix_total), 'Red: {:.2f}'.format(color_R/pix_total)] # + ', Gray: ' + 
str(color_N/pix_total)\n\n debug_print(self.colors)\n if (time.time() - self.starttime1) > 0.1:\n self.starttime1 = time.time()\n if (color_B/pix_total)>200:\n msg = '{\"setmotor\":[30,30,100,'+ '1605574844705' + ']}'\n jsonmsg = json.loads(msg)\n await self.signaling.send(jsonmsg);\n else:\n msg = '{\"setmotor\":[-30,30,50,'+ '1605574844705' + ']}'\n jsonmsg = json.loads(msg)\n await self.signaling.send(jsonmsg);\n # debug_print('sent message: ',jsonmsg)\n\n # cv2.rectangle(img, (x,y), (x+x,y+y), (50,170,50), 2)\n cv2.rectangle(img, (x,y), (x+x,y+y), (0,0,0), 2)\n\n cv2.putText(img, self.colors[0], (10,y1+125+25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, self.colors[1], (10,y1+150+25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, self.colors[2], (10,y1+175+25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n\n # img = cv2.resize(img,(cols//4, rows//4))\n # y = h//3\n # x = w//3\n # y1 = y+25 \n\n cv2.putText(img, \"ImgSize: \"+str(w)+\"x\"+str(h), (10,y1+50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, \"FrmCnt: \"+str(self.frameCount), (10,y1+75), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, \"FrmProcCnt: \"+str(self.frameProcessedCount), (10,y1+100), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, \"TrkPt: \"+str(len(trakingPoints)), (10,y1+125), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n\n cv2.putText(img, \"ProcFPS : \" + str(int(fps)), (10,y1), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n cv2.putText(img, \"RealFPS : \" + str(int(self.realFPS)), (10,y1+25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,255,255), 2)\n\n # img = cv2.resize(img,(cols//3, rows//3))\n\n delta = time.time() - self.prevTime\n if delta > 1:\n self.realFPS = (self.frameCount-self.prevFrameCount)/delta\n self.prevFrameCount = self.frameCount\n self.prevTime = time.time()\n\n\n try:\n if clientsocket is not None:\n #img = img.to_ndarray(format=\"bgr24\")\n data = cv2.imencode('.jpg', img)[1].tobytes()\n clientsocket.send(data) \n except Exception as e:\n debug_print(e)\n clientsocket = None\n pass \n\n new_frame = VideoFrame.from_ndarray(img, format=\"bgr24\")\n new_frame.pts = frame.pts\n new_frame.time_base = frame.time_base \n except Exception as e1:\n debug_print(e1)\n\n return new_frame\n\n\nasync def run(pc, player, recorder, signaling, transform, model):\n def add_tracks():\n debug_print(\"player\",player)\n if player and player.video:\n local_video = player.video\n local_video = VideoTransformTrack(player.video, transform=transform, signaling=signaling, model=model)\n pc.addTrack(local_video)\n else:\n pc.addTransceiver('video','sendrecv') #this is the trick to echo webcam back\n\n @pc.on(\"track\")\n def on_track(track):\n debug_print(\"Track %s received\" % track.kind)\n if track.kind == \"video\":\n if not(player and player.video):\n local_video = VideoTransformTrack(\n track, transform=transform, signaling=signaling, model=model\n )\n pc.addTrack(local_video)\n\n @track.on(\"ended\")\n async def on_ended():\n debug_print(\"track ended\")\n signaling.trackEnded=True\n\n trackEnded = False\n params = await signaling.connect()\n\n await sendSubscribeMessage()\n\n debug_print(\"run\")\n # consume signaling\n noneCnt = 0\n while True:\n obj = await signaling.receive()\n# debug_print(\"obj:\", obj)\n if obj is None:\n if(noneCnt>5):\n break\n noneCnt=noneCnt+1\n continue\n 
try:\n if hasattr(obj, 'type'):\n if obj.type == \"answer\":\n if pc.signalingState == \"stable\":\n pass\n else:\n # add_tracks()\n await pc.setRemoteDescription(obj)\n await recorder.start()\n await signaling.send(pc.localDescription)\n if obj.type == \"offer\":\n if pc.signalingState == \"have-local-offer\" or pc.signalingState == \"stable\":\n pass\n else:\n # add_tracks()\n await pc.setRemoteDescription(obj)\n await recorder.start()\n await signaling.send(pc.localDescription)\n\n if(isinstance(obj,list) and len(obj)==2):\n add_tracks()\n await pc.setLocalDescription(await pc.createOffer())\n await signaling.send(pc.localDescription)\n elif isinstance(obj, RTCSessionDescription):\n debug_print(\"pc.signalingState\",pc.signalingState)\n\n elif isinstance(obj, RTCIceCandidate):\n await pc.addIceCandidate(obj)\n\n elif obj is BYE or signaling.trackEnded:\n debug_print(\"Exiting\")\n break\n else:\n debug_print(\"obj not handled:\",obj)\n except Exception as e:\n noneCnt=noneCnt+1\n debug_print(\"error in run loop\",e)\n if(noneCnt>5):\n break\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"RaceOSSDC\")\n \n parser.add_argument(\"--play-from\", help=\"Read the media from a file and sent it.\"),\n parser.add_argument(\"--record-to\", help=\"Write received media to a file.\"),\n parser.add_argument('-t','--transform', type=str)\n args, unknown = parser.parse_known_args()\n\n transform = None\n if args.transform:\n transform = args.transform\n\n import importlib\n\n module_name = transform.split(\".\")\n if len(module_name) == 2:\n transform = module_name[1]\n module_name = module_name[0]\n else:\n module_name = module_name[0]\n transform = module_name\n\n debug_print(\"We will apply this transform:\",transform, \"from module:\",module_name)\n \n video_processing_module = importlib.import_module(\"video_processing_\"+module_name)\n print('video_processing_module',video_processing_module)\n \n \n model,args1 = video_processing_module.init_model(transform)\n \n # create signaling and peer connection\n args.room = '123456'\n\n videoUrl = None \n# videoUrl = 'https://youtu.be/uuQlMCMT71I' #uncomment to overide with a Youtube video source, set skipFrames to False for Youtube streaming\n \n if videoUrl is not None:\n #install youtube-dl for this to work: pip install youtube-dl\n command = \"youtube-dl -f 'bestvideo[height<1100]' -g '\"+videoUrl+\"'\" \n videoUrl = subprocess.check_output(command, shell = True).decode(\"utf-8\").strip()\n args.play_from = videoUrl\n\n print('videoUrl=',videoUrl)\n\n signaling = RaceOssdcSignaling(args.room)\n \n configuration = RTCConfiguration()\n \n stunServer = RTCIceServer(\"stun:race.ossdc.org:5349\")\n\n configuration.iceServers = []\n configuration.iceServers.append(stunServer)\n\n pc = RTCPeerConnection(configuration=configuration)\n\n\n # create media source\n if args.play_from:\n player = MediaPlayer(args.play_from)\n else:\n player = None\n\n # create media sink\n if args.record_to:\n recorder = MediaRecorder(args.record_to)\n else:\n recorder = MediaBlackhole()\n\n loop = asyncio.get_event_loop()\n\n try:\n loop.run_until_complete(\n run(pc=pc, player=player, recorder=recorder, signaling=signaling,transform=transform, model=model)\n )\n except Exception as e:\n debug_print(e)\n finally:\n loop.close()\n loop.run_until_complete(signaling.close())\n loop.run_until_complete(pc.close())\n\n" ]
[ [ "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
berserkrambo/fcos-pytorch
[ "a064eccf6d45fc85da401151dcefe7a3b01a065b" ]
[ "dataset.py" ]
[ "import os\n\nimport torch\nfrom torchvision import datasets\n\nfrom boxlist import BoxList\n\n\ndef has_only_empty_bbox(annot):\n return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in annot)\n\n\ndef has_valid_annotation(annot):\n if len(annot) == 0:\n return False\n\n if has_only_empty_bbox(annot):\n return False\n\n return True\n\n\nclass COCODataset(datasets.CocoDetection):\n def __init__(self, path, split, transform=None):\n root = os.path.join(path, f'{split}2017')\n annot = os.path.join(path, 'annotations', f'instances_{split}2017.json')\n\n super().__init__(root, annot)\n\n self.ids = sorted(self.ids)\n\n if split == 'train':\n ids = []\n\n for id in self.ids:\n ann_ids = self.coco.getAnnIds(imgIds=id, iscrowd=None)\n annot = self.coco.loadAnns(ann_ids)\n\n if has_valid_annotation(annot):\n ids.append(id)\n\n self.ids = ids\n\n self.category2id = {v: i + 1 for i, v in enumerate(self.coco.getCatIds())}\n self.id2category = {v: k for k, v in self.category2id.items()}\n self.id2img = {k: v for k, v in enumerate(self.ids)}\n\n self.transform = transform\n\n def __getitem__(self, index):\n img, annot = super().__getitem__(index)\n\n annot = [o for o in annot if o['iscrowd'] == 0]\n\n boxes = [o['bbox'] for o in annot]\n boxes = torch.as_tensor(boxes).reshape(-1, 4)\n target = BoxList(boxes, img.size, mode='xywh').convert('xyxy')\n\n classes = [o['category_id'] for o in annot]\n classes = [self.category2id[c] for c in classes]\n classes = torch.tensor(classes)\n target.fields['labels'] = classes\n\n target.clip(remove_empty=True)\n\n if self.transform is not None:\n img, target = self.transform(img, target)\n\n return img, target, index\n\n def get_image_meta(self, index):\n id = self.id2img[index]\n img_data = self.coco.imgs[id]\n\n return img_data\n\n\nclass ImageList:\n def __init__(self, tensors, sizes):\n self.tensors = tensors\n self.sizes = sizes\n\n def to(self, *args, **kwargs):\n tensor = self.tensors.to(*args, **kwargs)\n\n return ImageList(tensor, self.sizes)\n\n\ndef image_list(tensors, size_divisible=0):\n max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))\n\n if size_divisible > 0:\n stride = size_divisible\n max_size = list(max_size)\n max_size[1] = (max_size[1] | (stride - 1)) + 1\n max_size[2] = (max_size[2] | (stride - 1)) + 1\n max_size = tuple(max_size)\n\n shape = (len(tensors),) + max_size\n batch = tensors[0].new(*shape).zero_()\n\n for img, pad_img in zip(tensors, batch):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n\n sizes = [img.shape[-2:] for img in tensors]\n\n return ImageList(batch, sizes)\n\n\ndef collate_fn(config):\n def collate_data(batch):\n batch = list(zip(*batch))\n imgs = image_list(batch[0], config.size_divisible)\n targets = batch[1]\n ids = batch[2]\n\n return imgs, targets, ids\n\n return collate_data\n" ]
[ [ "torch.as_tensor", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
exeex/pytorch-OpCounter
[ "c9b6f6335457aefbfd7e19fd316bc6fa46066135" ]
[ "thop/count_hooks.py" ]
[ "import argparse\n\nimport torch\nimport torch.nn as nn\n\nmultiply_adds = 1\n\n\ndef count_convNd(m, x, y):\n x = x[0]\n cin = m.in_channels\n batch_size = x.size(0)\n\n kernel_ops = m.weight.size()[2:].numel()\n bias_ops = 1 if m.bias is not None else 0\n ops_per_element = kernel_ops + bias_ops\n output_elements = y.nelement()\n\n # cout x oW x oH\n total_ops = batch_size * cin * output_elements * ops_per_element // m.groups\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_conv2d(m, x, y):\n x = x[0]\n\n cin = m.in_channels\n cout = m.out_channels\n kh, kw = m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.bias is not None else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n output_elements = batch_size * out_w * out_h * cout\n total_ops = output_elements * ops_per_element * cin // m.groups\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_convtranspose2d(m, x, y):\n x = x[0]\n\n cin = m.in_channels\n cout = m.out_channels\n kh, kw = m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw * cin // m.groups\n bias_ops = 1 if m.bias is not None else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n # output_elements = batch_size * out_w * out_h * cout\n ops_per_element = m.weight.nelement()\n output_elements = y.nelement()\n total_ops = output_elements * ops_per_element\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_bn(m, x, y):\n x = x[0]\n\n nelements = x.numel()\n # subtract, divide, gamma, beta\n total_ops = 4 * nelements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_relu(m, x, y):\n x = x[0]\n\n nelements = x.numel()\n total_ops = nelements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_softmax(m, x, y):\n x = x[0]\n\n batch_size, nfeatures = x.size()\n\n total_exp = nfeatures\n total_add = nfeatures - 1\n total_div = nfeatures\n total_ops = batch_size * (total_exp + total_add + total_div)\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_maxpool(m, x, y):\n kernel_ops = torch.prod(torch.Tensor([m.kernel_size]))\n num_elements = y.numel()\n total_ops = kernel_ops * num_elements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_adap_maxpool(m, x, y):\n kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()\n kernel_ops = torch.prod(kernel)\n num_elements = y.numel()\n total_ops = kernel_ops * num_elements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_avgpool(m, x, y):\n total_add = torch.prod(torch.Tensor([m.kernel_size]))\n total_div = 1\n kernel_ops = total_add + total_div\n num_elements = y.numel()\n total_ops = kernel_ops * num_elements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_adap_avgpool(m, x, y):\n kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()\n total_add = torch.prod(kernel)\n total_div = 1\n kernel_ops = total_add + total_div\n num_elements = y.numel()\n total_ops = kernel_ops * num_elements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n\ndef count_linear(m, x, y):\n # per output element\n total_mul = 
m.in_features\n total_add = m.in_features - 1\n num_elements = y.numel()\n total_ops = (total_mul + total_add) * num_elements\n\n m.total_ops = torch.Tensor([int(total_ops)])\n" ]
[ [ "torch.prod", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CrackAD/cs207-FinalProject
[ "1a50b9b9d2966029c77ef2f065c3ee62efd6742e" ]
[ "EasyDiff/var.py" ]
[ "import numpy as np\nimport pytest\nclass Var():\n '''\n This class defines a multivariate dual number\n '''\n def __init__(self, val, dual_paras):\n \"\"\" constructor for Var class\n\n INPUT\n =======\n val: value of the input variable\n dual_paras: partial derivatives with respect to each input variable\n \n RETURNS\n =======\n Var object: self.val and self.dir\n\n EXAMPLES\n =======\n >>> a = Var(1, np.array([1]))\n >>> print(a.val, a.der)\n 1 [1]\n \"\"\"\n self.val = val\n self.der = dual_paras\n \n def __add__(self, other):\n \"\"\" returns a Var as the result of self + other\n\n INPUT\n =======\n self: a Var object (object before +)\n other: a Var object or a real number (object after +)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> t = x + 3\n >>> print(t.val, t.der)\n 5 [1]\n \n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z = Var(3, np.array([1,0]))\n >>> z1 = x + y\n >>> print('x + y: {}'.format(vars(z1)))\n x + y: {'val': 5, 'der': array([1, 1])}\n >>> z2 = x + 1\n >>> print('x + 1: {}'.format(vars(z2)))\n x + 1: {'val': 4, 'der': array([1, 0])}\n >>> z3 = 1 + x\n >>> print('1 + x: {}'.format(vars(z3)))\n 1 + x: {'val': 4, 'der': array([1, 0])}\n \n \"\"\"\n try: # two Var objects\n value = self.val + other.val\n der = self.der + other.der\n return Var(value, der)\n except AttributeError: # Var + real number\n return Var(self.val + other, self.der)\n \n def __radd__(self, other):\n \"\"\" return a Var as the result of other + self\n\n INPUT\n =======\n self: a Var object (object after +)\n other: a Var object or a real number (object before +)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> t = 3 + x\n >>> print(t.val, t.der)\n 5 [1]\n \"\"\"\n return self + other\n\n def __mul__(self, other):\n \"\"\" returns a Var as the result of self * other\n\n INPUT\n =======\n self: a Var object (object before *)\n other: a Var object or a real number (object after *)\n\n RETURNS\n =======\n Var object: a new Var object with new val and ders\n\n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z4 = y*2\n >>> z5 = 2*y\n >>> z6 = -1*y\n >>> z7 = y*(-1)\n >>> z8 = x*y\n >>> print(vars(z4))\n {'val': 4, 'der': array([0, 2])}\n >>> print(vars(z5))\n {'val': 4, 'der': array([0, 2])}\n >>> print(vars(z6))\n {'val': -2, 'der': array([ 0, -1])}\n >>> print(vars(z7)) \n {'val': -2, 'der': array([ 0, -1])}\n >>> print(vars(z8)) \n {'val': 6, 'der': array([2, 3])}\n \"\"\"\n try: # two Var objects\n value = self.val * other.val\n der = self.val*other.der + other.val * self.der # dz / dx1 = dz/dx * dx/dx1 + dz/dy * dy/dx1\n return Var(value, der)\n except AttributeError: # Var * real number\n return Var(self.val*other, self.der * other)\n\n def __rmul__(self, other):\n \"\"\" returns a Var as the result of other * self\n \n INPUT\n =======\n self: a Var object (object after *)\n other: a Var object or a real number (object before *)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> t = 3 * x\n >>> print(t.val, t.der)\n 6 [3]\n \"\"\"\n return self * other\n \n def __sub__(self, other):\n \"\"\" returns a Var as the result of self - other\n \n INPUT\n =======\n self: a Var object (object before -)\n other: a Var object or a real number (object after -)\n \n 
RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z1 = x - y\n >>> print('x - y: {}'.format(vars(z1)))\n x - y: {'val': 1, 'der': array([ 1, -1])}\n >>> z2 = x - 2\n >>> print('x - 2: {}'.format(vars(z2)))\n x - 2: {'val': 1, 'der': array([1, 0])}\n >>> z3 = 2 - x\n >>> print('2 - x: {}'.format(vars(z3)))\n 2 - x: {'val': -1, 'der': array([-1, 0])}\n \"\"\"\n try: # two Var objects\n value = self.val - other.val\n der = self.der - other.der\n return Var(value, der)\n except AttributeError: # Var - real number\n return Var(self.val-other, self.der)\n\n def __rsub__(self, other):\n \"\"\" returns a Var as the result of other - self\n \n INPUT\n =======\n self: a Var object (object after -)\n other: a Var object or a real number (object before -)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> t = 3 - x\n >>> print(t.val, t.der)\n 1 [-1]\n \"\"\"\n return -1 *(self-other)\n\n def __pow__(self, other):\n \"\"\" returns a Var as the result of self**(other)\n \n INPUT\n =======\n self: a Var object (object before **)\n other: a Var object or a real number (object after **)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z = Var(3, np.array([1,0]))\n >>> z1 = x**y\n >>> print('x ** y: {}'.format(vars(z1)))\n x ** y: {'val': 9, 'der': array([6. , 9.8875106])}\n >>> z2 = x**2\n >>> print('x ** 2: {}'.format(vars(z2)))\n x ** 2: {'val': 9, 'der': array([6, 0])}\n \"\"\"\n \n try: # two Var objects \n # d(a**c)/dx = d(a**c)/da * (da / dx) + d(a**c)/dc * (dc / dx) \n # = c*(a**(c-1)) * (da / dx) + a**c*ln(a) * (dc / dx) \n value = self.val**other.val\n der = other.val * (self.val ** (other.val - 1)) * self.der + value * np.log(self.val) * other.der\n return Var(value, der)\n except AttributeError: # Var ** real number\n return Var(self.val**other, other * (self.val ** (other-1)) * self.der)\n\n def __rpow__(self, other):\n \"\"\" returns a Var as the result of other**(self)\n\n INPUT\n =======\n self: a Var object (object after **)\n other: a Var object or a real number (object before **)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z = Var(3, np.array([1,0]))\n >>> z1 = x**y\n >>> print('x ** y: {}'.format(vars(z1)))\n x ** y: {'val': 9, 'der': array([6. , 9.8875106])}\n >>> z2 = x**2\n >>> print('x ** 2: {}'.format(vars(z2)))\n x ** 2: {'val': 9, 'der': array([6, 0])}\n >>> z3 = 2**x\n >>> print('2 ** x: {}'.format(vars(z3)))\n 2 ** x: {'val': 8, 'der': array([5.54517744, 0. ])}\n >>> z4 = x**(-1)\n >>> print('x ** (-1): {}'.format(vars(z4)))\n x ** (-1): {'val': 0.3333333333333333, 'der': array([-0.11111111, -0. 
])}\n \"\"\"\n \n # the only scenario using this is when other is a real number and self is a Var object\n value = other **self.val\n # d(o ** s)/dx = o**s *log(o)*( ds/dx)\n der = value * np.log(other) * self.der\n return Var(value, der)\n\n def __truediv__(self, other):\n \"\"\" returns a Var as the result of self / other\n\n INPUT\n =======\n self: a Var object (numerator)\n other: a Var object or a real number (denominator)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n ======= \n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> p = x * (y * (-1))\n >>> print(p.val, p.der)\n -6 [-2 -3]\n \n \"\"\"\n return self * (other ** (-1))\n \n def __rtruediv__(self, other):\n \"\"\" returns a Var as the result of other / self\n\n INPUT\n =======\n self: a Var object (numerator)\n other: a Var object or a real number (denominator)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1,0]))\n >>> y = 2\n >>> v = y*(x**(-1))\n >>> print(v.val, v.der)\n 1.0 [-0.5 -0. ]\n \"\"\"\n return other*(self**(-1))\n \n def __neg__(self):\n \"\"\" returns a Var as the result of - self\n\n INPUT\n =======\n self: a Var object\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> p = -x\n >>> print(p.val, p.der)\n -2 [-1]\n \"\"\"\n return (-1)*self\n\n def __pos__(self):\n \"\"\" returns a Var as the result of + self\n\n INPUT\n =======\n self: a Var object\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(2, np.array([1]))\n >>> print(+x.val, +x.der)\n 2 [1]\n \"\"\"\n return Var(self.val, self.der)\n\n def __eq__(self, other):\n \"\"\" returns the result of self == other\n\n INPUT\n =======\n self: a Var object (before ==)\n other: a Var object or something else(after ==)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z = Var(3, np.array([1,0]))\n >>> print(x==y)\n False\n >>> print(x == z)\n True\n \"\"\"\n try:\n return (self.val == other.val) & (list(self.der) == list(other.der))\n except AttributeError:\n return False\n\n def __ne__(self, other):\n \"\"\" returns a the result of self != other\n\n INPUT\n =======\n self: a Var object (before !=)\n other: a Var object or something else(after !=)\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> y = Var(2, np.array([0,1]))\n >>> z = Var(3, np.array([1,0]))\n >>> print(x!=y)\n True\n >>> print(x != z)\n False\n \"\"\"\n try:\n return (self.val != other.val) | (list(self.der) != list(other.der))\n except AttributeError:\n return True\n\n @staticmethod\n def log(var):\n \"\"\" returns a Var as the result of var.log()\n\n INPUT\n =======\n var: a Var object or real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> z1 = Var.log(x)\n >>> print('log(x): {}'.format(vars(z1)))\n log(x): {'val': 1.0986122886681098, 'der': array([0.33333333, 0. 
])}\n \"\"\"\n try:\n val = np.log(var.val)\n der = np.array(list(map(lambda x: x / var.val, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.log(var)\n \n @staticmethod\n def logk(var, k):\n \"\"\" returns a Var as the result of var.logk()\n\n INPUT\n =======\n var: a Var object or a real number\n k: the base for log\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> z1 = Var.logk(x, 3.0)\n >>> print('logk(x, 3.0): {}'.format(vars(z1)))\n logk(x, 3.0): {'val': 1.0, 'der': array([0.30341308, 0. ])}\n \"\"\"\n try:\n val = np.log(var.val) / np.log(k)\n der = np.array(list(map(lambda x: x / var.val * (1/np.log(k)), var.der)))\n return Var(val, der)\n except AttributeError:\n return np.log(var) / np.log(k)\n \n @staticmethod\n def exp(var):\n \"\"\" returns a Var as the result of var.exp()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> z1 = Var.exp(x)\n >>> print('exp(x): {}'.format(vars(z1)))\n exp(x): {'val': 20.085536923187668, 'der': array([20.08553692, 0. ])}\n \"\"\"\n try:\n val = np.exp(var.val)\n der = np.array(list(map(lambda x: x * val, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.exp(var)\n \n @staticmethod\n def expk(var, k):\n \"\"\" returns a Var as the result of k ** (var)\n\n INPUT\n =======\n var: a Var object or a real number\n k: the base of the exponential\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0]))\n >>> z = Var.expk(x, 4)\n >>> z.val == pytest.approx(4**3)\n True\n >>> z.der== pytest.approx([4**3*np.log(4), 0])\n True\n \"\"\"\n try: # var is a Var variable\n val = k ** var.val\n der = val * np.log(k) * var.der\n return Var(val, der)\n except AttributeError: # var is a real number\n return k ** var\n \n @staticmethod\n def logistic(var):\n \"\"\" returns a Var as the result of 1 / (1 + e^(-var))\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new value and der\n\n EXAMPLES\n =======\n >>> x = Var(3.0, np.array([1,0]))\n >>> z = Var.logistic(x)\n >>> z.val == pytest.approx(1 / (1 + np.exp(-3)))\n True\n >>> z.der == pytest.approx([np.exp(3) / ((1 + np.exp(3))**2), 0])\n True\n \"\"\"\n try:\n val = 1 / (1 + np.exp(-var.val)) # logistic(x) = 1 / (1 + e^(-x))\n der = val * (1-val) * var.der# dz/x1 = dz/dx * dx/dx1 = (e^x/ ((1 + e^x)**2)) * dx/dx1\n return Var(val, der)\n except AttributeError: # var is a real number\n return 1 / (1 + np.exp(-var))\n\n @staticmethod\n def sqrt(var):\n \"\"\" returns a Var as the result of var.sqrt()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z1 = Var.sqrt(x)\n >>> print('sqrt(x): {}'.format(vars(z1)))\n sqrt(x): {'val': 1.7320508075688772, 'der': array([0.28867513, 0. 
])}\n \"\"\"\n try:\n val = np.sqrt(var.val)\n der = np.array(list(map(lambda x: 0.5 * (var.val ** (-0.5)) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.sqrt(var)\n\n @staticmethod\n def sinh(var):\n \"\"\" returns a Var as the result of sinh(var)\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new value and children\n\n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z = Var.sinh(x)\n >>> z.val == pytest.approx((np.exp(3)-np.exp(-3)) / 2)\n True\n >>> z.der == pytest.approx([(np.exp(3)+np.exp(-3)) / 2, 0])\n True\n \"\"\"\n try:\n val = (np.exp(var.val) - np.exp(-var.val)) / 2 # sinh(x) = (e^x - e^(-x)) / 2\n # df/dx1 = df/dx * dx/dx1 = (e^x + e^(-x)) / 2 * dx/dx1\n der = ((np.exp(var.val) + np.exp(-var.val)) / 2) * var.der\n return Var(val, der)\n except: # var is a real number\n return (np.exp(var) - np.exp(-var)) / 2\n \n @staticmethod\n def cosh(var):\n \"\"\" returns a Var as the result of cosh(var)\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new value and children\n\n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z = Var.cosh(x)\n >>> z.val == pytest.approx((np.exp(3)+np.exp(-3)) / 2)\n True\n >>> z.der == pytest.approx([(np.exp(3)-np.exp(-3)) / 2, 0])\n True\n \"\"\"\n try:\n val = (np.exp(var.val) + np.exp(-var.val)) / 2 # # cosh(x) = (e^x + e^(-x)) / 2\n # df/dx1 = df/dx * dx/dx1 = (e^x - e^(-x)) / 2 * dx/dx1\n der = ((np.exp(var.val) - np.exp(-var.val)) / 2) * var.der\n return Var(val, der)\n except: # var is a real number\n return (np.exp(var) + np.exp(-var)) / 2\n\n @staticmethod\n def tanh(var):\n \"\"\" returns a Var as the result of tanh(var)\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new value and children\n\n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z = Var.tanh(x)\n >>> z.val == pytest.approx((np.exp(3)-np.exp(-3)) / (np.exp(3)+np.exp(-3)))\n True\n >>> z.der == pytest.approx([4 / (np.exp(6) + np.exp(-6) + 2), 0])\n True\n \"\"\"\n try:\n return Var.sinh(var) / Var.cosh(var)\n except: # var is a real number\n return (np.exp(var) - np.exp(-var)) / (np.exp(var) + np.exp(-var))\n \n @staticmethod\n def sin(var):\n \"\"\" returns a Var as the result of var.sin()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z1 = Var.sin(x)\n >>> print('sin(x): {}'.format(vars(z1)))\n sin(x): {'val': 0.1411200080598672, 'der': array([-0.9899925, -0. ])}\n \"\"\"\n try:\n val = np.sin(var.val)\n der = np.array(list(map(lambda x: np.cos(var.val) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.sin(var)\n\n @staticmethod\n def cos(var):\n \"\"\" returns a Var as the result of var.cos()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z1 = Var.cos(x)\n >>> print('cos(x): {}'.format(vars(z1)))\n cos(x): {'val': -0.9899924966004454, 'der': array([-0.14112001, -0. 
])}\n \"\"\"\n try:\n val = np.cos(var.val)\n der = np.array(list(map(lambda x: -np.sin(var.val) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.cos(var)\n \n\n @staticmethod\n def tan(var):\n \"\"\" returns a Var as the result of var.tan()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z1 = Var.tan(x)\n >>> print('tan(x): {}'.format(vars(z1)))\n tan(x): {'val': -0.1425465430742778, 'der': array([1.02031952, 0. ])}\n \n \"\"\"\n try:\n val = np.tan(var.val)\n der = np.array(list(map(lambda x: 1 / (np.cos(var.val) ** 2) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.tan(var)\n\n @staticmethod\n def arcsin(var):\n \"\"\" returns a Var as the result of var.arcsin()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(0.5, np.array([1,0])) \n >>> z1 = Var.arcsin(x)\n >>> print('arcsin(x): {}'.format(vars(z1)))\n arcsin(x): {'val': 0.5235987755982988, 'der': array([1.15470054, 0. ])}\n \"\"\"\n try:\n val = np.arcsin(var.val)\n der = np.array(list(map(lambda x: 1 / ((1 - var.val ** 2) ** 0.5) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.arcsin(var)\n\n @staticmethod\n def arccos(var):\n \"\"\" returns a Var as the result of var.cos()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(0.5, np.array([1,0])) \n >>> z1 = Var.arccos(x)\n >>> print('arccos(x): {}'.format(vars(z1)))\n arccos(x): {'val': 1.0471975511965976, 'der': array([-1.15470054, -0. ])}\n \"\"\"\n try:\n val = np.arccos(var.val)\n der = np.array(list(map(lambda x: -1 / ((1 - var.val ** 2) ** 0.5) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.arccos(var)\n\n @staticmethod\n def arctan(var):\n \"\"\" returns a Var as the result of var.arctan()\n\n INPUT\n =======\n var: a Var object or a real number\n \n RETURNS\n =======\n Var object: a new Var object with new val and ders\n \n EXAMPLES\n =======\n >>> x = Var(3, np.array([1,0])) \n >>> z1 = Var.arctan(x)\n >>> print('arctan(x): {}'.format(vars(z1)))\n arctan(x): {'val': 1.2490457723982544, 'der': array([0.1, 0. 
])}\n \"\"\"\n try:\n val = np.arctan(var.val)\n der = np.array(list(map(lambda x: 1 / (1 + var.val ** 2) * x, var.der)))\n return Var(val, der)\n except AttributeError:\n return np.arctan(var)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True)\n\n # expk\n # x = Var(3, np.array([1,0]))\n # z = Var.expk(x, 4)\n # print(vars(z))\n # x = Var(3, np.array([1,0]))\n # y = Var(2, np.array([0,1]))\n # z = Var(3, np.array([1,0]))\n\n # # eq, ne\n # print(x==y)\n # print(x == z)\n # print(x!=y)\n # print(x != z)\n\n # # neg\n # z1 = -x\n # print('-x: {}'.format(vars(z1)))\n # z2 = -(x**2)\n # print('-x**2: {}'.format(vars(z2)))\n\n # # div\n # z1 = x / y\n # print('x / y: {}'.format(vars(z1)))\n # z2 = x / 2\n # print('x / 2: {}'.format(vars(z2)))\n\n # # pow\n # z1 = x**y\n # print('x ** y: {}'.format(vars(z1)))\n # z2 = x**2\n # print('x ** 2: {}'.format(vars(z2)))\n # z3 = 2**x\n # print('2 ** x: {}'.format(vars(z3)))\n # z4 = x**(-1)\n # print('x ** (-1): {}'.format(vars(z4)))\n\n # # sub\n # z1 = x - y\n # print('x - y: {}'.format(vars(z1)))\n # z2 = x - 2\n # print('x - 2: {}'.format(vars(z2)))\n # z3 = 2 - x\n # print('2 - x: {}'.format(vars(z3)))\n\n\n # # add\n # z1 = x + y\n # print('x + y: {}'.format(vars(z1)))\n # z2 = x + 1\n # print('x + 1: {}'.format(vars(z2)))\n # z3 = 1 + x\n # print('1 + x: {}'.format(vars(z3)))\n\n # # mul\n # z4 = y*2\n # print('y * 2: {}'.format(vars(z4)))\n # z5 = 2*y\n # print('2 * y: {}'.format(vars(z5)))\n # z6 = -1*y\n # print('-1 * y: {}'.format(vars(z6)))\n # z7 = y*(-1)\n # print('y * (-1): {}'.format(vars(z7)))\n # z8 = x*y\n # print('x * y: {}'.format(vars(z8)))\n\n\n # x = Var(3, np.array([1]))\n\n # # log\n # z1 = Var.log(x)\n # print('log(x): {}'.format(vars(z1)))\n\n # # logk\n # z1 = Var.logk(x, 3.0)\n # print('logk(x, 3.0): {}'.format(vars(z1)))\n\n # # exp\n # z1 = Var.exp(x)\n # print('exp(x): {}'.format(vars(z1)))\n \n # # sqrt\n # z1 = Var.sqrt(x)\n # print('sqrt(x): {}'.format(vars(z1)))\n \n # # sin\n # z1 = Var.sin(x)\n # print('sin(x): {}'.format(vars(z1)))\n \n # # cos\n # z1 = Var.cos(x)\n # print('cos(x): {}'.format(vars(z1)))\n \n # # tan\n # z1 = Var.tan(x)\n # print('tan(x): {}'.format(vars(z1)))\n \n\n # x = Var(0.5, np.array([1,0])) \n # z1 = Var.arcsin(x)\n # print('arcsin(x): {}'.format(vars(z1)))\n\n # x = Var(0.5, np.array([1,0])) \n # z1 = Var.arccos(x)\n # print('arccos(x): {}'.format(vars(z1)))\n \n # x = Var(3, np.array([1,0])) \n # z1 = Var.arctan(x)\n # print('arctan(x): {}'.format(vars(z1)))\n " ]
[ [ "numpy.log", "numpy.sqrt", "numpy.arctan", "numpy.arcsin", "numpy.cos", "numpy.arccos", "numpy.sin", "numpy.tan", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fossabot/turicreate
[ "7f07ce795833d0c56c72b3a1fb9339bed6d178d1", "7f07ce795833d0c56c72b3a1fb9339bed6d178d1" ]
[ "src/nnvm/tvm/topi/recipe/conv/test_conv2d_hwcn_map.py", "src/unity/python/turicreate/test/test_graph_analytics.py" ]
[ "\"\"\"Example code to do convolution.\"\"\"\nimport os\nimport numpy as np\nimport scipy.signal\nimport tvm\nfrom tvm.contrib import nvcc\nimport topi\nfrom topi.nn.util import get_const_tuple\n\nTASK = \"conv2d_hwcn_map\"\nUSE_MANUAL_CODE = False\n\[email protected]_func\ndef tvm_callback_cuda_compile(code):\n ptx = nvcc.compile_cuda(code, target=\"ptx\")\n return ptx\n\ndef write_code(code, fname):\n with open(fname, \"w\") as f:\n f.write(code)\n\[email protected]_func\ndef tvm_callback_cuda_postproc(code):\n if not os.path.exists(\"perf\"):\n os.mkdir(\"perf\")\n write_code(code, \"perf/%s_generated.cu\" % TASK)\n if USE_MANUAL_CODE:\n code = open(\"perf/%s_manual.cu\" % TASK).read()\n return code\n\n\ndef test_conv2d_hwcn_map():\n batch = 64\n in_channel = 128\n in_height = 16\n in_width = 16\n num_filter = 128\n kernel = 3\n stride = 2\n padding = 'SAME'\n\n A = tvm.placeholder((in_height, in_width, in_channel, batch), name='A')\n W = tvm.placeholder((kernel, kernel, in_channel, num_filter), name='W')\n B = topi.nn.conv2d_hwcn(A, W, stride, padding)\n C = topi.nn.relu(B)\n s1 = topi.cuda.schedule_conv2d_hwcn([B])\n s2 = topi.cuda.schedule_conv2d_hwcn([C])\n\n a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)\n w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)\n b_np = topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)\n c_np = np.maximum(b_np, 0)\n\n def check_device(device):\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n ctx = tvm.gpu(0) if device == \"cuda\" else tvm.cl(0)\n a = tvm.nd.array(a_np, ctx)\n w = tvm.nd.array(w_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)\n c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)\n with tvm.build_config(auto_unroll_max_step=32,\n auto_unroll_min_depth=0,\n unroll_explicit=False):\n func1 = tvm.build(s1, [A, W, B], device)\n func1(a, w, b)\n np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n func2 = tvm.build(s2, [A, W, C], device)\n func2(a, w, c)\n np.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)\n\n for device in ['cuda', 'opencl']:\n check_device(device)\n\n\nif __name__ == \"__main__\":\n test_conv2d_hwcn_map()\n", "# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nfrom . 
import util\nimport unittest\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\nimport turicreate as tc\nfrom turicreate.connect.main import get_unity\nfrom turicreate.toolkits._main import ToolkitError\nfrom turicreate.data_structures.sgraph import SGraph\nfrom turicreate.data_structures.sframe import SFrame\n\nimport sys\nif sys.version_info.major == 3:\n unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual\n\ndataset_server = \"http://testdatasets.s3-website-us-west-2.amazonaws.com/\"\n\nclass GraphAnalyticsTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n url = dataset_server + \"p2p-Gnutella04.txt.gz\"\n cls.graph = tc.load_graph(url, format='snap')\n\n def __test_model_save_load_helper__(self, model):\n with util.TempDirectory() as f:\n model.save(f)\n m2 = tc.load_model(f)\n self.assertItemsEqual(model._list_fields(), m2._list_fields())\n for key in model._list_fields():\n if type(model._get(key)) is SGraph:\n self.assertItemsEqual(model._get(key).summary(), m2._get(key).summary())\n self.assertItemsEqual(model._get(key).get_fields(), m2._get(key).get_fields())\n elif type(model._get(key)) is SFrame:\n sf1 = model._get(key)\n sf2 = m2._get(key)\n self.assertEqual(len(sf1), len(sf2))\n self.assertItemsEqual(sf1.column_names(), sf2.column_names())\n df1 = sf1.to_dataframe()\n print(df1)\n df2 = sf2.to_dataframe()\n print(df2)\n df1 = df1.set_index(df1.columns[0])\n df2 = df2.set_index(df2.columns[0])\n assert_frame_equal(df1, df2)\n else:\n if (type(model._get(key)) is pd.DataFrame):\n assert_frame_equal(model._get(key), m2._get(key))\n else:\n self.assertEqual(model._get(key), m2._get(key))\n\n def test_degree_count(self):\n if \"degree_count\" in get_unity().list_toolkit_functions():\n m = tc.degree_counting.create(self.graph)\n m.summary()\n self.__test_model_save_load_helper__(m)\n\n g = m.graph\n expected_out_deg = g.edges.groupby('__src_id', {'expected': tc.aggregate.COUNT})\n expected_out_deg = expected_out_deg.join(g.vertices[['__id']], on={'__src_id': \"__id\"}, how=\"right\").fillna(\"expected\", 0)\n expected_out_deg = expected_out_deg.sort(\"__src_id\")['expected']\n expected_in_deg = g.edges.groupby('__dst_id', {'expected': tc.aggregate.COUNT})\n expected_in_deg = expected_in_deg.join(g.vertices[['__id']], on={'__dst_id': \"__id\"}, how=\"right\").fillna(\"expected\", 0)\n expected_in_deg = expected_in_deg.sort(\"__dst_id\")['expected']\n\n sf = g.vertices.sort('__id')\n actual_out_deg = sf['out_degree']\n actual_in_deg = sf['in_degree']\n actual_all_deg = sf['total_degree']\n self.assertEqual((expected_in_deg - actual_in_deg).sum(), 0)\n self.assertEqual((expected_out_deg - actual_out_deg).sum(), 0)\n self.assertEqual((actual_all_deg - (actual_out_deg + actual_in_deg)).sum(), 0)\n\n def test_label_propagation(self):\n if \"label_propagation\" in get_unity().list_toolkit_functions():\n g = self.graph.copy()\n num_vertices = len(g.vertices)\n num_classes = 2\n\n def get_label(vid):\n if vid < 100:\n return 0\n elif vid > num_vertices - 100:\n return 1\n else:\n return None\n g.vertices['label'] = g.vertices['__id'].apply(get_label, int)\n m = tc.label_propagation.create(g, label_field='label')\n\n m.summary()\n self.__test_model_save_load_helper__(m)\n\n for row in m.graph.vertices:\n predicted_label = row['predicted_label']\n if predicted_label is None:\n for k in ['P%d' % i for i in range(num_classes)]:\n self.assertAlmostEqual(row[k], 1.0 / num_classes)\n else:\n sum_of_prob = 0.0\n for k in ['P%d' % i for i 
in range(num_classes)]:\n sum_of_prob += row[k]\n self.assertGreaterEqual(row['P%d' % predicted_label], row[k])\n self.assertAlmostEqual(sum_of_prob, 1.0)\n\n # Add more options: weighted edges, change self weight, and undirected edges\n def get_edge_weight(vid):\n return float(vid) * 10 / num_vertices\n g.edges['weight'] = g.edges['__src_id'].apply(get_edge_weight, float)\n m = tc.label_propagation.create(g, label_field='label', threshold=1e-2,\n weight_field='weight', self_weight=0.5,\n undirected=True)\n\n # Test early termination using max_iteration\n max_iter = 3\n m = tc.label_propagation.create(g, label_field='label', threshold=1e-10, max_iterations=max_iter)\n self.assertEqual(m.num_iterations, max_iter)\n\n # Test that the predict class should be None if all class probabilities are equal\n g = g.add_vertices(tc.SFrame({'__id': [-1]}))\n m = tc.label_propagation.create(g, label_field='label', threshold=1e-10, max_iterations=max_iter)\n result = m.graph.vertices\n self.assertEquals(result[result['__id'] == -1]['predicted_label'][0], None)\n\n def test_pagerank(self):\n if \"pagerank\" in get_unity().list_toolkit_functions():\n m = tc.pagerank.create(self.graph)\n print(m)\n m.summary()\n self.assertEqual((m.pagerank.num_rows(), m.pagerank.num_columns()),\n (self.graph.summary()['num_vertices'], 3))\n self.assertEqual(int(m.pagerank['pagerank'].sum()), 2727)\n self.__test_model_save_load_helper__(m)\n\n m2 = tc.pagerank.create(self.graph, reset_probability=0.5)\n print(m2)\n self.assertEqual((m2.pagerank.num_rows(), m2.pagerank.num_columns()),\n (self.graph.summary()['num_vertices'], 3))\n self.assertAlmostEqual(m2.pagerank['pagerank'].sum(), 7087.08, delta=1e-2)\n with self.assertRaises(Exception):\n assert_frame_equal(m.pagerank.topk('pagerank'), m2.pagerank.topk('pagerank'))\n self.__test_model_save_load_helper__(m2)\n\n def test_triangle_counting(self):\n if \"triangle_counting\" in get_unity().list_toolkit_functions():\n m = tc.triangle_counting.create(self.graph)\n print(m)\n m.summary()\n self.__test_model_save_load_helper__(m)\n self.assertEqual(m.num_triangles, 934)\n\n def test_connected_component(self):\n if \"connected_component\" in get_unity().list_toolkit_functions():\n m = tc.connected_components.create(self.graph)\n print(m)\n m.summary()\n print(m.component_id)\n print(m.component_size)\n self.assertEqual(m.component_size.num_rows(), 1)\n self.__test_model_save_load_helper__(m)\n\n def test_graph_coloring(self):\n if \"graph_coloring\" in get_unity().list_toolkit_functions():\n m = tc.graph_coloring.create(self.graph)\n print(m)\n m.summary()\n # coloring is non-deterministic, so we cannot verify the result here\n self.__test_model_save_load_helper__(m)\n\n def test_kcore(self):\n if \"kcore\" in get_unity().list_toolkit_functions():\n m = tc.kcore.create(self.graph)\n print(m)\n m.summary()\n biggest_core = m.core_id.groupby('core_id', tc.aggregate.COUNT).topk('Count').head(1)\n self.assertEqual(biggest_core['core_id'][0], 6)\n self.assertEqual(biggest_core['Count'][0], 4492)\n self.__test_model_save_load_helper__(m)\n\n def test_shortest_path(self):\n if \"sssp\" in get_unity().list_toolkit_functions():\n m = tc.shortest_path.create(self.graph, source_vid=0)\n print(m)\n m.summary()\n self.__test_model_save_load_helper__(m)\n\n m2 = tc.shortest_path.create(self.graph, source_vid=0)\n print(m2)\n self.__test_model_save_load_helper__(m2)\n\n # Test get_path function on a simple chain graph and star graph\n chain_graph = tc.SGraph().add_edges([tc.Edge(i, i + 1) for i 
in range(10)])\n m3 = tc.shortest_path.create(chain_graph, source_vid=0)\n for i in range(10):\n self.assertSequenceEqual(m3.get_path(i), [(j, float(j)) for j in range(i + 1)])\n\n star_graph = tc.SGraph().add_edges([tc.Edge(0, i + 1) for i in range(10)])\n m4 = tc.shortest_path.create(star_graph, source_vid=0)\n for i in range(1, 11):\n self.assertSequenceEqual(m4.get_path(i), [(0, 0.0), (i, 1.0)])\n\n # Test that get_path with the 'show' parameter set to True doesn't\n # break.\n #\n # Showing is problematic when there is actually a browser.\n # This will pause scripts.\n # m4.get_path(i, show=True)\n\n # Test sssp ignoring the existing distance field\n star_graph.vertices['distance'] = 0\n m5 = tc.shortest_path.create(star_graph, source_vid=0)\n for i in range(1, 11):\n self.assertSequenceEqual(m5.get_path(i), [(0, 0.0), (i, 1.0)])\n\n def test_compute_shortest_path(self):\n edge_src_ids = ['src1', 'src2', 'a', 'b', 'c' ]\n edge_dst_ids = [ 'a', 'b', 'dst', 'c', 'dst']\n edges = tc.SFrame({'__src_id': edge_src_ids, '__dst_id': edge_dst_ids})\n g=tc.SGraph().add_edges(edges)\n res = list(tc.shortest_path._compute_shortest_path(g, [\"src1\",\"src2\"], \"dst\"))\n self.assertEquals(res, [[\"src1\", \"a\", \"dst\"]])\n res = list(tc.shortest_path._compute_shortest_path(g, \"src2\", \"dst\"))\n self.assertEquals(res, [[\"src2\", \"b\", \"c\", \"dst\"]])\n\n edge_src_ids = [0,1,2,3,4]\n edge_dst_ids = [2,3,5,4,5]\n edge_weights = [1,0.1,1,0.1,0.1]\n g=tc.SFrame({'__src_id':edge_src_ids,'__dst_id':edge_dst_ids, 'weights':edge_weights})\n g=tc.SGraph(edges=g)\n t=tc.shortest_path._compute_shortest_path(g,[0,1],[5],\"weights\")\n self.assertEquals(t.astype(list)[0], [1,3,4,5])\n" ]
[ [ "numpy.maximum" ], [ "pandas.util.testing.assert_frame_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
MarsZhaoYT/SAR2Opt-Heterogeneous-Dataset
[ "af9428453028456d834d3268360d23d2652baf13", "af9428453028456d834d3268360d23d2652baf13", "af9428453028456d834d3268360d23d2652baf13" ]
[ "CUT/util/visualizer.py", "CUT/metric/psnr_ssim.py", "ASGIT/models/attn_cycle_gan_model.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\n# try:\n#from func_timeout import func_timeout, FunctionTimedOut\n# except ImportError:\n# print(\"module func_timeout was not installed. Please install func_timeout using pip install func-timeout.\")\n\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n #image_name = '%s_%s.png' % (name, label)\n image_name = '%s/%s.png' % (label, name)\n os.makedirs(os.path.join(image_dir, label), exist_ok=True)\n save_path = os.path.join(image_dir, image_name)\n util.save_image(im, save_path, aspect_ratio=aspect_ratio)\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\nclass Visualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n if opt.display_id is None:\n self.display_id = np.random.randint(100000) * 10 # just a random display id\n else:\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.plot_data = {}\n self.ncols = opt.display_ncols\n if \"tensorboard_base_url\" not in os.environ:\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)\n else:\n self.vis = visdom.Visdom(port=2004,\n base_url=os.environ['tensorboard_base_url'] + '/visdom')\n if not self.vis.check_connection():\n self.create_visdom_connections()\n\n if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' 
% self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols > 0: # show all the images in one visdom panel\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h) # create a table css\n # create a table of images.\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n try:\n self.vis.images(images, ncols, 2, self.display_id + 1,\n None, dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(\n image_numpy.transpose([2, 0, 1]),\n self.display_id + idx,\n None,\n dict(title=label)\n )\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, image_numpy in 
visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if len(losses) == 0:\n return\n\n plot_name = '_'.join(list(losses.keys()))\n\n if plot_name not in self.plot_data:\n self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n\n plot_data = self.plot_data[plot_name]\n plot_id = list(self.plot_data.keys()).index(plot_name)\n\n plot_data['X'].append(epoch + counter_ratio)\n plot_data['Y'].append([losses[k] for k in plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),\n Y=np.array(plot_data['Y']),\n opts={\n 'title': self.name,\n 'legend': plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id - plot_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message) # save the message\n", "\"\"\"\n# > Script for measuring quantitative performances in terms of\n# - Structural Similarity Metric (SSIM)\n# - Peak Signal to Noise Ratio (PSNR)\n# > Maintainer: https://github.com/xahidbuffon\n\"\"\"\n## python libs\nimport numpy as np\nfrom PIL import Image\nfrom glob import glob\nfrom os.path import join\nfrom ntpath import basename\n## local libs\nfrom imqual_utils import getSSIM, getPSNR\nimport cv2\n\n\n## compares avg ssim and psnr\ndef SSIMs_PSNRs(gtr_dir, gen_dir, im_res=(256, 256)):\n \"\"\"\n - gtr_dir contain ground-truths\n - gen_dir contain generated images\n \"\"\"\n gtr_paths = sorted(glob(join(gtr_dir, \"*.*\")))\n gen_paths = sorted(glob(join(gen_dir, \"*.*\")))\n ssims, psnrs = [], []\n for gtr_path, gen_path in zip(gtr_paths, gen_paths):\n gtr_f = basename(gtr_path).split('.')[0]\n gen_f = basename(gen_path).split('.')[0]\n \n # read images from two datasets\n r_im = Image.open(gtr_path).resize(im_res)\n g_im = Image.open(gen_path).resize(im_res)\n\n # get ssim on RGB channels\n ssim = getSSIM(np.array(r_im), np.array(g_im))\n ssims.append(ssim)\n # get psnt on L channel (SOTA norm)\n r_im = r_im.convert(\"L\"); g_im = g_im.convert(\"L\")\n psnr = getPSNR(np.array(r_im), np.array(g_im))\n psnrs.append(psnr)\n return 
np.array(ssims), np.array(psnrs)\n\n\n\"\"\"\nGet datasets from\n - http://irvlab.cs.umn.edu/resources/euvp-dataset\n - http://irvlab.cs.umn.edu/resources/ufo-120-dataset\n\"\"\"\ngtr_dir = \"Image_translation_codes/contrastive-unpaired-translation/results/sar2opt_FastCUT/test_latest/images/real_B\"\n\n## generated im paths\ngen_dir = \"Image_translation_codes/contrastive-unpaired-translation/results/sar2opt_FastCUT/test_latest/images/fake_B\"\n\n\n### compute SSIM and PSNR\nSSIM_measures, PSNR_measures = SSIMs_PSNRs(gtr_dir, gen_dir)\nprint (\"SSIM on {0} samples\".format(len(SSIM_measures)))\nprint (\"Mean: {0} std: {1}\".format(np.mean(SSIM_measures), np.std(SSIM_measures)))\n\nprint (\"PSNR on {0} samples\".format(len(PSNR_measures)))\nprint (\"Mean: {0} std: {1}\".format(np.mean(PSNR_measures), np.std(PSNR_measures)))\n\n\n", "import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom data import aux_dataset\nfrom . import networks\n\n\nclass AttnCycleGANModel(BaseModel):\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout\n parser.add_argument('--mask_size', type=int, default=256)\n parser.add_argument('--s1', type=int, default=32)\n parser.add_argument('--s2', type=int, default=16)\n parser.add_argument('--concat', type=str, default='rmult')\n if is_train:\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_identity', type=float, default=0.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n\n return parser\n\n def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n visual_names_A = ['real_A', 'fake_B', 'rec_A']\n visual_names_B = ['real_B', 'fake_A', 'rec_B']\n if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\n visual_names_A.append('idt_B')\n visual_names_B.append('idt_A')\n\n self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.\n if self.isTrain:\n self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_B']\n\n # define networks (both Generators and discriminators)\n # The naming is different from those used in the paper.\n # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n\n if opt.concat != 'alpha':\n self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n else:\n self.netG_A = networks.define_G(4, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B = networks.define_G(4, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n # 修改了此处的(3000, 3000) -> (opt.data_length, opt.data_length)\n self.aux_data = aux_dataset.AuxAttnDataset(opt.data_length, opt.data_length, self.gpu_ids[0], mask_size=opt.mask_size)\n self.zero_attn_holder = torch.zeros((1, 1, opt.mask_size, opt.mask_size), dtype=torch.float32).to(self.device)\n self.ones_attn_holder = torch.ones((1, 1, opt.mask_size, opt.mask_size), dtype=torch.float32).to(self.device)\n self.concat = opt.concat\n\n if self.isTrain: # define discriminators\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n opt.mask_size, opt.s1, opt.s2)\n self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n opt.mask_size, opt.s1, opt.s2)\n\n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n # assert(opt.input_nc == opt.output_nc)\n pass\n self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap domain A and domain B.\n \"\"\"\n\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.attn_A_index = input['ADX' if AtoB else'BDX']\n self.attn_B_index = input['BDX' if AtoB else'ADX']\n\n\n if AtoB:\n self.attn_A, self.attn_B = self.aux_data.get_attn_map(self.attn_A_index, self.attn_B_index)\n else:\n self.attn_B, self.attn_A = self.aux_data.get_attn_map(self.attn_A_index, self.attn_B_index)\n\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n if self.concat == 'alpha':\n 
self.fake_B = self.netG_A(torch.cat((self.real_A, self.attn_A), 1))\n self.rec_A = self.netG_B(torch.cat((self.fake_B, self.ones_attn_holder), 1))\n self.fake_A = self.netG_B(torch.cat((self.real_B, self.attn_B), 1))\n self.rec_B = self.netG_A(torch.cat((self.fake_A, self.ones_attn_holder), 1))\n elif self.concat == 'mult':\n self.fake_B = self.netG_A(self.real_A * self.attn_A)\n self.rec_A = self.netG_B(self.fake_B)\n self.fake_A = self.netG_B(self.real_B * self.attn_B)\n self.rec_B = self.netG_A(self.fake_A)\n elif self.concat == 'rmult':\n # print('real_A: ', self.real_A.size()) # [1, 3, 256, 256]\n # print('attn_A: ', self.attn_A.size()) # [1, 1, 128, 128]\n self.fake_B = self.netG_A(self.real_A * (1. + self.attn_A))\n self.rec_A = self.netG_B(self.fake_B)\n self.fake_A = self.netG_B(self.real_B * (1. + self.attn_B))\n self.rec_B = self.netG_A(self.fake_A)\n elif self.concat == 'none':\n self.fake_B = self.netG_A(self.real_A) # G_A(A)\n self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))\n self.fake_A = self.netG_B(self.real_B) # G_B(B)\n self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))\n else:\n raise NotImplementedError('Unsupported concatenation operation')\n\n def forward_test(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n if self.concat == 'alpha':\n self.fake_B = self.netG_A(torch.cat((self.real_A, self.attn_A), 1))\n self.rec_A = self.netG_B(torch.cat((self.fake_B, self.ones_attn_holder), 1))\n self.fake_A = self.netG_B(torch.cat((self.real_B, self.attn_B), 1))\n self.rec_B = self.netG_A(torch.cat((self.fake_A, self.ones_attn_holder), 1))\n elif self.concat == 'mult':\n self.fake_B = self.netG_A(self.real_A * self.attn_A)\n self.rec_A = self.netG_B(self.fake_B)\n self.fake_A = self.netG_B(self.real_B * self.attn_B)\n self.rec_B = self.netG_A(self.fake_A)\n elif self.concat == 'rmult':\n self.fake_B = self.netG_A(self.real_A * 1.5)\n self.rec_A = self.netG_B(self.fake_B)\n self.fake_A = self.netG_B(self.real_B * 1.5)\n self.rec_B = self.netG_A(self.fake_A)\n elif self.concat == 'none':\n self.fake_B = self.netG_A(self.real_A) # G_A(A)\n self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))\n self.fake_A = self.netG_B(self.real_B) # G_B(B)\n self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))\n else:\n raise NotImplementedError('Unsupported concatenation operation')\n\n def backward_D_basic(self, netD, real, fake):\n \"\"\"Calculate GAN loss for the discriminator\n\n Parameters:\n netD (network) -- the discriminator D\n real (tensor array) -- real images\n fake (tensor array) -- images generated by a generator\n\n Return the discriminator loss.\n We also call loss_D.backward() to calculate the gradients.\n \"\"\"\n # Real\n pred_real, _ = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake, _ = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss and calculate gradients\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n loss_D.backward()\n return loss_D\n\n def backward_D_A(self):\n \"\"\"Calculate GAN loss for discriminator D_A\"\"\"\n fake_B = self.fake_B_pool.query(self.fake_B)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\n def backward_D_B(self):\n \"\"\"Calculate GAN loss for discriminator D_B\"\"\"\n fake_A = self.fake_A_pool.query(self.fake_A)\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n\n def backward_G(self):\n \"\"\"Calculate the loss for generators G_A and G_B\"\"\"\n lambda_idt = 
self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n if lambda_idt > 0:\n if self.concat == 'alpha':\n self.idt_A = self.netG_A(torch.cat((self.real_B, self.ones_attn_holder), 1))\n self.idt_B = self.netG_B(torch.cat((self.real_A, self.ones_attn_holder), 1))\n else:\n self.idt_A = self.netG_A(self.real_B)\n self.idt_B = self.netG_B(self.real_A)\n\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0.\n self.loss_idt_B = 0.\n\n dis_A_res, self.tmp_attn_A = self.netD_A(self.fake_B)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(dis_A_res, True)\n dis_B_res, self.tmp_attn_B = self.netD_B(self.fake_A)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(dis_B_res, True)\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n self.loss_G.backward()\n\n def optimize_parameters(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n # forward\n self.forward() # compute fake images and reconstruction images.\n # G_A and G_B\n self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs\n self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero\n self.backward_G() # calculate gradients for G_A and G_B\n self.optimizer_G.step() # update G_A and G_B's weights\n # D_A and D_B\n self.set_requires_grad([self.netD_A, self.netD_B], True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D_A() # calculate gradients for D_A\n self.backward_D_B() # calculate graidents for D_B\n self.optimizer_D.step() # update D_A and D_B's weights\n self.aux_data.update_attn_map(self.attn_A_index, self.tmp_attn_A.detach_(), True)\n self.aux_data.update_attn_map(self.attn_B_index, self.tmp_attn_B.detach_(), False)\n\n def test(self):\n \"\"\"Forward function used in test time.\n\n This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop\n It also calls <compute_visuals> to produce additional visualization results\n \"\"\"\n with torch.no_grad():\n self.forward_test()\n self.compute_visuals()\n" ]
[ [ "numpy.array", "numpy.random.randint" ], [ "numpy.std", "numpy.array", "numpy.mean" ], [ "torch.ones", "torch.cat", "torch.zeros", "torch.no_grad", "torch.nn.L1Loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
glhr/gammatone
[ "14fdcd37c0c3054e5c85ed8c53f2cdec6e5d2b99" ]
[ "tests/test_specgram.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n#\n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom mock import patch\nimport nose\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.fftweight\n\nREF_DATA_FILENAME = 'data/test_specgram_data.mat'\n\nINPUT_KEY = 'specgram_inputs'\nMOCK_KEY = 'specgram_mocks'\nRESULT_KEY = 'specgram_results'\n\nINPUT_COLS = ('name', 'wave', 'nfft', 'fs', 'nwin', 'nhop')\nMOCK_COLS = ('window',)\nRESULT_COLS = ('res',)\n\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n\n zipped_data = zip(data[INPUT_KEY], data[MOCK_KEY], data[RESULT_KEY])\n for inputs, mocks, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, inputs))\n mock_dict = dict(zip(MOCK_COLS, mocks))\n ref_dict = dict(zip(RESULT_COLS, refs))\n\n yield (input_dict, mock_dict, ref_dict)\n\n\ndef test_specgram():\n for inputs, mocks, refs in load_reference_data():\n args = (\n inputs['nfft'],\n inputs['fs'],\n inputs['nwin'],\n inputs['nhop'],\n )\n\n yield SpecgramTester(\n inputs['name'][0],\n args,\n inputs['wave'],\n mocks['window'],\n refs['res']\n )\n\nclass SpecgramTester:\n \"\"\" Testing class for specgram replacement calculation \"\"\"\n\n def __init__(self, name, args, sig, window, expected):\n self.signal = np.asarray(sig).squeeze()\n self.expected = np.asarray(expected).squeeze()\n self.args = [int(a.squeeze()) for a in args]\n self.window = window.squeeze()\n self.description = \"Specgram for {:s}\".format(name)\n\n\n def __call__(self):\n with patch(\n 'gammatone.fftweight.specgram_window',\n return_value=self.window):\n result = gammatone.fftweight.specgram(self.signal, *self.args)\n\n max_diff = np.max(np.abs(result - self.expected))\n diagnostic = \"Maximum difference: {:6e}\".format(max_diff)\n\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12), diagnostic\n\nif __name__ == '__main__':\n nose.main()\n" ]
[ [ "numpy.asarray", "numpy.abs", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
david-zwicker/sensing-normalized-results
[ "5b64ab34d400dcf457626e1ad244c2b4a889ac80", "5b64ab34d400dcf457626e1ad244c2b4a889ac80", "5b64ab34d400dcf457626e1ad244c2b4a889ac80", "5b64ab34d400dcf457626e1ad244c2b4a889ac80" ]
[ "src/binary_response/library_theory.py", "src/binary_response/tests.py", "src/adaptive_response/adaptive_threshold/at_numeric.py", "figures/calc_mutual_information_distributed.py" ]
[ "'''\nCreated on Sep 10, 2015\n\n@author: David Zwicker <[email protected]>\n'''\n\nfrom __future__ import division\n\nimport logging\n\nimport numpy as np\n\nfrom utils.math.distributions import lognorm_mean, DeterministicDistribution\n\n__all__ = ['LibraryLogNormal']\n\n\n\nclass LibraryLogNormal(object):\n \"\"\" represents a single receptor library with random entries drawn from a\n log-normal distribution \"\"\"\n\n\n def __init__(self, mixture, mean_sensitivity=1, correlation=0, **kwargs):\n \"\"\" initialize the receptor library by setting the number of receptors,\n the number of substrates it can respond to, and the typical sensitivity\n or magnitude S0 of the sensitivity matrix.\n The width of the distribution is either set by the parameter `width` or\n by setting the `standard_deviation`.\n \"\"\"\n self.mixture = mixture\n self.mean_sensitivity = mean_sensitivity\n self.correlation = correlation\n\n if 'standard_deviation' in kwargs:\n standard_deviation = kwargs.pop('standard_deviation')\n cv = standard_deviation / mean_sensitivity \n self.width = np.sqrt(np.log(cv**2 + 1))\n elif 'width' in kwargs:\n self.width = kwargs.pop('width')\n else:\n standard_deviation = 1\n cv = standard_deviation / mean_sensitivity \n self.width = np.sqrt(np.log(cv**2 + 1))\n\n # raise an error if keyword arguments have not been used\n if len(kwargs) > 0:\n raise ValueError('The following keyword arguments have not been '\n 'used: %s' % str(kwargs)) \n \n \n @property\n def standard_deviation(self):\n \"\"\" return the standard deviation of the distribution \"\"\"\n return self.mean_sensitivity * np.sqrt((np.exp(self.width**2) - 1))\n \n\n @property\n def sensitivity_distribution(self):\n \"\"\" returns the sensitivity distribution \"\"\"\n if self.correlation != 0:\n raise NotImplementedError('Cannot return the sensitivity '\n 'distribution with correlations, yet')\n \n if self.width == 0:\n return DeterministicDistribution(self.mean_sensitivity)\n else:\n return lognorm_mean(self.mean_sensitivity, self.width)\n\n\n def sensitivity_stats(self):\n \"\"\" returns statistics of the sensitivity distribution \"\"\"\n S0 = self.mean_sensitivity\n var = S0**2 * (np.exp(self.width**2) - 1)\n covar = S0**2 * (np.exp(self.correlation * self.width**2) - 1)\n return {'mean': S0, 'std': np.sqrt(var), 'var': var, 'cov': covar}\n\n\n def get_optimal_parameters(self, fixed_parameter='S0'):\n \"\"\" returns an estimate for the optimal parameters for the random\n interaction matrices.\n `fixed_parameter` determines which parameter is kept fixed during\n the optimization procedure\n \"\"\"\n if self.mixture.is_correlated_mixture:\n logging.warning('The optimization has not been tested for '\n 'correlated mixtures')\n\n ctot_stats = self.mixture.ctot_statistics()\n ctot_mean = ctot_stats['mean']\n ctot_var = ctot_stats['var']\n ctot_cv2 = ctot_var/ctot_mean**2\n \n if fixed_parameter == 'width':\n # keep the width parameter fixed and determine the others \n width_opt = self.width\n \n arg = 1 + ctot_cv2 * np.exp(width_opt**2)\n S0_opt = np.sqrt(arg) / ctot_mean\n std_opt = S0_opt * np.sqrt(np.exp(width_opt**2) - 1)\n \n elif fixed_parameter == 'S0':\n # keep the typical sensitivity fixed and determine the other params \n S0_opt = self.mean_sensitivity\n \n arg = (ctot_mean**2 * self.mean_sensitivity**2 - 1)/ctot_cv2\n if arg >= 1:\n width_opt = np.sqrt(np.log(arg))\n std_opt = self.mean_sensitivity * np.sqrt(arg - 1)\n else:\n logging.warning('Given mean sensitivity is too small to find a '\n 'suitable width 
parameter')\n width_opt = 0\n std_opt = 0\n \n else:\n raise ValueError('Parameter `%s` is unknown or cannot be held '\n 'fixed' % fixed_parameter) \n \n return {'mean_sensitivity': S0_opt, 'width': width_opt,\n 'standard_deviation': std_opt}\n \n \n def get_optimal_library(self, fixed_parameter='S0'):\n \"\"\" returns an estimate for the optimal parameters for the random\n interaction matrices.\n `fixed_parameter` determines which parameter is kept fixed during\n the optimization procedure\n \"\"\"\n library_opt = self.get_optimal_parameters(fixed_parameter)\n return {'distribution': 'log_normal', 'width': library_opt['width'],\n 'mean_sensitivity': library_opt['mean_sensitivity'],\n 'correlation': 0}\n \n \n ", "'''\nCreated on May 1, 2015\n\n@author: David Zwicker <[email protected]>\n'''\n\nfrom __future__ import division\n\nimport unittest\n\nimport numpy as np\n\nfrom .library_base import LibraryBase\nfrom utils.testing import TestBase\n\n \n \nclass TestLibraryBase(TestBase):\n \"\"\" unit tests for the continuous library \"\"\"\n\n _multiprocess_can_split_ = True #< let nose know that tests can run parallel\n \n\n def test_base_class(self):\n \"\"\" test the base class \"\"\"\n obj = LibraryBase.create_test_instance()\n \n # calculate mutual information\n for method in ('expansion', 'hybrid', 'polynom'):\n q_n = np.full(obj.Nr, 0.1) + 0.8*np.random.rand()\n q_nm = np.full((obj.Nr, obj.Nr), 0.1) + 0.1*np.random.rand()\n\n np.fill_diagonal(q_nm, 0)\n q_nm_mean = q_nm[~np.eye(obj.Nr, dtype=np.bool)].mean()\n q_nm_var = q_nm[~np.eye(obj.Nr, dtype=np.bool)].var()\n \n MI1 = obj._estimate_MI_from_q_values(q_n, q_nm, method=method)\n MI2 = obj._estimate_MI_from_q_stats(\n q_n.mean(), q_nm_mean, q_n.var(), q_nm_var,\n method=method\n )\n msg = 'Mutual informations do not agree for method=`%s`' % method\n self.assertAllClose(MI1, MI2, rtol=0.1, msg=msg)\n \n \n\nif __name__ == '__main__':\n unittest.main()\n", "'''\nCreated on Feb 22, 2016\n\n@author: David Zwicker <[email protected]>\n'''\n\nfrom __future__ import division, absolute_import\n\nimport numpy as np\n\nfrom binary_response.sparse_mixtures.lib_spr_numeric import LibrarySparseNumeric\nfrom .at_base import AdaptiveThresholdMixin\nfrom utils.math.stats import StatisticsAccumulator\n\n\n\nclass AdaptiveThresholdNumeric(AdaptiveThresholdMixin, LibrarySparseNumeric):\n \"\"\" represents a single receptor library that handles sparse mixtures that\n where receptors get active if their excitation is above a fraction of the\n total excitation \"\"\"\n \n \n def concentration_statistics_normalized_monte_carlo(self):\n \"\"\" determines the statistics of the normalized concentration\n numerically using a monte carlo method \"\"\"\n c_hat_stats = StatisticsAccumulator()\n for ci in self._sample_mixtures():\n ctot = ci.sum()\n if ctot > 0:\n ci /= ctot\n c_hat_stats.add(ci)\n \n return {'mean': c_hat_stats.mean, 'std': c_hat_stats.std,\n 'var': c_hat_stats.var}\n \n \n def _excitation_statistics_monte_carlo_base(self, ret_correlations=False):\n \"\"\" \n calculates the statistics of the excitation of the receptors.\n Returns the mean excitation, the variance, and the covariance matrix.\n This function just calculates the statistics of unnormalized\n excitations, which is implemented in the parent function\n We implemented this as a separate function so it can selectively be\n replaced with a version that is sped up by numba \n \"\"\"\n parent = super(AdaptiveThresholdNumeric, self)\n return 
parent.excitation_statistics_monte_carlo(ret_correlations)\n \n \n def excitation_statistics_monte_carlo(self, ret_correlations=False,\n normalized=False):\n \"\"\"\n calculates the statistics of the excitation of the receptors.\n Returns the mean excitation, the variance, and the covariance matrix.\n \n The algorithms used here have been taken from\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n \"\"\"\n if not normalized:\n return self._excitation_statistics_monte_carlo_base(\n ret_correlations)\n \n S_ni = self.sens_mat\n S_ni_mean = S_ni.mean()\n\n # initialize the statistics calculation\n stats = StatisticsAccumulator(ret_cov=ret_correlations)\n\n # sample mixtures and safe the requested data\n for c_i in self._sample_mixtures():\n e_n = np.dot(S_ni, c_i)\n e_n /= c_i.sum() * S_ni_mean #< normalize\n stats.add(e_n)\n\n # return the requested statistics\n if ret_correlations:\n try:\n enm_cov = stats.cov\n except RuntimeError:\n enm_cov = np.full((self.Nr, self.Nr), np.nan, np.double)\n en_var = np.diag(enm_cov)\n return {'mean': stats.mean, 'std': np.sqrt(en_var), 'var': en_var,\n 'cov': enm_cov}\n else: \n en_var = stats.var \n return {'mean': stats.mean, 'std': np.sqrt(en_var), 'var': en_var}\n \n\n def excitation_threshold_statistics(self):\n \"\"\" returns the statistics of the excitation threshold that receptors\n have to overcome to be part of the activation pattern.\n \"\"\"\n S_ni = self.sens_mat\n alpha = self.threshold_factor\n\n e_thresh_stats = StatisticsAccumulator()\n\n # iterate over samples and collect information about the threshold \n for c_i in self._sample_mixtures():\n e_n = np.dot(S_ni, c_i)\n e_thresh = alpha * e_n.mean()\n e_thresh_stats.add(e_thresh)\n\n return {'mean': e_thresh_stats.mean,\n 'var': e_thresh_stats.var,\n 'std': e_thresh_stats.std}\n \n \n def _sample_excitations(self, steps=None):\n \"\"\" sample excitation vectors \"\"\"\n S_ni = self.sens_mat\n\n # iterate over mixtures and yield corresponding excitations\n for c_i in self._sample_mixtures(steps):\n yield np.dot(S_ni, c_i)\n \n \n def _sample_activities(self, steps=None):\n \"\"\" sample activity vectors \"\"\"\n S_ni = self.sens_mat\n alpha = self.threshold_factor\n\n # iterate over mixtures and yield corresponding activities\n for c_i in self._sample_mixtures(steps):\n e_n = np.dot(S_ni, c_i)\n a_n = (e_n >= alpha * e_n.mean())\n yield a_n\n \n \n def receptor_activity_monte_carlo(self, ret_correlations=False):\n \"\"\" calculates the average activity of each receptor \"\"\"\n # prevent integer overflow in collecting activity patterns\n assert self.Nr <= self.parameters['max_num_receptors'] <= 63\n\n r_n = np.zeros(self.Nr)\n if ret_correlations:\n r_nm = np.zeros((self.Nr, self.Nr))\n \n for a_n in self._sample_activities():\n r_n[a_n] += 1\n if ret_correlations:\n r_nm[np.outer(a_n, a_n)] += 1\n \n r_n /= self._sample_steps\n if ret_correlations:\n r_nm /= self._sample_steps\n return r_n, r_nm\n else:\n return r_n \n\n\n def receptor_activity_estimate(self, ret_correlations=False,\n excitation_model='default', clip=False):\n \"\"\" estimates the average activity of each receptor \"\"\"\n raise NotImplementedError\n\n\n def receptor_crosstalk_estimate(self, ret_receptor_activity=False,\n excitation_model='default', clip=False):\n \"\"\" calculates the average activity of the receptor as a response to \n single ligands. 
\"\"\"\n raise NotImplementedError\n\n\n def receptor_activity_for_mixture(self, c_i):\n \"\"\" returns the receptors that are activated for the mixture `c_i` \"\"\"\n # calculate excitation\n e_n = np.dot(self.sens_mat, c_i)\n return (e_n >= self.threshold_factor * e_n.mean())\n\n \n def activation_pattern_for_mixture(self, c_i):\n \"\"\" returns the receptors that are activated for the mixture `c_i` \"\"\"\n # calculate excitation\n e_n = np.dot(self.sens_mat, c_i)\n a_n = (e_n >= self.threshold_factor * e_n.mean())\n # return the indices of the active receptors\n return np.flatnonzero(a_n)\n \n \n def mutual_information_monte_carlo(self, ret_prob_activity=False):\n \"\"\" calculate the mutual information using a monte carlo strategy. The\n number of steps is given by the model parameter 'monte_carlo_steps' \"\"\"\n # prevent integer overflow in collecting activity patterns\n assert self.Nr <= self.parameters['max_num_receptors'] <= 63\n\n base = 2 ** np.arange(0, self.Nr)\n\n # sample mixtures according to the probabilities of finding\n # substrates\n count_a = np.zeros(2**self.Nr)\n for a_n in self._sample_activities():\n # represent activity as a single integer\n a_id = np.dot(base, a_n)\n # increment counter for this output\n count_a[a_id] += 1\n \n # count_a contains the number of times output pattern a was observed.\n # We can thus construct P_a(a) from count_a. \n q_n = count_a / count_a.sum()\n \n # calculate the mutual information from the result pattern\n MI = -sum(q*np.log2(q) for q in q_n if q != 0)\n\n if ret_prob_activity:\n return MI, q_n\n else:\n return MI\n \n \n def mutual_information_estimate_fast(self):\n \"\"\" not implemented for adaptive thresholds \"\"\" \n raise NotImplementedError\n \n \n def set_threshold_from_activity_numeric(self, activity, method='auto',\n steps=50, verbose=False,\n estimate=None):\n \"\"\" determines the threshold that leads to a given `activity`.\n \n `method` determines the method that is used to determine the receptor\n activity\n `steps` sets the number of optimization steps that are used\n `verbose` determines whether intermediate output should be printed\n `estimate` gives an estimate for the threshold_factor. 
A good estimate\n generally speeds up the convergence of the algorithm.\n \"\"\"\n # lazy import of the Covariance Matrix Adaptation Evolution Strategy\n # package since it is only used in this method and the rest of the code\n # should be able to run without it\n import cma\n \n if not 0 < activity < 1:\n raise ValueError('Activity must be between 0 and 1')\n \n if estimate is None:\n estimate = 1\n \n def cost_function(alpha):\n \"\"\" objective function \"\"\"\n self.threshold_factor = alpha.mean()\n an = self.receptor_activity(method=method).mean()\n return (an - activity)**2\n \n options = {'maxfevals': steps,\n 'bounds': [0, np.inf],\n 'verb_disp': 1 * int(verbose),\n 'verb_log': 0}\n \n # determine the correct threshold by optimization\n # we here use a two dimensional search, because this particular\n # implementation of cma is not implemented for scalar optimization.\n res = cma.fmin(cost_function, [estimate]*2, 0.1, options=options)\n \n self.threshold_factor = res[0].mean()\n return self.threshold_factor \n ", "#!/usr/bin/env python2\n\nfrom __future__ import division\n\nimport sys, os\nsys.path.append(os.path.join(os.getcwd(), '../src'))\n\nimport time\nimport pickle\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport pandas as pd\n\nfrom binary_response import *\n\nfrom utils.math.stats import StatisticsAccumulator\nfrom adaptive_response.adaptive_threshold import (AdaptiveThresholdTheory,\n AdaptiveThresholdTheoryReceptorFactors)\n\n\nNr, alpha = 32, 1.5\nNs, s = 256, 32\ns = 0.1 * Ns\n#r_list = [2, 4, 8]\nan_list = [0.5, 0.2, 0.1]\nwidth = 1\n\n# Nr, alpha = 8, 1.3\n# Ns, s = 128, 32\n# r = [2, 4]\n#widths = [1, 2]\n\n\ndata = OrderedDict()\n\ndef get_alpha_from_an(alpha, theory, an):\n \"\"\" helper function \"\"\"\n theory.threshold_factor = alpha\n return an - np.mean(theory.receptor_activity())\n\n\nfor an in an_list:\n print('an=%g' % an)\n theory = AdaptiveThresholdTheoryReceptorFactors(\n Ns, Nr,\n mean_sensitivity=1, width=width,\n parameters={'c_distribution': 'log-normal'})\n theory.threshold_factor = alpha\n theory.choose_commonness('const', mean_mixture_size=s)\n theory.c_means = 1\n theory.c_vars = 1\n\n variances = np.linspace(0, 1, 16)\n MI_mean = []\n MI_std = []\n an_list = []\n for variance in variances:\n\n MI_stats = StatisticsAccumulator()\n for _ in xrange(1000):\n theory.choose_receptor_factors('log_normal', variance=variance)\n\n # determine the threshold factor\n try:\n alpha = optimize.brentq(get_alpha_from_an, 0.1, 5, args=(theory, an))\n except ValueError:\n alpha = optimize.newton(get_alpha_from_an, 2, args=(theory, an))\n theory.threshold_factor = alpha\n\n MI_stats.add(theory.mutual_information(warn=False))\n\n MI_mean.append(MI_stats.mean)\n MI_std.append(MI_stats.std)\n\n ans = theory.receptor_activity()\n an_list.append(ans)\n\n data[an] = {'MI_mean': np.array(MI_mean),\n 'MI_std': np.array(MI_std)}\n\n\nres = {'variances': variances,\n 'data': data}\n\nwith open('data/mutual_information_distributed.pkl', 'wb') as fp:\n pickle.dump(res, fp)\n" ]
[ [ "numpy.log", "numpy.exp", "numpy.sqrt" ], [ "numpy.random.rand", "numpy.eye", "numpy.fill_diagonal", "numpy.full" ], [ "numpy.diag", "numpy.dot", "numpy.log2", "numpy.sqrt", "numpy.arange", "numpy.flatnonzero", "numpy.full", "numpy.outer", "numpy.zeros" ], [ "scipy.optimize.newton", "numpy.array", "numpy.linspace", "scipy.optimize.brentq" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
zozo123/tcrdist3
[ "49c6554f16ad7f20f50d7303a8ac75268f5f601f" ]
[ "tcrdist/centers.py" ]
[ "\"\"\"\ncenters \n\nModule contains functions for evaluating TCRs as center(oids) of meta-clonotypes.\n\nfind_center \n\"\"\"\nimport warnings \nimport numpy as np\nfrom tcrdist.ecdf import distance_ecdf\n\ndef calc_radii(tr, tr_bkgd, chain = 'beta', ctrl_bkgd = 10**-5, use_sparse = True, max_radius=50, chunk_size=100, **kwargs):\n\t\"\"\"\n\tSimply find maximum radii based on an antigen enriched repertoires <tr> \n\tand a background antigen-naive background. \n\n\tIMPORTANT: This function will work with a precomputed TCRrep.rw_chain matrix if it \n\tis supplied and matches row dimensions of tr_bkgd. This way the user does \n\tnot have to recompute it each time they with to change ctrl_bkgd. It \n\talso allows them to customize the distance prior to this stage, although \n\t**kwargs can be passed to the compute_sparse_rect_distances or \n\tcompute_rect_distances function\n\n\tParameters\n\t----------\n\ttr : tcrdist.repertoire.TCRrep\n\t\tAn antigen enriched repertoires TCRrep object\n\ttr_bkgd : tcrdist.repertoire.TCRrep\n\t\tA background antigen-naive background TCRrep object\n\tchain : str\n\t\te.g, 'beta', ctrl_bkgd = 10**-5, use_sparse = True, max_radius=50, chunk_size=50\n\tctrl_bkgd : float\n\t\te.g., 10**-5, \n\tuse_sparse : bool \n\t\tIf True, uses a sparse implementation, \n\tmax_radius : int \n\t\tValues beyond this max_radius are set to 0 in sparcification\n\tchunk_size : int\n\t\tHow many rows to process at a time, based on memory available (100 is a good default, but for more see notes)\n\t**kwargs will be passed to either compute_sparse_rect_distances or compute_rect_distances\n\tReturns\n\t-------\n\tmax_radii : list\n\t\tList of length equal to the numer of rows in <tr> clond_df. \n\t\tThese are radius at which the number of expected background \n\t\tsequences are 'controlled' at a rate of <ctlr_bkgd> \n\n\tNotes\n\t-----\n\tTODO: Discuss chunk size and memory\n\t\"\"\"\n\tassert chain in ['alpha','beta','gamma', 'delta']\n\tif 'weights' not in tr_bkgd.clone_df.columns:\n\t\twarnings.warn(\"No weights provided in background repertoire, setting to 1\")\n\t\ttr_bkgd.clone_df['weights'] = 1\n\n\t# USER MAY HAVE ALRRADY COMPUTED TCRrep.rw_\n\tif getattr(tr, f\"rw_{chain}\", None) is not None:\n\t\tif getattr(tr, f\"rw_{chain}\").shape[1] == tr_bkgd.clone_df.shape[0]:\n\t\t\tprint(f\"IT APPEARS THAT (TCRrep.rw_{chain}) HAS ALREDY BEEN COMPUTED AND MATCHES BACKGROUND TCRrep SIZE\")\n\t\t\tprint(f\"USING EXISTING (TCRrep.rw_{chain}). 
SET TCRrep.rw_{chain} = None IF YOU WANT TO RECOMPUTE IT.\")\n\telse:\n\t\tif use_sparse:\n\t\t\tprint(f\"COMPUTING SPARSE RECT MATRIX TO FIND RADIUS: (TCRrep.rw_{chain})\")\n\t\t\tprint(f\"USING {tr.cpus} CPUS\")\n\t\t\ttr.compute_sparse_rect_distances(df = tr.clone_df, \n\t\t\t\t\t\t\t\t\t\t\t df2 = tr_bkgd.clone_df,\n\t\t\t\t\t\t\t\t\t\t\t radius=max_radius,\n\t\t\t\t\t\t\t\t\t\t\t chunk_size=chunk_size,\n\t\t\t\t\t\t\t\t\t\t\t **kwargs)\n\t\telse:\n\t\t\tprint(f\"COMPUTING FULL RECT MATRIX TO FIND RADIUS, (TCRrep.rw_{chain})\")\n\t\t\ttr.compute_rect_distances(df = tr.clone_df, \n\t\t\t\t\t\t\t\t\t df2 = tr_bkgd.clone_df, \n\t\t\t\t\t\t\t\t\t store = False,\n\t\t\t\t\t\t\t\t\t **kwargs)\n\tprint(f\"COMPUTING ECDFS PER TCR, TO FIND APPROPRIATE MAX RADII AT {ctrl_bkgd}\")\n\tthresholds, ecdfs = distance_ecdf(pwrect = getattr(tr, f\"rw_{chain}\"), \n\t\t\t\t\t\t thresholds = np.array(range(0,max_radius, 2)), \n\t\t\t\t\t\t weights= tr_bkgd.clone_df.weights, \n\t\t\t\t\t\t pseudo_count=0, \n\t\t\t\t\t\t skip_diag = False, \n\t\t\t\t\t\t absolute_weight = True)\n\t# Based on acceptable ctrl_bkgd, we find max acceptable radi from each TCR\n\t#import pdb; import pdb; pdb.set_trace()\n\tall_radii = [pd.Series(x, index = thresholds) for x in ecdfs]\n\tmax_radii = [s[s<=ctrl_bkgd].last_valid_index() for s in all_radii]\n\t\t# WARNING: There is a potential BUG in the output of the above line. \n\t\t# That is, iF a radius is None (the next line will fail, thus set Nones to 0.\n\tmax_radii = [i if (i is not None) else 0 for i in max_radii]\n\tprint(f\"RETURNING LIST OF MAX RADII\")\n\t\n\treturn max_radii, thresholds, ecdfs \n\n\n\n\n\n\n\ndef find_centers_beta(\t\n\tbackground_filename,\n\ttarget_filename,\n\tncpus,\n\tmin_nsubject,\n\tctrl_bkgd = 10**-5, \n\tprefilter = False):\n\timport os\n\timport pandas as pd\n\timport numpy as np \n\tfrom tcrdist.repertoire import TCRrep\n\tfrom tcrdist.neighbors import compute_ecdf, bkgd_cntl_nn2\n\tfrom tcrdist.automate import auto_pgen\n\tfrom tcrdist.rep_diff import neighborhood_diff\n\tfrom tcrdist.summarize import _summ, _dist_summ, _select, filter_gt, filter_is, test_for_subsets, test_for_almost_subsets\n\timport scipy.sparse\n\t\n\tdf_background = pd.read_csv(background_filename)\n\tprint(df_background)\n\ttr_background = TCRrep( cell_df = df_background.copy(), \n\t\t\t\t\t\t\t\t\torganism = \"human\", \n\t\t\t\t\t\t\t\t\tchains= ['beta'], \n\t\t\t\t\t\t\t\t\tcompute_distances = False)\n\n\tdf_mira = pd.read_csv(target_filename)\n\tdf_mira = df_mira[['subject','cell_type','v_b_gene', 'j_b_gene', 'cdr3_b_aa']]\n\tprint(df_mira)\n\ttr = TCRrep(\tcell_df = df_mira.copy(), \n\t\t\t\t\t\t\t\t\torganism = 'human', \n\t\t\t\t\t\t\t\t\tchains = ['beta'], \n\t\t\t\t\t\t\t\t\tdb_file = 'alphabeta_gammadelta_db.tsv',\n\t\t\t\t\t\t\t\t\tstore_all_cdr = False,\n\t\t\t\t\t\t\t\t\tcompute_distances = True)\n\t\n\tif prefilter:\n\t\t# We can greatly cut down on the number of searches if we drop centroids without minimum publicicity\n\t\tnn_df = neighborhood_diff(clone_df= tr.clone_df,\n\t\t\t\t\t\t\t\t pwmat = tr.pw_beta,\n\t\t\t\t\t\t\t\t count_col = 'count',\n\t\t\t\t\t\t\t\t x_cols = ['cell_type'],\n\t\t\t\t\t\t\t\t knn_radius = 37)\n\t\tdef tabulate_publicity(neighbor_df, clone_df, col_nn ='neighbors'):\n\t\t\t# Tabulate the number of unique subjects at each node\n\t\t\tneighbor_df['nsubject'] = neighbor_df[col_nn].apply( lambda x: len(set(_select(clone_df, iloc_rows =x, col = 'subject'))))\n\t\t\treturn neighbor_df\n\t\tprint(f\"TABULATING PUBLIC 
CLUSTERS\")\n\t\tnn_df = tabulate_publicity(nn_df, tr.clone_df)\n\t\tnn_df = filter_gt(nn_df, 'nsubject' , min_nsubject)\n\t\n\t\tif nn_df.shape[0] == 0:\n\t\t\tcenters_df = pd.DataFrame({}, columns = ['cdr3_b_aa','v_b_gene','j_b_gene','pgen','max_radi','target_hits','bkgd_hits','bkgd_hits_weighted','bkgd_total','ctrl','ctrl_weighted','target_misses','TR','TR2','BR_weighted','RR_weighted','OR_weighted','chi2dist','target_neighbors','target_seqs','background_neighbors','background_seqs','background_v','background_j','regex','target_re_hits','bkgd_re_hits','bkgd_re_weighted_hits','TR_re','BR_re_weighted','RR_re_weighted','OR_re_weighted','chi2re','chi2joint','nsubject'])\n\t\t\ttr.pw_beta[tr.pw_beta == 0] = 1 # set true zeros to 1\n\t\t\ttr.pw_beta[tr.pw_beta > 50] = 0 # ignores everything less than 100\n\t\t\tpw_beta_sparse = scipy.sparse.csr_matrix(tr.pw_beta)\t\n\t\t\treturn centers_df, pw_beta_sparse\n\t\n\t\ttr.clone_df = tr.clone_df.loc[nn_df.index, :].reset_index(drop = True)\n\t\tdel nn_df\n\t\t# Compute pairwise again with filtered set\n\t\ttr.compute_distances()\n\t\t# compute pgens automatically, currently parmap will max out cpus on this step \n\t\t\n\t\t\n\n\tprint(\"COMPUTING PROBABILITY OF GENERATION\")\n\tauto_pgen(tr)\n\tprint(f\"COMPUTING RECT DIST {tr.clone_df.shape[0]}x{tr_background.clone_df.shape[0]}\")\n\ttr.compute_rect_distances(df = tr.clone_df, \n\t\t\t\t\t\t\t df2 = tr_background.clone_df, \n\t\t\t\t\t\t\t store = False)\n\n\tassert tr.rw_beta.shape[0] == tr.clone_df.shape[0]\n\n\tcenters_df = bkgd_cntl_nn2(\ttr = tr, \n\t\t\t\t\t\t\t\ttr_background = tr_background,\n\t\t\t\t\t\t\t\tctrl_bkgd = ctrl_bkgd, #ctrl_bkgd = 2*10**-5\n\t\t\t\t\t\t\t\tweights =tr_background.clone_df.weights,\n\t\t\t\t\t\t\t\tcol = 'cdr3_b_aa',\n\t\t\t\t\t\t\t\tncpus = ncpus,\n\t\t\t\t\t\t\t\tthresholds = [x for x in range(0,38,2)], # Settign 38 as the max radius\n\t\t\t\t\t\t\t\tgenerate_regex = True, \n\t\t\t\t\t\t\t\ttest_regex = True)\n\n\tdef tabulate_publicity(neighbor_df, clone_df, col_nn ='neighbors'):\n\t\t# Tabulate the number of unique subjects at each node\n\t\tneighbor_df['nsubject'] = neighbor_df[col_nn].apply( lambda x: len(set(_select(clone_df, iloc_rows =x, col = 'subject'))))\n\t\treturn neighbor_df\n\n\tcenters_df = tabulate_publicity(neighbor_df = centers_df, clone_df = tr.clone_df, col_nn ='target_neighbors')\n\t\t\n\ttr.rw_beta[tr.rw_beta == 0] = 1 # set true zeros to 1\n\ttr.rw_beta[tr.rw_beta > 50] = 0 # ignores everything less than 100\n\trw_beta_sparse = scipy.sparse.csr_matrix(tr.rw_beta)\n\t#scipy.sparse.save_npzz(output_matrix_filename, rw_beta_sparse)\n\treturn centers_df, rw_beta_sparse\n\n\ndef rank_centers(centers_filename = None, centers_df = None, rank_column = 'chi2joint', min_nsubject = 2, min_nr = 1):\n\t\"\"\"\n\tThis function takes the output of tcrdist.neighbors.bkgd_cntl_nn2(), \n\ta set of scored metaclonotypes (centers - TCRs + radius) \n\tand ranks them by chi2 statistics, \n\tprioritizing those that include lots of target sequences \n\twhile minimizing inclusion of background sequences. 
\n\t\n\tParameters\n\t----------\n\tcenters_filename : str or None\n\t\tUser can only provide centers_df or centers_filename but not both\n\t\tThe filepath to a file containing metaclonotype centers information, generally produced with \n\t\ttcrdist.neighbors.bkgd_cntl_nn2()\n\tcenters_df : DataFrame or None\n\t\tUser can only provide centers_df or centers_filename but not both.\n\t\tThe Pandas DataFraem containing metaclonotype centers information, generally produced with \n\t\ttcrdist.neighbors.bkgd_cntl_nn2()\n\trank_column : str\n\t\tDefault : 'chi2joint' (or 'chi2joint' (radius+motif averaged) or chi2re'(using motif only), 'chi2dist' (using radius only) \n\tmin_nsubject : int\n\t\tDefault 2, (minimum publicity of the meta-clonotype). \n\t\tThat is, the minimum number of unique subjects contributing TCRs \n\t\tamong a group of biochemically TCRs to form a meta-clonotype. \n\tmin_nr : int\n\t\tDefault 1, (minimum non-redundancy). Once the metaclonotypes are ranked, \n\t\tthe function requires that lower ranked meta-clonotypes to have a minimum number\n\t\t<min_nr> of new sequences not already spanned by a higher ranked meta-clonotype. \n\t\n\tReturns\n\t-------\n\tdf : DataFrame\n\t\n\t\"\"\"\n\timport pandas as pd\n\timport ast\n\tfrom tcrdist.summarize import filter_gt, filter_is, test_for_subsets, test_for_almost_subsets\n\t\n\tif centers_filename is not None and centers_df is not None:\n\t\traise ValueError(\"rank centers can use <centers_filename> or <centers_df> but not both\")\n\tif centers_df is None:\n\t\tdf = pd.read_csv(centers_filename)\n\telse: \n\t\tdf = centers_df.copy()\n\n\t# VERY IMPORTANT NOTE, pandas reads lists as strings '[1,2]'; so we use ast.literal_eval to convert back t a list \n\tif not isinstance(df['target_neighbors'][0], list):\n\t\tdf['target_neighbors'] = df['target_neighbors'].apply(lambda s: list(ast.literal_eval(s)))\n\tdf = df.sort_values(rank_column, ascending = False)\n\tdf['novel'] = test_for_almost_subsets(df['target_neighbors'], min_nr)\n\tdf = filter_gt(df, 'nsubject', min_nsubject).copy()\n\tdf = filter_is(df, 'novel', 1).copy()\n\treturn df\n\n\nimport re\nimport os\nimport pandas as pd\nimport numpy as np\nfrom tcrdist.repertoire import TCRrep\nfrom tcrdist.adpt_funcs import _valid_cdr3\nimport scipy.sparse\nimport ast\n\ndef check_tsv_csv(filename, check_column_names = ['cdr3_b_aa', 'v_b_gene', 'j_b_gene']):\n\t\"\"\"\n\tTo avoid problems with .tsv or .csv, check for appropriate seperator based on expected columns\n\t\"\"\"\n\t#shape_csv = pd.read_csv(filename, sep = \",\").shape\n\tcolumns_csv = pd.read_csv(filename, sep = \",\").columns\n\t#shape_tsv = pd.read_csv(filename, sep = \"\\t\").shape\n\tcolumns_tsv = pd.read_csv(filename, sep = \"\\t\").columns\n\t\n\tif set(check_column_names) - set(columns_csv) == set():\n\t\tsep = \",\"\n\telif set(check_column_names) - set(columns_tsv) == set():\n\t\tsep = \"\\t\"\n\telse: \n\t\tsep = False\n\t\traise IOError(\"File provided does not appear to be either a .csv or .tsv or lacks required columns {check_column_names}\")\n\treturn sep\n\n\ndef centers_v_bulk(search_filename, bulk_filename, sep_search_filename = \"\\t\"):\n\t\"\"\"\n\t\"\"\"\n\t# Get appropriate seperator (in this case we expect \"\\t\", trust than verify)\n\tsep_bulk_filename = check_tsv_csv(filename = bulk_filename, check_column_names = ['cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'templates', 'productive_frequency','count'])\n\tbulk_df = pd.read_csv(bulk_filename, sep = sep_bulk_filename)\n\n\t# Adaptive uses the terms templates, 
which are synonymous with counts in our nomenclature\n\tbulk_df['count'] = bulk_df['templates'].copy()\n\t# Ensure that all bulk sequences have valid cdr3s\n\tv = bulk_df.cdr3_b_aa.apply(lambda x: _valid_cdr3(x))\n\t# Ensure length CDR3 > 5\n\tls = bulk_df.cdr3_b_aa.apply(lambda x: len(x) > 5)\n\tbulk_df = bulk_df[(v) & (ls)]\n\n\t# Select only important columns\n\tbulk_df = bulk_df[['cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'templates', 'productive_frequency','count']]\n\t# Assign a cid for tracking purposes\n\tbulk_df['cid'] = [f\"cid{i}\" for i in bulk_df.index]\n\t# Load clean bulk data to assign CDR1,2,3\n\ttr_bulk = TCRrep(\tcell_df = bulk_df, \n\t\t\t\t\t\torganism = 'human', \n\t\t\t\t\t\tchains = ['beta'], \n\t\t\t\t\t\tcompute_distances= False)\n\n\t# Here we expect comma or tab, but we check file for correct seperator (TAB, TWO MANY COMMAS CAUSES A FAILURE)\n\t#sep_search_filename = check_tsv_csv(\n\t#\tfilename = search_filename, \n\t#\tcheck_column_names = ['cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'max_radi' ,'pgen','regex'])\n\t\n\t\t# Read search file\n\tsearch_df = pd.read_csv(search_filename, sep = sep_search_filename )\n\t# Assign a count column of 1, for purposes of loading data into TCRrep\n\tsearch_df['count'] = 1\n\n\t# If source and index columns are missing we provide them, select only the relevant columns\n\ttry:\n\t\tsearch_df = search_df[['cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'count', 'max_radi' ,'pgen','index','source','regex','target_hits', 'target_seqs']].copy()\n\texcept KeyError:\n\t\tsearch_df['source'] = search_filename\n\t\tsearch_df['index'] = search_df.index.to_list()\n\t\tsearch_df = search_df[['cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'count', 'max_radi' ,'pgen','index','source','regex','target_hits', 'target_seqs']].copy()\n\n\t# Load the search file into TCRrep instance, getting CDR1, 2, 2.5 by vgene name infrerence\n\ttr_search = TCRrep( cell_df = search_df.copy(),\n\t\t\t\t\t\torganism = 'human', \n\t\t\t\t\t\tchains = ['beta'], \n\t\t\t\t\t\tcompute_distances= False)\n\n\t# Compute Rectangular distance (search on rows, bulk on columns)\n\ttr_search.compute_rect_distances(df = tr_search.clone_df, \n\t\t\t\t\t\t\t\t\t df2 = tr_bulk.clone_df, \n\t\t\t\t\t\t\t\t\t store = False)\n\n\t# Each search sequence has a max radius \n\tmax_radi = tr_search.clone_df.max_radi.values\n\t# Convert to a 2D array to permit broadcasting (n,1) with (n,m) rw_matrix\n\tmax_radi = max_radi.reshape(len(max_radi), 1)\n\n\t# Assert that [n,1] aligns with [n,m] \n\tassert max_radi.shape[0] == tr_search.rw_beta.shape[0]\n\t# Get index of True, where \n\tij = tr_search.rw_beta < max_radi\n\t# Get the column index of each bulk sequences within the row specific radius\n\ticol = [np.where(x) for x in ij]\n\tbulk_hits = np.sum(ij, axis = 1)\n\tassert np.all(bulk_hits == [len(x[0]) for x in icol])\n\t# Retrieve the distance of sequences with the max radius for each row\n\tidist =[tr_search.rw_beta[i,j] for i,j in enumerate(icol)]\n\n\t# Retrieve sequences from the bulk clone_df\n\tiseqs = [tr_bulk.clone_df['cdr3_b_aa'].iloc[x].to_list() for x in icol]\n\tivgenes = [tr_bulk.clone_df['v_b_gene'].iloc[x].to_list() for x in icol]\n\tijgenes = [tr_bulk.clone_df['j_b_gene'].iloc[x].to_list() for x in icol]\n\t\n\t# Retrieve abundances from the bulk clone df\n\titemplates = [tr_bulk.clone_df['templates'].iloc[x].to_list() for x in icol]\n\ticounts = [tr_bulk.clone_df['count'].iloc[x].to_list() for x in icol]\n\tifreqs = [tr_bulk.clone_df['productive_frequency'].iloc[x].to_list() for x in 
icol]\n\n\tassert [np.sum(x) for x in itemplates] == [np.sum(x) for x in icounts]\n\tisumtemplates = [np.sum(x) for x in itemplates]\n\tisumcounts = [np.sum(x) for x in icounts]\n\tisumfreqs = [np.sum(x) for x in ifreqs ]\n\n\tdf_summ = pd.DataFrame({ 'bulk_sum_freq' : isumfreqs,\n\t\t\t\t\t\t\t 'bulk_sum_templates': isumtemplates,\n\t\t\t\t\t\t\t 'bulk_sum_counts' : isumcounts,\n\t\t\t\t\t\t\t 'bulk_seqs' \t : iseqs,\n\t\t\t\t\t\t\t 'bulk_v_genes' \t : ivgenes,\n\t\t\t\t\t\t\t 'bulk_j_genes' \t : ijgenes,\n\t\t\t\t\t\t\t 'bulk_distances' \t : idist,\n\t\t\t\t\t\t\t 'bulk_templates' \t : itemplates,\n\t\t\t\t\t\t\t 'bulk_counts' \t : icounts, \n\t\t\t\t\t\t\t 'bulk_freqs' \t : ifreqs})\n\n\tassert tr_search.clone_df.shape[0] == search_df.shape[0]\n\n\t#search_df = pd.read_csv(search_filename, sep = sep)\n\t###### <<>>>>>\n\tresult_df = pd.concat([tr_search.clone_df, df_summ], axis = 1)\n\tresult_df['sourcefile'] = bulk_filename\n\tresult_df['searchfile'] = search_filename\n\t\n\tregex_on_bulk = [[re.search(pattern = r['regex'], string = s) for s in r['bulk_seqs']] for _,r in result_df.iterrows()]\n\tbulk_regex_match = [[True if (x is not None) else False for x in sublist] for sublist in regex_on_bulk]\n\tresult_df['bulk_regex_match'] = bulk_regex_match\n\tresult_df['bulk_sum_freqs_regex_adj'] = [pd.Series(r['bulk_freqs'], dtype = \"float64\")[pd.Series(r['bulk_regex_match'], dtype = 'bool')].sum() for i,r in result_df.iterrows()]\n\tresult_df['bulk_sum_templates_regex_adj'] = [pd.Series(r['bulk_templates'], dtype = \"int32\")[pd.Series(r['bulk_regex_match'], dtype = 'bool')].sum() for i,r in result_df.iterrows()]\n\tresult_df['bulk_sum_counts_regex_adj'] = [pd.Series(r['bulk_counts'], dtype = \"int32\")[pd.Series(r['bulk_regex_match'], dtype = 'bool')].sum() for i,r in result_df.iterrows()]\n\t\n\t# Tabulating Tcrdist == 0\n\tij = tr_search.rw_beta == 0\n\ticol = [np.where(x) for x in ij]\n\titemplates = [tr_bulk.clone_df['templates'].iloc[x].to_list() for x in icol]\n\ticounts = [tr_bulk.clone_df['count'].iloc[x].to_list() for x in icol]\n\tifreqs = [tr_bulk.clone_df['productive_frequency'].iloc[x].to_list() for x in icol]\n\tassert [np.sum(x) for x in itemplates] == [np.sum(x) for x in icounts]\n\tisumcounts0 = [np.sum(x) for x in icounts]\n\tisumtemplates0 = [np.sum(x) for x in itemplates]\n\tisumfreqs0 = [np.sum(x) for x in ifreqs ]\n\tresult_df['bulk_sum_freqs_tcrdist0'] = isumfreqs0\n\tresult_df['bulk_sum_templates_tcrdist0'] = isumtemplates0\n\tresult_df['bulk_sum_counts_tcrdist0'] = isumcounts0\n\n\n\t# Tabulate Perfect Match \n\tresult_df['cdr3_exact_match'] = [[s == r['cdr3_b_aa'] for s in r['bulk_seqs']] for _,r in result_df.iterrows()]\n\tresult_df['v_gene_exact_match'] = [[s == r['v_b_gene'] for s in r['bulk_v_genes']] for _,r in result_df.iterrows()]\n\tdef safely_compare_boolan_lists(a,b):\n\t\tif len(a) < 1:\n\t\t\tr = list()\n\t\telse:\n\t\t\tr = np.array(a) & np.array(b)\n\t\t\tr = list(r)\n\t\treturn r\n\tresult_df['vcdr3_exact_match'] = [safely_compare_boolan_lists(r['cdr3_exact_match'], r['v_gene_exact_match']) for _,r in result_df.iterrows()]\n\t# Summarize Exact Matches\n\tresult_df['bulk_sum_freqs_vcdr3match'] = [np.sum(pd.Series(r['bulk_freqs'],dtype = 'float64')[r['vcdr3_exact_match']]) for _,r in result_df.iterrows()]\n\tresult_df['bulk_sum_templates_vcdr3match'] = [np.sum(pd.Series(r['bulk_templates'],dtype = 'int32')[r['vcdr3_exact_match']]) for _,r in result_df.iterrows()]\n\tresult_df['bulk_sum_counts_vcdr3match'] = [np.sum(pd.Series(r['bulk_counts'],dtype = 
'int32')[r['vcdr3_exact_match']]) for _,r in result_df.iterrows()]\n\n\t# Tabulate Perfect Matches to any of the Target Seqs\n\tresult_df['target_seqs'] = [ast.literal_eval(x) for x in result_df['target_seqs']]\n\tresult_df['bulk_seq_within_targetseqs'] = [[s in r['target_seqs'] for s in r['bulk_seqs']] for _,r in result_df.iterrows()]\n\tresult_df['bulk_sum_freqs_within_targetset'] = [np.sum(pd.Series(r['bulk_freqs'],dtype = 'float64')[r['bulk_seq_within_targetseqs']]) for _,r in result_df.iterrows()]\n\tresult_df['bulk_sum_templates_within_targetset'] = [np.sum(pd.Series(r['bulk_templates'],dtype = 'int32')[r['bulk_seq_within_targetseqs']]) for _,r in result_df.iterrows()]\n\tresult_df['bulk_sum_counts_within_targetset'] = [np.sum(pd.Series(r['bulk_counts'],dtype = 'int32')[r['bulk_seq_within_targetseqs']]) for _,r in result_df.iterrows()]\n\n\n\tdesired_output_column_order = [ 'cdr3_b_aa',\n\t\t\t\t\t\t\t\t\t 'v_b_gene',\n\t\t\t\t\t\t\t\t\t 'j_b_gene',\n\t\t\t\t\t\t\t\t\t 'max_radi',\n\t\t\t\t\t\t\t\t\t 'pgen',\n\t\t\t\t\t\t\t\t\t 'index',\n\t\t\t\t\t\t\t\t\t 'source',\n\t\t\t\t\t\t\t\t\t 'target_seqs',\n\t\t\t\t\t\t\t\t\t 'regex',\n\t\t\t\t\t\t\t\t\t 'cdr1_b_aa',\n\t\t\t\t\t\t\t\t\t 'cdr2_b_aa',\n\t\t\t\t\t\t\t\t\t 'pmhc_b_aa',\n\t\t\t\t\t\t\t\t\t 'count',\n\t\t\t\t\t\t\t\t\t 'clone_id',\n\t\t\t\t\t\t\t\t\t 'sourcefile',\n\t\t\t\t\t\t\t\t\t 'searchfile',\n\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t 'bulk_distances',\n\t\t\t\t\t\t\t\t\t 'bulk_templates',\n\t\t\t\t\t\t\t\t\t 'bulk_counts',\n\t\t\t\t\t\t\t\t\t 'bulk_freqs',\n\t\t\t\t\t\t\t\t\t 'bulk_seqs',\n\t\t\t\t\t\t\t\t\t 'bulk_v_genes',\n\t\t\t\t\t\t\t\t\t 'bulk_j_genes',\n\t\t\t\t\t\t\t\t\t 'bulk_regex_match',\n\n\t\t\t\t\t\t\t\t\t 'bulk_sum_freq',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_templates',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_counts',\n\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t 'bulk_sum_freqs_regex_adj',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_templates_regex_adj',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_counts_regex_adj',\n\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t 'bulk_sum_freqs_tcrdist0',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_templates_tcrdist0',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_counts_tcrdist0',\n\t\t\t\t\t\t\t\t\t #'cdr3_exact_match',\n\t\t\t\t\t\t\t\t\t #'v_gene_exact_match',\n\t\t\t\t\t\t\t\t\t #'vcdr3_exact_match',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_freqs_vcdr3match',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_templates_vcdr3match',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_counts_vcdr3match',\n\n\t\t\t\t\t\t\t\t\t 'bulk_sum_freqs_within_targetset',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_templates_within_targetset',\n\t\t\t\t\t\t\t\t\t 'bulk_sum_counts_within_targetset']\n\n\tresult_df = result_df[desired_output_column_order] \n\n\t# COMPRESS SPARSE MATRIX FOR LATER REFRENCE\n\ttr_search.rw_beta[tr_search.rw_beta == 0] = 1 # set true zeros to 1\n\ttr_search.rw_beta[tr_search.rw_beta > 50] = 0 # ignores everything less than 100\n\trw_beta_sparse = scipy.sparse.csr_matrix(tr_search.rw_beta)\n\ttr_search.rw_beta = rw_beta_sparse\n\n\treturn result_df, rw_beta_sparse, tr_search\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "pandas.DataFrame", "numpy.array", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
RobertClay/Paper1
[ "d08bbed37add5a128db20c24e2eea0727508dbd7", "d08bbed37add5a128db20c24e2eea0727508dbd7" ]
[ "minos/data_generation/US_full_data_parser.py", "minos/data_generation/US_format_raw.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 17 14:12:06 2021\n\n@author: robertclay\n\nThis file is for parsing the understanding societies data over all waves into \na persistent data frame containing immutable person attributes for all\nagents over all times and variable frames containing values that do change\nover each wave.\n\nThis file is necessary due to the formatting of BHPS. If a person enters\na given wave most of their attributes are not carries on to the next wave. \nFor example their ethnicity is registered when they enter but then recorded as \nN/A (-8) for remaining waves. Even variable attributes such as their age may \nbe recorded as N/A when they can change. In some cases this may be due to\nno follow up. Someone with a disease may not be asked about it again. \nIf this not a chronic disease it is difficult to say if they have maintained \ntheir state and interpolation is required.\n\nFor now, this file is just reorganising the 4 main attributes extracted for \nvivarium age, ethnicity, id number, and sex to keep the population high.\n\"\"\"\n\nimport glob\nfrom string import ascii_lowercase\nimport pandas as pd\n\ndef all_wave_directories(source, suffix):\n \"\"\" Get all file names for bhps waves\n \n\n Parameters\n ----------\n source : str\n `minos` where are directories.\n\n Returns\n -------\n directories : list\n List of `directories` to extract from.\n\n \"\"\"\n \n directories = sorted(glob.glob(source + suffix + \"*\"))\n return directories\n\ndef extract_data_files(directories):\n \n \n return datasets\n\nif __name__ == \"__main__\":\n source = \"/Users/robertclay/Documents/6614stata_471A424C959CCFC9AE423A866B5794B9_V1/UKDA-6614-stata/stata/stata11_se/\"\n bhps_dirs = all_wave_directories(source, \"bhps_\")\n uklhs_dirs = all_wave_directories(source, \"ukhls_w\")\n \n uklhs_cross = pd.read_stata(uklhs_dirs[-1] + \"/xwavedat.dta\")\n uklhs_cross = uklhs_cross[[\"pid\", \"ethn_dv\", \"sex\"]]\n ", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" This file formats Understanding Society variables for using in a microsimulation.\nIt DOES NOT handle missing data. see US_missing.py.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport argparse\n\nimport US_utils\n\n# suppressing a warning that isn't a problem\npd.options.mode.chained_assignment = None # default='warn' #supress SettingWithCopyWarning\n\n# TODO there is an issue with the connection between the BHPS and ukhls waves. (see format_time)\n\"\"\" There seems to be a gap between 2007-2008 where people who were in the old \nstudy do not transfer to the new one until next year. Need to find out why this is.\n\nThis is most likely me missing some variable in the data which would better serve\nas a time variable. (e.g. interview year/month).\n\nFor now just assume the end of BHPS and start of ukhls occur in the same year.\nRecord years by the END date. The first wave is SEP 90 - SEP 91 so is recorded as \n1991_US_Cohort.csv. \n\"\"\"\n\n\"\"\"\nLoad persistent JSON data dictionaries for US. These are data dictionaries that simplify categorical variables\nfrom integers to strings for easier readability. Its easier to see white british, black-african than digits 1, 5.\n\nThese dictionaries also simplify some variables for use in a microsim. \nE.g. education state has a large number of equivalent qualifications (O-Level, GCSE, CSE, etc.) 
compressed into one.\nWhile this destroys some detail it makes it much easier to make transition models particularly for rare items.\n\nNOTE there are some time dependent dictionaries here. BHPS changes formatting in 2002 for ethnicity so there are \ntwo ethnicity dictionaries to reflect that. The time prefix indicates when the change takes place or the end of the \ndataset (2008 for BHPS).\n\"\"\"\n\n# Where are all persistent files for US data. E.g. int to string variable encodings.\njson_source = \"persistent_data/JSON/\"\n# Sex.\nsex_dict = US_utils.load_json(json_source, \"sexes.json\")\n# Ethnicity.\nethnicity_bhps_2002 = US_utils.load_json(json_source, \"ethnicity_bhps_2002.json\")\nethnicity_bhps_2008 = US_utils.load_json(json_source, \"ethnicity_bhps_2008.json\")\nethnicity_ukhls = US_utils.load_json(json_source, \"ethnicity_ukhls.json\")\n# Employment.\nlabour_bhps = US_utils.load_json(json_source, \"labour_status_bhps.json\")\nlabour_ukhls = US_utils.load_json(json_source, \"labour_status_ukhls.json\")\n# Education.\neducation_bhps = US_utils.load_json(json_source, \"education_bhps.json\")\n# Use simplified one for ukhls currently.\n# education_ukhls = US_utils.load_json(json_source, \"education_ukhls.json\")\neducation_ukhls = US_utils.load_json(json_source, \"education_ukhls_simple.json\")\n# Depression.\ndepression = US_utils.load_json(json_source, \"depression.json\")\ndepression_change = US_utils.load_json(json_source, \"depression_change.json\")\n# Heating.\nheating_bhps = US_utils.load_json(json_source, \"heating_bhps.json\")\nheating_ukhls = US_utils.load_json(json_source, \"heating_ukhls.json\")\n# Location\nregion_dict = US_utils.load_json(json_source, \"region.json\")\n\n\ndef format_sex(data):\n \"\"\" Format sex data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data to process genders of.\n Returns\n -------\n data : pd.DataFrame\n Data with processed genders.\n\n \"\"\"\n # Remap sex data from ints to strings. Easier to interpret.\n data[\"sex\"] = data[\"sex\"].astype(str).map(sex_dict)\n return data\n\n\ndef format_location(data, year):\n \"\"\" Format any spatial data. Does nothing yet.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data before location formatting.\n Returns\n -------\n data : pd.DataFrame\n Data with location formatting.\n\n \"\"\"\n # No spatial data yet so does nothing.\n # data[\"MSOA\"] = \"no_location\"\n # data[\"location\"] = \"no_location\"\n data[\"region\"] = data[\"region\"].astype(str).map(region_dict)\n return data\n\n\ndef format_mental_state(data):\n \"\"\" Format mental health data.\n\n Parameters\n ----------\n data : pd.DataFrame\n US data with raw depression columns.\n\n Returns\n -------\n data : pd.DataFrame\n US data with formatted depression columns.\n \"\"\"\n # TODO Only using binary values for now. 
Makes it easier to show off traditional binary models.\n data[\"depression\"] = data[\"depression\"].astype(str).map(depression)\n data[\"depression_change\"] = data[\"depression_change\"].astype(str).map(depression_change)\n return data\n\n\ndef format_academic_year(data):\n \"\"\" Format academic year variables.\n\n Parameters\n ----------\n data : pd.DataFrame\n US data with raw academic year columns.\n\n Returns\n -------\n data: pd.DataFrame\n The data frame with the academic year column added.\n\n \"\"\"\n # If someone is 15 years old force them to be born after september.\n # They have to be in the graduating GCSE academic year.\n # No other birth months can be inferred.\n # I thought about using the interview month as well but it doesnt really help.\n # Can be combined with birth_year to determine if their birthday is before/after the interview.\n # Pretty sure it academic year cannot be fully derived from this.\n # TODO I dont have special access to birth month data. Give random months as a stop gap.\n data[\"birth_month\"] = 0\n new_months = data.loc[data[\"age\"] == 15, \"birth_month\"].apply(lambda x: np.random.randint(9, 13))\n data.loc[data[\"age\"] == 15, \"birth_month\"] = new_months\n new_months = data.loc[data[\"age\"] > 15, \"birth_month\"].apply(lambda x: np.random.randint(1, 13))\n data.loc[data[\"age\"] > 15, \"birth_month\"] = new_months\n\n data[\"academic_year\"] = data[\"birth_year\"]\n # Everyone born before September is bumped down to the previous academic year.\n data.loc[data[\"birth_month\"] < 9, \"academic_year\"] -= 1\n return data\n\n\ndef format_time(data, year):\n \"\"\"Format any time variables in US.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data without time formatting.\n year : int\n The `year` of the wave being processed.\n\n Returns\n -------\n data : pd.DataFrame\n Data with time formatting.\n \"\"\"\n # See to do messages at the top of the file.\n # Theres some wierd overlap in the pidp data. Theres essentially a gap in September 2008 with noone in it from\n # BHPS which makes transition models fail.\n # Following 2 lines are a stupid work around.\n # if self.year <= 2008:\n # self.year += 1\n data[\"time\"] = year\n return data\n\n\n##########################\n# BHPS specific functions.\n##########################\n\ndef format_bhps_columns(year):\n \"\"\" Get BHPS literal and formatted column names.\n\n Parameters\n ----------\n year : int\n The year of the wave being processed.\n Returns\n -------\n attribute_columns, column_names : str\n The literal attribute_columns names used from BHPS data. (ba_age, ba_qfachi...)\n Corresponding cleaner column_names for use in a microsim. (age, education_state...)\n \"\"\"\n\n # consistent column names accross all waves\n attribute_columns = [\"pidp\", # Cross wave identifier\n \"hidp\", # Cross wave household identified\n \"sex\", # Sex.\n \"age\", # Age.\n \"doby\", # Birth Year.\n \"qfachi\", # Highest education\n # \"hiqual_dv\", # Highest education\n \"scghqi\", # GHQ Depression.\n \"hlprbi\", # Clinical Depression.\n \"jbstat\", # Labour status.\n \"jbnssec8_dv\", # NSSEC code.\n \"cduse5\", # fridge/freezer\n \"cduse6\", # washing machine\n \"cduse7\", # tumble dryer\n \"cduse8\", # dishwasher\n \"cduse9\", # microwave oven\n \"gor_dv\", # Government Region Derived.\n \"hsprbk\" # accom: lack of adequate heating\n ]\n\n column_names = [\"pidp\", # pidp\n \"hidp\", # hidp\n \"sex\", # sex\n \"age\", # age\n \"birth_year\", # doby\n \"education_state\", # qfachi. 
Was hiqual_dv but too much missing.\n \"depression_change\", # scghqi\n \"depression\", # hlprbi\n \"labour_state\", # jbstat\n \"job_sec\", # jbnssec8_dv\n \"fridge_freezer\", # cduse5\n \"washing_machine\", # cduse6\n \"tumble_dryer\", # cduse7\n \"dishwasher\", # cduse8\n \"microwave\", # cduse9\n \"region\", # gor_dv\n \"heating\" # hsprbk\n ]\n\n # Variables that change names over dataset.\n # First up is job duration. Changes names in wave 6,\n if year < 1996:\n attribute_columns += [\"cjsbgm\", \"cjsbgy\"] # Month and year when current employment started.\n column_names += [\"job_duration_m\", \"job_duration_y\"]\n else:\n attribute_columns += [\"cjsbgm\", \"cjsbgy4\"] # Month and year when current employment started.\n column_names += [\"job_duration_m\", \"job_duration_y\"]\n\n # Name of SIC code variable changes for some reason half way through.\n if year >= 2001:\n attribute_columns += [\"jbsic92\"] # SIC 92 codes\n column_names += [\"job_industry\"]\n else:\n attribute_columns += [\"jbsic\"] # SIC 92 codes.\n column_names += [\"job_industry\"]\n\n # Name change for race as well.\n if year <= 2001:\n attribute_columns += [\"race\"]\n column_names += [\"ethnicity\"] # Ethnicity.\n elif year > 2001:\n attribute_columns += [\"racel_bh\"]\n column_names += [\"ethnicity\"] # Ethnicity.\n\n # SOC codes updated every decade.\n if year < 2000:\n attribute_columns += [\"jbsoc90_cc\"]\n column_names += [\"job_occupation\"] # Occupation code.\n else:\n attribute_columns += [\"jbsoc00_cc\"]\n column_names += [\"job_occupation\"] # Occupation code.\n\n # Add wave specific letters of BHPS variable names.\n # Do not add letters to cross wave variables (IDs).\n # The format here is ba_sex for wave 1, bb_sex for wave 2 and so on..\n # pidp stays the same for all waves.\n attribute_columns = US_utils.bhps_wave_prefix(attribute_columns, year)\n\n return attribute_columns, column_names\n\n\ndef format_bhps_ethnicity(data, year):\n \"\"\" Format ethnicities for BHPS data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Raw data to format ethnicities of.\n year : int\n The year of the wave being processed.\n Returns\n -------\n data : pd.DataFrame\n Data with ethnicities formatted.\n \"\"\"\n\n # Mapping changes in 2002 as categories expanded.\n if year > 2001:\n eth_dict = ethnicity_bhps_2008\n else:\n eth_dict = ethnicity_bhps_2002\n # Map ethnicity int codes to strings.\n data[\"ethnicity\"] = data[\"ethnicity\"].astype(str).map(eth_dict)\n return data\n\n\ndef format_bhps_education(data):\n \"\"\" Format US education data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame before formatting educations.\n\n Returns\n -------\n data : pd.DataFrame\n Data after formatting educations.\n \"\"\"\n # Map education codes to readable strings.\n data[\"education_state\"] = data[\"education_state\"].astype(str).map(education_bhps)\n return data\n\n\ndef format_bhps_employment(data):\n \"\"\" Format employment variables.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame to format employment for.\n\n Returns\n -------\n data : Pd.DataFrame\n Data with formatted education column.\n \"\"\"\n # Remap job status ints to strings.\n data[\"labour_state\"] = data[\"labour_state\"].astype(str).map(labour_bhps)\n return data\n\n\ndef format_bhps_heating(data):\n \"\"\" Format heating variable.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame to format heating for.\n\n Returns\n -------\n data : Pd.DataFrame\n Data with formatted heating column.\n \"\"\"\n ## Need to reverse 
the binary heating variable as it is in the opposite orientation to the corresponding ukhls var\n data[\"heating\"] = data[\"heating\"].fillna(-9) # have to replace a single NA value before mapping\n data[\"heating\"] = data[\"heating\"].astype(int).astype(str).map(heating_bhps) # convert to int then string then map\n return data\n\n\n######################\n# ukhls Wave Functions\n######################\n\n\ndef format_ukhls_columns(year):\n \"\"\" Specify subset of ukhls columns to be used in microsim.\n\n Parameters\n ----------\n year : int\n The year of the wave being processed.\n\n Returns\n -------\n attribute_columns, column_names: str\n The attribute_columns names directly from US data. Which columns will be extracted.\n The simplified column_names that are used in the microsim.\n \"\"\"\n\n attribute_columns = [\"pidp\", # Cross wave personal identifier.\n \"hidp\", # Cross wave household identified\n \"sex\", # Sex.\n \"dvage\", # Age.\n \"doby_dv\", # Birth Year.\n \"racel_dv\", # Ethnicity.\n \"qfhigh_dv\", # Highest Qualification.\n # \"hiqual_dv\", # Highest Qualification.\n \"scghqi\", # GHQ depression.\n \"jbstat\", # job status\n \"jbsic07_cc\", # SIC code for job (if any).\n \"jbnssec8_dv\", # NSSEC socioeconomic code.\n \"cduse5\", # deep freeze or fridge freezer\n \"cduse6\", # washing machine\n \"cduse7\", # tumble dryer\n \"cduse8\", # dishwasher\n \"cduse9\", # microwave oven\n \"hheat\",\n \"gor_dv\", # Government Region Derived.\n \"sf12mcs_dv\", # SF-12 Mental Component Summary (PCS)\n \"fihhmnnet1_dv\", # total household net income - no deductions\n \"rentgrs_dv\", # monthly gross rent, including housing benefit\n \"xpmg_dv\", # monthly mortgage payment including imputations\n \"ieqmoecd_dv\", # Modified OECD equivalence scale\n \"intdatey\", # household interview year\n \"intdatem\", # household interview month\n \"ctband_dv\" # council_tax\n ]\n # New names for the above columns.\n column_names = [\"pidp\",\n \"hidp\",\n \"sex\",\n \"age\",\n \"birth_year\",\n \"ethnicity\",\n \"education_state\",\n \"depression_change\",\n \"labour_state\",\n \"job_industry\",\n \"job_sec\",\n \"fridge_freezer\", # cduse5\n \"washing_machine\", # cduse6\n \"tumble_dryer\", # cduse7\n \"dishwasher\", # cduse8\n \"microwave\", # cduse9\n \"heating\", # hheat\n \"region\", # gor_dv\n \"SF-12\", # sf12mcs_dv\n \"hh_netinc\", # fihhmnnet1_dv\n \"hh_rent\", # rentgrs_dv\n \"hh_mortgage\", # xpmg_dv\n \"oecd_equiv\", # ieqmoecd_dv\n \"hh_int_y\", # intdatey\n \"hh_int_m\", # intdatem\n \"council_tax\" # ctband_dv\n ]\n\n # Variables that change names for ukhls data.\n # Attributes for job duration.\n # First wave of employment attribute names are different.\n # 7th wave also changes names.\n if year < 2009:\n attribute_columns += [\"jbbgm\", \"jbbgy\"] # What month and year did current employment start.\n column_names += [\"job_duration_m\", \"job_duration_y\"]\n elif year < 2014:\n attribute_columns += [\"jbbgdatm\", \"jbbgdaty\"] # What month and year did current employment start.\n column_names += [\"job_duration_m\", \"job_duration_y\"]\n else:\n attribute_columns += [\"jbbgm\", \"jbbgy\"] # What month and year did current employment start.\n column_names += [\"job_duration_m\", \"job_duration_y\"]\n\n # SOC codes updated every decade.\n if year < 2010:\n attribute_columns += [\"jbsoc00_cc\"]\n column_names += [\"job_occupation\"]\n else:\n attribute_columns += [\"jbsoc10_cc\"]\n column_names += [\"job_occupation\"]\n # clinical depression changes in wave 10.\n if year < 2017:\n 
attribute_columns += [\"hcond17\"] # Clinical depression.\n column_names += [\"depression\"]\n else:\n attribute_columns += [\"hcondcode38\"] # Clinical depression.\n column_names += [\"depression\"]\n\n # All attributes have a wave dependent suffix apart from identifiersb (pidp, hidp etc.).\n # Adjust attribute_columns as necessary.\n # E.g age -> a_age, age -> b_age ... for waves of ukhls.\n\n attribute_columns = US_utils.ukhls_wave_prefix(attribute_columns, year)\n\n return attribute_columns, column_names\n\ndef format_council_tax(data):\n \"\"\"Format any council tax data for calculation of monthly overheads.\"\"\"\n\ndef format_ukhls_ethnicity(data):\n \"\"\" Format ethnicity variables.\n\n\n Parameters\n ----------\n data : pd.DataFrame\n Raw data to format ethnicities of.\n\n\n Returns\n -------\n data : pd.DataFrame\n Data with ethnicities formatted.\n \"\"\"\n # Map ethnicity integers to strings.\n data[\"ethnicity\"] = data[\"ethnicity\"].astype(str).map(ethnicity_ukhls)\n return data\n\n\ndef format_ukhls_education(data):\n \"\"\" Format US education data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame before formatting educations.\n Returns\n -------\n data : pd.DataFrame\n Data after formatting educations.\n \"\"\"\n # Map education ints to strings.\n data[\"education_state\"] = data[\"education_state\"].astype(str).map(education_ukhls)\n return data\n\n\ndef format_ukhls_employment(data):\n \"\"\" Format employment columns for data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame to format employment for.\n\n Returns\n -------\n data : Pd.DataFrame\n Which wave of data is being saved.\n \"\"\"\n # TODO Code moved to US_missing_deterministic. move problem description somewhere too.\n \"\"\"Correct some incorrectly missing data here.\n Some people are registered as unemployed but are missing job codes.\n Makes them impossible to differentiate with those who are missing for other reasons.\n Hence will assign anyone who is unemployed SIC/SOC/NSSEC code 0.\n 0 is undefined in all 3 sets.\n Calculate people who are unemployed (labour state 2) but registered as missing in SIC codes.\n Assign these people 0 value SIC/SOC/NSSEC codes. Also set their job duration to 0.\n\n There are a lot of potential reasons for this.\n People who transition job status mid year and arent properly recorded.\n` E.g. check pid 274047 this person retires in April 2008.\n They are incorrectly recorded as still employed by Sept. 
2008 but have no company data at all.\n They are incorrectly employed and have -8 for SOC/SIC/NSSEC values because they have no company.\n\n Seems to be a clash between behaviour for the majority of the year and\n current behaviour.\n For now just assume they are unemployed and assign their industries to 0.\"\"\"\n\n # Remap job statuses.\n data[\"labour_state\"] = data[\"labour_state\"].astype(str).map(labour_ukhls)\n return data\n\n\ndef format_ukhls_heating(data):\n \"\"\" Format heating variable.\n\n Parameters\n ----------\n data : pd.DataFrame\n Data frame to format heating for.\n\n Returns\n -------\n data : Pd.DataFrame\n Data with formatted heating column.\n \"\"\"\n ## Need to reverse the binary heating variable as it is in the opposite orientation to the corresponding ukhls var\n data[\"heating\"] = data[\"heating\"].astype(str).map(heating_ukhls)\n return data\n\n\ndef combine_indresp_hhresp(year, indresp_name, hhresp_name):\n \"\"\" Function to collect and merge the indresp and hhresp files for a specific year.\n\n Parameters\n ----------\n year : int\n The `year` of the wave being processed.\n indresp_name : str\n The name of the indresp file for specific year\n hhresp_name : str\n Name of the hhresp file for specific year\n Returns\n -------\n indresp_hhresp: Pd.DataFrame\n Dataframe containing indresp and hhresp data combined on hid\n \"\"\"\n # load both indresp and hhresp files\n indresp = US_utils.load_file(indresp_name)\n hhresp = US_utils.load_file(hhresp_name)\n\n # calculate wave letter based on year, and generate hidp variable name for use as merge key\n wave_letter = US_utils.get_wave_letter(year)\n if year < 2008:\n merge_key = f\"b{wave_letter}_hidp\"\n else:\n merge_key = f\"{wave_letter}_hidp\"\n\n # merge the data on the hidp variable and return combined dataframe.\n # Code here prevents duplicate columns that occur in both datasets. 44444\n combined = indresp.merge(right=hhresp, on=merge_key, suffixes=('', '_delme'))\n combined = combined[[c for c in combined.columns if not c.endswith(\"_delme\")]]\n return combined\n\n\ndef format_data(year, data):\n \"\"\" Main function for formatting US data. 
Loads formats and saves each wave sequentially.\n\n Parameters\n ----------\n year : int\n The `year` of the wave being processed.\n data : Pd.DataFrame\n Pandas DataFrame containing both indresp and hhresp data merged on hidp\n Returns\n -------\n data : Pd.DataFrame\n Returns a formatted dataframe to be saved as csv\n \"\"\"\n # Load file and take desired subset of columns.\n # data = US_utils.load_file(file_name)\n if year <= 2007:\n attribute_columns, column_names = format_bhps_columns(year)\n else:\n attribute_columns, column_names = format_ukhls_columns(year)\n data = US_utils.subset_data(data, attribute_columns, column_names)\n\n # Format columns by categories.\n # Categories that are formatted the same regardless of wave.\n data = format_sex(data)\n data = format_academic_year(data)\n data = format_mental_state(data)\n data = format_time(data, year)\n data = format_location(data, year)\n ukhls_heat_skipyrs = [2010, 2012, 2014]\n\n # Categories that vary for bhps/ukhls waves.\n if year <= 2007:\n data = format_bhps_ethnicity(data, year)\n data = format_bhps_education(data)\n data = format_bhps_employment(data)\n data = format_bhps_heating(data) # no data before 1996.\n elif year > 2007:\n data = format_ukhls_ethnicity(data)\n data = format_ukhls_employment(data)\n data = format_ukhls_education(data)\n #if year not in ukhls_heat_skipyrs:\n data = format_ukhls_heating(data)\n\n return data\n\n\ndef main(wave_years: list, file_source: str, file_output: str) -> None:\n \"\"\" Main file for processing raw US data.\n\n Parameters\n ----------\n wave_years: list\n What years to process data for. Data goes from 1990-2021 currently.\n file_source, file_output : str\n Where is minos of the raw US data.\n Where should processed data be output to.\n Which section of US data is being used. Usually independent response (indresp).\n \"\"\"\n\n # Loop over wave years and format data.\n for year in wave_years:\n # Two types of wave with different naming conventions and variables.\n # The BHPS waves circa 2008 and ukhls waves post 2009 have different classes for processing.\n\n # Merge the indresp and hhresp files for a particular year then format\n indresp_name = US_utils.US_file_name(year, file_source, \"indresp\")\n hhresp_name = US_utils.US_file_name(year, file_source, \"hhresp\")\n indresp_hhresp = combine_indresp_hhresp(year, indresp_name, hhresp_name)\n\n data = format_data(year, indresp_hhresp)\n\n # check for and remove any null rows (1 created in bhps due to merge)\n data = data.loc[~data[\"pidp\"].isnull()]\n\n # Save formatted data\n US_utils.save_file(data, file_output, \"\", year)\n\n\nif __name__ == \"__main__\":\n years = np.arange(1990, 2019)\n\n # Take source from command line args (or most likely from Makefile variable)\n parser = argparse.ArgumentParser(description=\"Raw Data formatting from Understanding Society\")\n parser.add_argument(\"-s\", \"--source_dir\", required=True, type=str,\n help=\"The source directory for Understanding Society data.\")\n args = parser.parse_args()\n\n # Get source from args\n source = args.source_dir\n output = \"data/raw_US/\"\n\n main(years, source, output)\n" ]
[ [ "pandas.read_stata" ], [ "numpy.arange", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xinyual/horovod
[ "65ae9afd05b854bc0dc9719dc246454edadf9487" ]
[ "test/parallel/test_torch.py" ]
[ "# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.\n# Modifications copyright (C) 2019 Intel Corporation\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom distutils.version import LooseVersion\n\nimport inspect\nimport itertools\nimport os\nimport platform\nimport sys\nimport unittest\nimport warnings\nimport time\nimport json\n\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport pytest\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport horovod.torch as hvd\n\nsys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))\n\nfrom common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n\n_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')\n\nccl_supported_types = set([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor,\n torch.DoubleTensor])\n\n# Set environment variable for dynamic timeline API test\nos.environ[\"HOROVOD_TIMELINE\"] = \"DYNAMIC\"\n\n# Set environment variable to enable adding/removing process sets after initializing Horovod.\nos.environ[\"HOROVOD_DYNAMIC_PROCESS_SETS\"] = \"1\"\n\nclass TorchTests(unittest.TestCase):\n \"\"\"\n Tests for ops in horovod.torch.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TorchTests, self).__init__(*args, **kwargs)\n warnings.simplefilter('module')\n\n def convert_cpu_fp16_to_fp32(self, *values):\n # PyTorch doesn't support any CPU ops on FP16 tensors.\n # In case we need to do ops, we will convert tensor to FP32 here.\n result = []\n for value in values:\n if value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda:\n result.append(value.float())\n else:\n result.append(value)\n return result\n\n def cast_and_place(self, tensor, dtype):\n if dtype.is_cuda:\n return tensor.cuda(hvd.local_rank()).type(dtype)\n return tensor.type(dtype)\n\n def filter_supported_types(self, types):\n if 'CCL_ROOT' in os.environ:\n types = [t for t in types if t in ccl_supported_types]\n return types\n\n def test_gpu_required(self):\n if not torch.cuda.is_available():\n skip_or_fail_gpu_test(self, \"No GPUs available\")\n\n @pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')\n def test_horovod_reinit(self):\n \"\"\"Test that Horovod can init -> shutdown -> init successfully.\"\"\"\n mpi_rank, _ = mpi_env_rank_and_size()\n gloo_rank = int(os.getenv('HOROVOD_RANK', -1))\n\n is_mpi = gloo_rank == -1\n if is_mpi:\n # Horovod cannot be re-initialized after shutdown when using MPI, so\n # this test can only be done using the Gloo controller\n self.skipTest(\"Gloo is not available\")\n\n hvd.init()\n rank, size = hvd.rank(), hvd.size()\n hvd.shutdown()\n hvd.init()\n rank2, size2 = hvd.rank(), hvd.size()\n\n assert rank == rank2\n assert size == size2\n\n def 
test_horovod_is_initialized(self):\n \"\"\"Test that is_initialized returned by hvd.is_initialized() is correct.\"\"\"\n hvd.init()\n assert hvd.is_initialized()\n\n gloo_rank = int(os.getenv('HOROVOD_RANK', -1))\n is_mpi = gloo_rank == -1\n if is_mpi:\n # Only applies for Gloo\n self.skipTest(\"Gloo is not available\")\n\n hvd.shutdown()\n assert not hvd.is_initialized()\n hvd.init()\n\n def test_horovod_rank(self):\n \"\"\"Test that the rank returned by hvd.rank() is correct.\"\"\"\n mpi_rank, _ = mpi_env_rank_and_size()\n gloo_rank = int(os.getenv('HOROVOD_RANK', -1))\n\n # The mpi rank does not match gloo rank, we need to figure which one\n # we are using to run the test.\n is_mpi = gloo_rank == -1\n hvd.init()\n rank = hvd.rank()\n\n if is_mpi:\n assert mpi_rank == rank\n else:\n assert gloo_rank == rank\n\n def test_horovod_size(self):\n \"\"\"Test that the size returned by hvd.size() is correct.\"\"\"\n _, mpi_size = mpi_env_rank_and_size()\n gloo_size = int(os.getenv('HOROVOD_SIZE', -1))\n\n # The mpi size does not match gloo size, we need to figure which one\n # we are using to run the test.\n is_mpi = gloo_size == -1\n hvd.init()\n size = hvd.size()\n if is_mpi:\n assert mpi_size == size\n else:\n assert gloo_size == size\n\n def test_horovod_allreduce(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n summed = hvd.allreduce(tensor, average=False)\n tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)\n multiplied = tensor * size\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_average(self):\n \"\"\"Test that the allreduce correctly averages 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n averaged = hvd.allreduce(tensor, average=True)\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert 
torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_inplace(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n multiplied = self.cast_and_place(tensor * size, dtype)\n tensor = self.cast_and_place(tensor, dtype)\n hvd.allreduce_(tensor, average=False)\n tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_async_fused(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors\n with Tensor Fusion.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n tests = []\n is_hvd_poll_false_once = False\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n handle = hvd.allreduce_async(tensor, average=False)\n if not hvd.poll(handle):\n is_hvd_poll_false_once = True\n tensor, = self.convert_cpu_fp16_to_fp32(tensor)\n multiplied = tensor * size\n tests.append((dtype, multiplied, handle))\n\n # Make sure it's an asynchronous operation.\n assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'\n\n for dtype, multiplied, handle in tests:\n summed = hvd.synchronize(handle)\n summed, = self.convert_cpu_fp16_to_fp32(summed)\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_multi_gpu(self):\n \"\"\"Test that the allreduce works on multiple GPUs.\"\"\"\n # Only do this test if there are GPUs available.\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # Skip the test if there are not enough GPUs.\n if torch.cuda.device_count() < hvd.local_size() * 2:\n self.skipTest(\"Not enough GPUs available\")\n\n iter = 0\n dtypes = [torch.cuda.IntTensor, 
torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n iter += 1\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n device = local_rank * 2 + (iter + local_rank) % 2\n tensor = tensor.cuda(device).type(dtype)\n multiplied = tensor * size\n hvd.allreduce_(tensor, average=False)\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_prescale(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n int_types = [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]\n half_types = [torch.HalfTensor, torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n np.random.seed(1234)\n factor = np.random.uniform()\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n summed = hvd.allreduce(tensor, average=False,\n prescale_factor=factor)\n\n factor = torch.tensor(factor, dtype=torch.float64)\n factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor\n if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # For integer types, scaling done in FP64\n factor = factor.type(torch.float64 if dtype in int_types else dtype)\n tensor = tensor.type(torch.float64 if dtype in int_types else dtype)\n else:\n # For integer types, scaling done in FP64, FP32 math for FP16 on CPU\n factor = factor.type(torch.float32 if dtype in half_types else\n torch.float64 if dtype in int_types else dtype)\n tensor = tensor.type(torch.float32 if dtype in half_types else\n torch.float64 if dtype in int_types else dtype)\n multiplied = factor * tensor\n multiplied = multiplied.type(dtype)\n summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)\n multiplied *= size\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n\n def test_horovod_allreduce_postscale(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n int_types = [torch.IntTensor, 
torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]\n half_types = [torch.HalfTensor, torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n np.random.seed(1234)\n factor = np.random.uniform()\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n summed = hvd.allreduce(tensor, average=False,\n postscale_factor=factor)\n\n factor = torch.tensor(factor, dtype=torch.float64)\n factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor\n if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # For integer types, scaling done in FP64\n factor = factor.type(torch.float64 if dtype in int_types else dtype)\n tensor = tensor.type(torch.float64 if dtype in int_types else dtype)\n else:\n # For integer types, scaling done in FP64, FP32 math for FP16 on CPU\n factor = factor.type(torch.float32 if dtype in half_types else\n torch.float64 if dtype in int_types else dtype)\n tensor = tensor.type(torch.float32 if dtype in half_types else\n torch.float64 if dtype in int_types else dtype)\n multiplied = size * tensor\n multiplied = multiplied * factor\n multiplied = multiplied.type(dtype)\n summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n \n def test_horovod_allreduce_process_sets(self):\n \"\"\"Test that the allreduce correctly sums 1D, 2D, 3D tensors if restricted to non-global process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n \n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n \n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n \n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n even_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n odd_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n if rank in even_ranks:\n tensor = self.cast_and_place(even_rank_tensor, dtype)\n summed = hvd.allreduce(tensor, average=False, process_set=even_set)\n elif rank in odd_ranks:\n tensor = self.cast_and_place(odd_rank_tensor, dtype)\n summed = hvd.allreduce(tensor, average=False, process_set=odd_set)\n tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)\n if rank in even_ranks:\n multiplied = tensor * len(even_ranks)\n elif rank in odd_ranks:\n multiplied = tensor * len(odd_ranks)\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n max_process_set_size = max(len(even_ranks), len(odd_ranks))\n if max_process_set_size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n 
torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif max_process_set_size < 10:\n threshold = 1e-4\n elif max_process_set_size < 15:\n threshold = 5e-4\n else:\n break\n\n assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'\n \n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_allreduce_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n send tensors of different rank or dimension.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Same rank, different dimension\n torch.manual_seed(1234)\n dims = [17 + rank] * 3\n tensor = torch.FloatTensor(*dims).random_(-100, 100)\n try:\n hvd.allreduce(tensor)\n assert False, 'hvd.allreduce did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n # Same number of elements, different rank\n torch.manual_seed(1234)\n if rank == 0:\n dims = [17, 23 * 57]\n else:\n dims = [17, 23, 57]\n tensor = torch.FloatTensor(*dims).random_(-100, 100)\n try:\n hvd.allreduce(tensor)\n assert False, 'hvd.allreduce did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_allreduce_type_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n send tensors of different type.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Same rank, different dimension\n dims = [17] * 3\n if rank % 2 == 0:\n tensor = torch.IntTensor(*dims)\n else:\n tensor = torch.FloatTensor(*dims)\n\n try:\n hvd.allreduce(tensor)\n assert False, 'hvd.allreduce did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_allreduce_cpu_gpu_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n perform reduction on CPU and GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Same rank, different dimension\n dims = [17] * 3\n if rank % 2 == 0:\n tensor = torch.cuda.FloatTensor(*dims)\n else:\n tensor = torch.FloatTensor(*dims)\n\n try:\n hvd.allreduce(tensor)\n assert False, 'hvd.allreduce did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_allreduce_duplicate_name_error(self):\n \"\"\"Test that the allreduce raises an error if there are\n two concurrent operations with the same name.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dims = [17] * 3\n tensor = torch.FloatTensor(*dims)\n\n hvd.allreduce_async(tensor, name='duplicate_name')\n try:\n for i in range(10):\n hvd.allreduce_async(tensor, name='duplicate_name')\n assert False, 'hvd.allreduce_async did not throw error'\n except (torch.FatalError, ValueError):\n pass\n if LooseVersion(torch.__version__) 
>= LooseVersion('1.10.0'):\n # To fix https://github.com/horovod/horovod/issues/3149\n hvd.join()\n\n def test_horovod_allreduce_grad(self):\n \"\"\"Test the correctness of the allreduce gradient.\"\"\"\n hvd.init()\n size = hvd.size()\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n summed = hvd.allreduce(tensor, average=False)\n\n summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones([17] * dim) * size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allreduce_grad_average(self):\n \"\"\"Test the correctness of the allreduce averaged gradient.\"\"\"\n hvd.init()\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n summed = hvd.allreduce(tensor, average=True)\n\n summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones([17] * dim)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allreduce_grad_process_sets(self):\n \"\"\"Test the correctness of the allreduce gradient if restricted to non-global process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n even_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n odd_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n if rank in even_ranks:\n tensor = self.cast_and_place(even_rank_tensor, dtype)\n this_set = even_set\n set_size = len(even_ranks)\n elif rank in odd_ranks:\n tensor = self.cast_and_place(odd_rank_tensor, dtype)\n this_set = odd_set\n set_size = len(odd_ranks)\n tensor.requires_grad_()\n summed = hvd.allreduce(tensor, average=False, process_set=this_set)\n\n summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones([17] * 
dim) * set_size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_grouped_allreduce(self):\n \"\"\"Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]\n summed = hvd.grouped_allreduce(tensors, average=False)\n tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])\n multiplied = [tensor * size for tensor in tensors]\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \\\n 'hvd.grouped_allreduce produces incorrect results'\n\n def test_horovod_grouped_allreduce_average(self):\n \"\"\"Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]\n averaged = hvd.grouped_allreduce(tensors, average=True)\n tensors, averaged = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, averaged)])\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(averaged, tensors)]), \\\n 'hvd.grouped_allreduce produces incorrect results for average'\n\n def test_horovod_grouped_allreduce_inplace(self):\n \"\"\"Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 
3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n multiplied = [self.cast_and_place(tensor * size, dtype) for tensor in tensors]\n tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]\n hvd.grouped_allreduce_(tensors, average=False)\n tensors, multiplied = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, multiplied)])\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(tensors, multiplied)]), \\\n 'hvd.grouped_allreduce_ produces incorrect results'\n\n def test_horovod_grouped_allreduce_process_sets(self):\n \"\"\"Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n even_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n odd_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n if rank in even_ranks:\n tensors = [self.cast_and_place(tensor, dtype) for tensor in even_rank_tensors]\n summed = hvd.grouped_allreduce(tensors, average=False, process_set=even_set)\n elif rank in odd_ranks:\n tensors = [self.cast_and_place(tensor, dtype) for tensor in odd_rank_tensors]\n summed = hvd.grouped_allreduce(tensors, average=False, process_set=odd_set)\n tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])\n if rank in even_ranks:\n multiplied = [tensor * len(even_ranks) for tensor in tensors]\n elif rank in odd_ranks:\n multiplied = [tensor * len(odd_ranks) for tensor in tensors]\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n max_process_set_size = max(len(even_ranks), len(odd_ranks))\n if max_process_set_size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor]:\n threshold = 0\n elif max_process_set_size < 10:\n threshold = 1e-4\n elif max_process_set_size < 15:\n threshold = 5e-4\n else:\n break\n\n assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \\\n 'hvd.grouped_allreduce produces incorrect results'\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_grouped_allreduce_cpu_gpu_error(self):\n \"\"\"Test that the grouped allreduce raises an error if the input tensor\n list 
contains a mix of tensors on CPU and GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n hvd.init()\n tensors = [torch.FloatTensor(10) if i % 2 else torch.cuda.FloatTensor(10) for i in range(5)]\n try:\n hvd.grouped_allreduce(tensors, average=False)\n assert False, 'hvd.allreduce did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_grouped_allreduce_grad(self):\n \"\"\"Test the correctness of the grouped allreduce gradient.\"\"\"\n hvd.init()\n size = hvd.size()\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]\n for tensor in tensors:\n tensor.requires_grad_()\n summed = hvd.grouped_allreduce(tensors, average=False)\n\n for s in summed:\n s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n\n grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]\n\n expected = np.ones([17] * dim) * size\n for grad_out in grads_out:\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_grouped_allreduce_grad_average(self):\n \"\"\"Test the correctness of the grouped allreduce averaged gradient.\"\"\"\n hvd.init()\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n torch.manual_seed(1234)\n tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]\n for tensor in tensors:\n tensor.requires_grad_()\n summed = hvd.grouped_allreduce(tensors, average=True)\n\n for s in summed:\n s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n\n grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]\n\n expected = np.ones([17] * dim)\n for grad_out in grads_out:\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_grouped_allreduce_grad_process_sets(self):\n \"\"\"Test the correctness of the grouped allreduce gradient if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, 
dims):\n torch.manual_seed(1234)\n even_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n odd_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]\n if rank in even_ranks:\n tensors = [self.cast_and_place(tensor, dtype) for tensor in even_rank_tensors]\n this_set = even_set\n set_size = len(even_ranks)\n elif rank in odd_ranks:\n tensors = [self.cast_and_place(tensor, dtype) for tensor in odd_rank_tensors]\n this_set = odd_set\n set_size = len(odd_ranks)\n for tensor in tensors:\n tensor.requires_grad_()\n summed = hvd.grouped_allreduce(tensors, average=False, process_set=this_set)\n\n for s in summed:\n s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n\n grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]\n\n expected = np.ones([17] * dim) * set_size\n for grad_out in grads_out:\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_allgather(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n gathered = hvd.allgather(tensor)\n tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)\n\n assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)\n\n for i in range(size):\n rank_tensor = gathered[i * 17:(i + 1) * 17]\n assert list(rank_tensor.shape) == [17] * dim, \\\n 'hvd.allgather produces incorrect gathered shape'\n assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'\n assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'\n\n def test_horovod_allgather_variable_size(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors,\n even if those tensors have different sizes along the first dim.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = torch.FloatTensor(\n *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n gathered = hvd.allgather(tensor)\n tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)\n\n expected_size 
= sum(tensor_sizes)\n assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)\n\n for i in range(size):\n rank_size = [tensor_sizes[i]] + [17] * (dim - 1)\n rank_tensor = gathered[sum(\n tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]\n assert list(rank_tensor.shape) == rank_size\n assert rank_tensor.data.min() == i\n assert rank_tensor.data.max() == i\n\n def test_horovod_allgather_async_fused(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors\n with Tensor Fusion.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n tests = []\n is_hvd_poll_false_once = False\n for dtype, dim in itertools.product(dtypes, dims):\n rank_shape = [17] * dim\n tensor = torch.FloatTensor(*(rank_shape)).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n handle = hvd.allgather_async(tensor)\n if not hvd.poll(handle):\n is_hvd_poll_false_once = True\n tests.append((handle, rank_shape))\n\n # Make sure it's an asynchronous operation.\n assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'\n\n for handle, rank_shape in tests:\n gathered = hvd.synchronize(handle)\n gathered, = self.convert_cpu_fp16_to_fp32(gathered)\n\n for i in range(size):\n rank_tensor = gathered[i * 17:(i + 1) * 17]\n assert list(rank_tensor.shape) == rank_shape, \\\n 'hvd.allgather produces incorrect gathered shape'\n assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'\n assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'\n\n def test_horovod_allgather_process_sets(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n if rank in even_ranks:\n set_size = len(even_ranks)\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_size = len(odd_ranks)\n set_ranks = odd_ranks\n this_set = odd_set\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n gathered = hvd.allgather(tensor, process_set=this_set)\n tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)\n\n assert list(gathered.shape) == [17 * set_size] + [17] * (dim - 1)\n\n for i in range(set_size):\n rank_tensor = gathered[i * 17:(i + 1) * 17]\n assert 
list(rank_tensor.shape) == [17] * dim, \\\n 'hvd.allgather produces incorrect gathered shape'\n value = set_ranks[i]\n assert rank_tensor.data.min() == value, 'hvd.allgather produces incorrect gathered tensor'\n assert rank_tensor.data.max() == value, 'hvd.allgather produces incorrect gathered tensor'\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_allgather_error(self):\n \"\"\"Test that the allgather returns an error if any dimension besides\n the first is different among the tensors being gathered.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n tensor_size[1] = 10 * (rank + 1)\n tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)\n\n try:\n hvd.allgather(tensor)\n assert False, 'hvd.allgather did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_allgather_type_error(self):\n \"\"\"Test that the allgather returns an error if the types being gathered\n differ among the processes\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n if rank % 2 == 0:\n tensor = torch.IntTensor(*tensor_size)\n else:\n tensor = torch.FloatTensor(*tensor_size)\n\n try:\n hvd.allgather(tensor)\n assert False, 'hvd.allgather did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_allgather_duplicate_name_error(self):\n \"\"\"Test that the allgather raises an error if there are\n two concurrent operations with the same name.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dims = [17] * 3\n tensor = torch.FloatTensor(*dims)\n\n hvd.allgather_async(tensor, name='duplicate_name')\n try:\n for i in range(10):\n hvd.allgather_async(tensor, name='duplicate_name')\n assert False, 'hvd.allgather_async did not throw error'\n except (torch.FatalError, ValueError):\n pass\n if LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):\n # To fix https://github.com/horovod/horovod/issues/3149\n hvd.join()\n\n def test_horovod_allgather_grad(self):\n \"\"\"Test the correctness of the allgather gradient.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = torch.FloatTensor(\n *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n\n grad_list = []\n for r, tensor_size in enumerate(tensor_sizes):\n grad_list.append(self.cast_and_place(\n torch.ones([tensor_size] + [17] * (dim - 1)), dtype) * r)\n grad_ys = torch.cat(grad_list, dim=0)\n\n gathered = hvd.allgather(tensor)\n gathered.backward(grad_ys)\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones(\n [tensor_sizes[rank]] + [17] * (dim - 1)\n ) * rank\n err = 
np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allgather_grad_process_sets(self):\n \"\"\"Test the correctness of the allgather gradient if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n if rank in even_ranks:\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_ranks = odd_ranks\n this_set = odd_set\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5\n tensor_sizes = tensor_sizes[:size]\n set_tensor_sizes = [tensor_sizes[rk] for rk in set_ranks]\n\n tensor = torch.FloatTensor(\n *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n\n grad_list = []\n for r, tensor_size in zip(set_ranks, set_tensor_sizes):\n grad_list.append(self.cast_and_place(\n torch.ones([tensor_size] + [17] * (dim - 1)), dtype) * r)\n grad_ys = torch.cat(grad_list, dim=0)\n\n gathered = hvd.allgather(tensor, process_set=this_set)\n gathered.backward(grad_ys)\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones(\n [tensor_sizes[rank]] + [17] * (dim - 1)\n ) * rank\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_broadcast(self):\n \"\"\"Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)\n tensor = self.cast_and_place(tensor, dtype)\n root_tensor = self.cast_and_place(root_tensor, dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n tensor, root_tensor, broadcasted_tensor = \\\n self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)\n if rank != root_rank:\n assert (tensor == root_tensor).max() == 0, \\\n 'hvd.broadcast modifies source tensor'\n assert 
(broadcasted_tensor.data == root_tensor).min() == 1, \\\n 'hvd.broadcast produces incorrect broadcasted tensor'\n\n def test_horovod_broadcast_inplace(self):\n \"\"\"Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)\n tensor = self.cast_and_place(tensor, dtype)\n root_tensor = self.cast_and_place(root_tensor, dtype)\n broadcasted_tensor = hvd.broadcast_(tensor, root_rank)\n tensor, root_tensor, broadcasted_tensor = \\\n self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)\n assert (tensor == broadcasted_tensor).min() == 1, \\\n 'hvd.broadcast does not modify source tensor'\n assert (broadcasted_tensor == root_tensor).min() == 1, \\\n 'hvd.broadcast produces incorrect broadcasted tensor'\n\n def test_horovod_broadcast_process_sets(self):\n \"\"\"Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n if rank in even_ranks:\n set_size = len(even_ranks)\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_size = len(odd_ranks)\n set_ranks = odd_ranks\n this_set = odd_set\n\n dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,\n torch.HalfTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n root_ranks = list(set_ranks)\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)\n tensor = self.cast_and_place(tensor, dtype)\n root_tensor = self.cast_and_place(root_tensor, dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank, process_set=this_set)\n tensor, root_tensor, broadcasted_tensor = \\\n self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)\n if rank != root_rank:\n assert (tensor == root_tensor).max() == 0, \\\n 'hvd.broadcast modifies source tensor'\n assert (broadcasted_tensor.data == 
root_tensor).min() == 1, \\\n 'hvd.broadcast produces incorrect broadcasted tensor'\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_broadcast_error(self):\n \"\"\"Test that the broadcast returns an error if any dimension besides\n the first is different among the tensors being broadcasted.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n tensor_size[1] = 10 * (rank + 1)\n tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)\n\n try:\n hvd.broadcast(tensor, 0)\n assert False, 'hvd.broadcast did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_broadcast_type_error(self):\n \"\"\"Test that the broadcast returns an error if the types being broadcasted\n differ among the processes\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n if rank % 2 == 0:\n tensor = torch.IntTensor(*tensor_size)\n else:\n tensor = torch.FloatTensor(*tensor_size)\n\n try:\n hvd.broadcast(tensor, 0)\n assert False, 'hvd.broadcast did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_broadcast_rank_error(self):\n \"\"\"Test that the broadcast returns an error if different ranks\n specify different root rank.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor = torch.FloatTensor(*([17] * 3)).fill_(1)\n\n try:\n hvd.broadcast(tensor, rank)\n assert False, 'hvd.broadcast did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n def test_horovod_broadcast_duplicate_name_error(self):\n \"\"\"Test that the broadcast raises an error if there are\n two concurrent operations with the same name.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dims = [17] * 3\n tensor = torch.FloatTensor(*dims)\n\n hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')\n try:\n for i in range(10):\n hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')\n assert False, 'hvd.broadcast_async did not throw error'\n except (torch.FatalError, ValueError):\n pass\n if LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):\n # To fix https://github.com/horovod/horovod/issues/3149\n hvd.join()\n\n def test_horovod_broadcast_grad(self):\n \"\"\"Test the correctness of the broadcast gradient.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n 
broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n c = 1 if rank == root_rank else 0\n expected = np.ones([17] * dim) * c\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_broadcast_grad_process_sets(self):\n \"\"\"Test the correctness of the broadcast gradient if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n\n if rank in even_ranks:\n set_size = len(even_ranks)\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_size = len(odd_ranks)\n set_ranks = odd_ranks\n this_set = odd_set\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n root_ranks = list(set_ranks)\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n\n broadcasted_tensor = hvd.broadcast(tensor, root_rank, process_set=this_set)\n broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n c = 1 if rank == root_rank else 0\n expected = np.ones([17] * dim) * c\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_alltoall(self):\n \"\"\"Test that the alltoall correctly distributes 1D, 2D, and 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor,\n torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n splits = torch.tensor([rank + 1] * size, dtype=torch.int32)\n tensor = self.cast_and_place(tensor, dtype)\n collected, received_splits = hvd.alltoall(tensor, splits)\n tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)\n\n assert 
collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'\n self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],\n \"hvd.alltoall returned incorrect received_splits\")\n\n def test_horovod_alltoall_equal_split(self):\n \"\"\"Test that the alltoall correctly distributes 1D tensors with default splitting.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor,\n torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n tensor = self.cast_and_place(tensor, dtype)\n collected = hvd.alltoall(tensor)\n tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)\n\n assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'\n\n def test_horovod_alltoall_splits_on_gpu(self):\n \"\"\"Test that the alltoall works correctly when the splits argument is a tensor on GPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor,\n torch.DoubleTensor, torch.HalfTensor])\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n splits = torch.tensor([rank + 1] * size, dtype=torch.int32, device=\"cuda\")\n tensor = self.cast_and_place(tensor, dtype)\n collected, received_splits = hvd.alltoall(tensor, splits)\n tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)\n\n assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 
'hvd.alltoall collected wrong number of values'\n self.assertEqual(received_splits.device.type, \"cuda\", \"received_splits should be on GPU here\")\n self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],\n \"hvd.alltoall returned incorrect received_splits\")\n\n def test_horovod_alltoall_process_sets(self):\n \"\"\"Test that the alltoall correctly distributes 1D, 2D, and 3D tensors if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n if rank in even_ranks:\n set_size = len(even_ranks)\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_size = len(odd_ranks)\n set_ranks = odd_ranks\n this_set = odd_set\n\n dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,\n torch.IntTensor, torch.LongTensor, torch.FloatTensor,\n torch.DoubleTensor, torch.HalfTensor])\n if torch.cuda.is_available():\n dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,\n torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in set_ranks:\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n splits = torch.tensor([rank + 1] * set_size, dtype=torch.int32)\n tensor = self.cast_and_place(tensor, dtype)\n collected, received_splits = hvd.alltoall(tensor, splits, process_set=this_set)\n tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)\n\n assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'\n assert collected.numel() == sum(rk + 1 for rk in set_ranks) * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'\n self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in set_ranks],\n \"hvd.alltoall returned incorrect received_splits\")\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_horovod_alltoall_type_error(self):\n \"\"\"Test that the alltoall returns an error if the tensor types differ\n across the processes.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n if rank % 2:\n tensor = torch.empty(size, dtype=torch.int32)\n else:\n tensor = torch.empty(size, dtype=torch.float32)\n try:\n hvd.alltoall(tensor)\n assert False, 'hvd.alltoall did not throw error'\n except (torch.FatalError, RuntimeError):\n 
pass\n\n def test_horovod_alltoall_equal_split_length_error(self):\n \"\"\"Test that the alltoall with default splitting returns an error if the tensor length is not a multiple\n of the number of workers.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n tensor = torch.empty(size + 1)\n try:\n hvd.alltoall(tensor)\n assert False, 'hvd.alltoall did not throw error'\n except (torch.FatalError, ValueError):\n pass\n\n def test_horovod_alltoall_splits_error(self):\n \"\"\"Test that the alltoall returns an error if the sum of the splits entries exceeds\n the first dimension of the input tensor.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n tensor = torch.empty(size - 1)\n splits = torch.ones(size, dtype=torch.int32)\n try:\n hvd.alltoall(tensor, splits)\n assert False, 'hvd.alltoall did not throw error'\n except (torch.FatalError, ValueError):\n pass\n\n def test_horovod_alltoall_splits_type_error(self):\n \"\"\"Test that the alltoall returns an error if the splits tensor does not\n contain 32-bit integers.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n tensor = torch.empty(size)\n splits = torch.empty(size, dtype=torch.float32)\n try:\n hvd.alltoall(tensor, splits)\n assert False, 'hvd.alltoall did not throw error'\n except (torch.FatalError, ValueError):\n pass\n\n def test_horovod_alltoall_rank_error(self):\n \"\"\"Test that the alltoall returns an error if any dimension besides\n the first is different among the tensors being processed.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n tensor_size = [2 * size] * 3\n tensor_size[1] = 10 * (rank + 1)\n tensor = torch.ones(tensor_size)\n\n try:\n hvd.alltoall(tensor)\n assert False, 'hvd.alltoall did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n\n def test_horovod_alltoall_grad(self):\n \"\"\"Test the correctness of the alltoall gradient.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = 
tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n splits = torch.tensor([rank + 1] * size, dtype=torch.int32)\n collected, received_splits = hvd.alltoall(tensor, splits)\n\n collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones(tensor.shape)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_equal_split_grad(self):\n \"\"\"Test the correctness of the alltoall gradient with default splitting.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n collected = hvd.alltoall(tensor)\n\n collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones(tensor.shape)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_grad_process_sets(self):\n \"\"\"Test the correctness of the alltoall gradient if restricted to process sets.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n if rank in even_ranks:\n set_size = len(even_ranks)\n set_ranks = even_ranks\n this_set = even_set\n elif rank in odd_ranks:\n set_size = len(odd_ranks)\n set_ranks = odd_ranks\n this_set = odd_set\n\n # Only Tensors of floating point dtype can require gradients\n dtypes = [torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]\n\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n vals = []\n for i in set_ranks:\n vals += [i] * (rank + 1)\n\n tensor = torch.Tensor(vals)\n for _ in range(dim - 1):\n tensor = tensor.unsqueeze(1)\n tensor = torch.cat((tensor, tensor), dim=1)\n\n tensor = self.cast_and_place(tensor, dtype)\n tensor.requires_grad_()\n splits = torch.tensor([rank + 1] * set_size, 
dtype=torch.int32)\n collected, received_splits = hvd.alltoall(tensor, splits, process_set=this_set)\n\n collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))\n grad_out = tensor.grad.data.cpu().numpy()\n\n expected = np.ones(tensor.shape)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_broadcast_state(self):\n hvd.init()\n\n N, D_in, H, D_out = 64, 100, 10, 10\n x = torch.randn(N, D_in).requires_grad_()\n y = torch.randn(N, D_out).requires_grad_()\n\n def new_optimizer(cls, opt_params, model):\n p = {\n k: v for k, v in opt_params.items()\n if k in inspect.getargspec(cls.__init__).args\n }\n return cls(model.parameters(), **p)\n\n def create_model(opt_class, opt_params):\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n\n optimizer = new_optimizer(opt_class, opt_params, model)\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters())\n\n return model, optimizer\n\n def get_model_param_values(model):\n params = sorted(model.state_dict().items())\n return [(k, v.clone()) for k, v in params]\n\n def get_optimizer_param_values(optimizer):\n results = []\n state_dict = optimizer.state_dict()\n for group in state_dict['param_groups']:\n for param_id in group['params']:\n if param_id not in state_dict['state']:\n continue\n params = sorted(state_dict['state'][param_id].items())\n for k, v in params:\n results.append(\n (k, v.clone() if torch.is_tensor(v) else v))\n return results\n\n # L-BFGS is currently unsupported, as are sparse tensors, which are\n # required by SparseAdam optimizer\n optimizers = [\n (subclass.__name__, subclass)\n for subclass in torch.optim.Optimizer.__subclasses__()\n if subclass.__module__.startswith('torch.optim') and\n subclass != torch.optim.LBFGS and\n subclass != torch.optim.SparseAdam\n ]\n optimizers.sort(key=lambda tup: tup[0])\n\n opt_params_list = [\n dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),\n dict(lr=0.2)\n ]\n\n for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):\n model, optimizer = create_model(opt_class, opt_params)\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y, size_average=False)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n model_param_values = get_model_param_values(model)\n for name, model_param_value in model_param_values:\n hvd.broadcast_(model_param_value, root_rank=0)\n\n opt_param_values_updated = []\n opt_param_values = get_optimizer_param_values(optimizer)\n for name, opt_param_value in opt_param_values:\n is_tensor = torch.is_tensor(opt_param_value)\n if is_tensor:\n hvd.broadcast_(opt_param_value, root_rank=0)\n else:\n opt_param_value = hvd.broadcast_object(opt_param_value, name=name)\n opt_param_values_updated.append((name, opt_param_value))\n opt_param_values = opt_param_values_updated\n\n with temppath() as fname:\n if hvd.rank() == 0:\n state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, fname)\n\n model, optimizer = create_model(opt_class, opt_params)\n if hvd.rank() == 0:\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n 
model_param_value_after = get_model_param_values(model)\n for before, after in zip(model_param_values,\n model_param_value_after):\n name, model_param_value = before\n name_after, model_param_value_after = after\n self.assertEqual(name, name_after)\n self.assertEqual(type(model_param_value),\n type(model_param_value_after))\n self.assertTrue(\n (model_param_value == model_param_value_after).all())\n\n expected_tensors = hvd.broadcast_object(len(optimizer.state_dict()['state'].values()))\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n self.assertEqual(len(optimizer.state_dict()['state'].values()), expected_tensors)\n\n opt_param_values_after = get_optimizer_param_values(optimizer)\n for before, after in zip(opt_param_values, opt_param_values_after):\n name, opt_param_value = before\n name_after, opt_param_value_after = after\n self.assertEqual(name, name_after)\n self.assertEqual(type(opt_param_value),\n type(opt_param_value_after))\n if torch.is_tensor(opt_param_value):\n self.assertTrue(\n (opt_param_value == opt_param_value_after).all())\n else:\n self.assertEqual(opt_param_value, opt_param_value_after)\n\n # TODO: investigate why this hangs on K80s\n @unittest.skip\n def test_broadcast_state_gpu(self):\n # Only do this test if there are GPUs available.\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n # Set default tensor type, ensuring optimizer tensor-wrapping is robust\n # to this setting.\n try:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n self.test_broadcast_state()\n finally:\n torch.set_default_tensor_type(torch.FloatTensor)\n\n def test_broadcast_state_options(self):\n hvd.init()\n\n N, D_in, H, D_out = 64, 100, 10, 10\n x = torch.randn(N, D_in).requires_grad_()\n y = torch.randn(N, D_out).requires_grad_()\n\n params_0 = dict(lr=0.1, momentum=0.8, weight_decay=0.2, nesterov=True,\n betas=(0.9, 0.999), etas=(0.8, 2.4), step_sizes=(1e-5, 100))\n params_1 = dict(lr=0.2, momentum=0.9, weight_decay=0.1, nesterov=False,\n betas=(0.8, 0.9), etas=(0.25, 1.75), step_sizes=(1e-7, 5))\n\n def create_model(opt_class):\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n\n params = params_0 if hvd.rank() == 0 else params_1\n p = {\n k: v for k, v in params.items()\n if k in inspect.getargspec(opt_class.__init__).args\n }\n opt = opt_class(model.parameters(), **p)\n opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())\n\n return model, opt\n\n # Include subclass name so we can sort them lexicographically, otherwise different\n # ranks will have different optimizer orderings\n optimizers = [\n (subclass.__name__, subclass)\n for subclass in torch.optim.Optimizer.__subclasses__()\n if subclass.__module__.startswith('torch.optim') and\n subclass != torch.optim.LBFGS and\n subclass != torch.optim.SparseAdam\n ]\n optimizers.sort(key=lambda tup: tup[0])\n\n for _, opt_class in optimizers:\n model, optimizer = create_model(opt_class)\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y, size_average=False)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n p0 = {\n k: v for k, v in params_0.items()\n if k in inspect.getargspec(opt_class.__init__).args\n }\n for k, p in p0.items():\n p_actual = optimizer.param_groups[0][k]\n if not isinstance(p, Iterable):\n p_actual = [p_actual]\n p = [p]\n for i in range(len(p)):\n self.assertEqual(type(p_actual[i]), type(p[i]))\n 
self.assertAlmostEqual(p_actual[i], p[i], delta=1e-5)\n\n # Ensure that the parameter option types are compatible with ops\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y, size_average=False)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n def test_broadcast_state_no_grad(self):\n class ModelNoGrad(nn.Module):\n def __init__(self, a, b):\n super(ModelNoGrad, self).__init__()\n self.a = nn.Parameter(a.int(), requires_grad=False)\n self.b = nn.Parameter(b)\n\n def forward(self, x):\n return torch.index_select(self.b, 0, self.a.long()) * x\n\n hvd.init()\n\n a = torch.Tensor([1, 3])\n b = torch.rand(4)\n\n model = ModelNoGrad(a, b)\n\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)\n optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())\n\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n grad = optimizer.param_groups[0]['params'][1].grad\n bgrad = hvd.broadcast(grad, root_rank=0)\n\n assert optimizer.param_groups[0]['params'][0].grad is None\n assert torch.all(torch.eq(grad, bgrad)).item()\n\n def test_broadcast_object(self):\n hvd.init()\n\n expected_obj = {\n 'hello': 123,\n 0: [1, 2]\n }\n obj = expected_obj if hvd.rank() == 0 else {}\n\n obj = hvd.broadcast_object(obj, root_rank=0)\n self.assertDictEqual(obj, expected_obj)\n\n def test_broadcast_object_process_sets(self):\n hvd.init()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if hvd.size() == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n if hvd.rank() in even_ranks:\n set_ranks = even_ranks\n this_set = even_set\n elif hvd.rank() in odd_ranks:\n set_ranks = odd_ranks\n this_set = odd_set\n\n expected_obj = {\n 'hello': 123,\n 0: [1, 2]\n }\n obj = expected_obj if hvd.rank() == set_ranks[0] else {}\n\n obj = hvd.broadcast_object(obj, root_rank=set_ranks[0], process_set=this_set)\n self.assertDictEqual(obj, expected_obj)\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_allgather_object(self):\n hvd.init()\n\n d = {'metric_val_1': hvd.rank()}\n if hvd.rank() == 1:\n d['metric_val_2'] = 42\n\n results = hvd.allgather_object(d)\n\n expected = [{'metric_val_1': i} for i in range(hvd.size())]\n if hvd.size() > 1:\n expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}\n\n self.assertEqual(len(results), hvd.size())\n self.assertListEqual(results, expected)\n\n def test_compression_fp16(self):\n valid_dtypes = [torch.float32, torch.float64]\n invalid_dtypes = [torch.uint8, torch.int8, torch.int16,\n torch.int32, torch.int64]\n\n tensor_size = [5] * 3\n compression = hvd.Compression.fp16\n\n for dtype in valid_dtypes:\n tensor = torch.ones(tensor_size, dtype=dtype)\n\n tensor_compressed, ctx = compression.compress(tensor)\n self.assertEqual(tensor_compressed.dtype, torch.float16)\n\n tensor_decompressed = compression.decompress(tensor_compressed, ctx)\n self.assertEqual(tensor_decompressed.dtype, dtype)\n\n expected = np.ones(tensor_size)\n err = np.linalg.norm(expected - tensor_decompressed.data.numpy())\n self.assertLess(err, 0.00000001)\n\n for dtype in invalid_dtypes:\n tensor = 
torch.ones(tensor_size, dtype=dtype)\n\n tensor_compressed, ctx = compression.compress(tensor)\n self.assertEqual(tensor_compressed.dtype, dtype)\n\n tensor_decompressed = compression.decompress(tensor_compressed, ctx)\n self.assertEqual(tensor_decompressed.dtype, dtype)\n\n if dtype != torch.int8: # Cannot cast to NumPy with a CharTensor\n expected = np.ones(tensor_size)\n err = np.linalg.norm(expected - tensor_decompressed.data.numpy())\n self.assertLess(err, 0.00000001)\n\n def test_force_allreduce(self):\n \"\"\"Test that allreduce is forced on all gradients during opt.step().\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n N, D_in, H, D_out = 64, 100, 10, 10\n x = torch.randn(N, D_in).requires_grad_()\n y = torch.randn(N, D_out).requires_grad_()\n\n def new_optimizer(cls, opt_params, model):\n p = {\n k: v for k, v in opt_params.items()\n if k in inspect.getargspec(cls.__init__).args\n }\n return cls(model.parameters(), **p)\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = torch.nn.Linear(D_in, H)\n self.fc2 = torch.nn.Linear(H, D_out)\n self.fc3 = torch.nn.Linear(D_out, D_out)\n\n def forward(self, x_):\n x_ = F.relu(self.fc1(x_))\n x1_ = self.fc2(x_)\n x2_ = self.fc3(F.relu(x1_))\n return x1_, x2_\n\n def create_model(opt_class, opt_params):\n model = Net()\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n opt = new_optimizer(opt_class, opt_params, model)\n opt = hvd.DistributedOptimizer(\n opt, named_parameters=model.named_parameters())\n return model, opt\n\n # L-BFGS is currently unsupported, as are sparse tensors, which are\n # required by SparseAdam optimizer\n optimizers = [\n (subclass.__name__, subclass)\n for subclass in torch.optim.Optimizer.__subclasses__()\n if subclass.__module__.startswith('torch.optim') and\n subclass != torch.optim.LBFGS and\n subclass != torch.optim.SparseAdam\n ]\n optimizers.sort(key=lambda tup: tup[0])\n\n opt_params_list = [\n dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),\n dict(lr=0.2)\n ]\n\n for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):\n model, optimizer = create_model(opt_class, opt_params)\n y_pred1, y_pred2 = model(x)\n if rank == 0:\n loss = F.mse_loss(y_pred1, y, size_average=False)\n else:\n loss = F.mse_loss(y_pred2, y, size_average=False)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n def test_model_parallelism(self):\n \"\"\"Test that tensors on different GPUs are supported.\"\"\"\n # Only do this test if there are GPUs available.\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Skip the test if there are not enough GPUs.\n if torch.cuda.device_count() < hvd.local_size() * 2:\n self.skipTest(\"Not enough GPUs available\")\n\n first_device = local_rank * 2\n second_device = local_rank * 2 + 1\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # Place parts of model on different GPUs.\n self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)\n self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)\n\n def forward(self, x):\n x = x.cuda(first_device)\n x = self.conv1(x)\n x = x.cuda(second_device)\n x = 
self.conv2(x)\n return x\n\n model = Net()\n inp = torch.rand([1, 1, 1000, 1000])\n\n opt = torch.optim.SGD(model.parameters(), lr=0.1)\n opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())\n\n loss = model(inp).sum()\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n def test_delta_optimizer(self):\n \"\"\"Test that delta optimizer.\"\"\"\n hvd.init()\n # TODO support non-MPI Adasum operation\n # Only do this test if there are GPUs available.\n if not hvd.mpi_enabled() or not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)\n self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)\n\n def forward(self, x):\n x = x.cuda(local_rank)\n x = self.conv1(x)\n x = x.cuda(local_rank)\n x = self.conv2(x)\n return x\n\n model = Net()\n inp = torch.rand([1, 1, 1000, 1000])\n\n opt = torch.optim.SGD(model.parameters(), lr=0.1)\n\n opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)\n loss = model(inp).sum()\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n def test_duplicate_names(self):\n \"\"\"Test that passing duplicate names to optimizer will fail.\"\"\"\n net1 = torch.nn.Conv2d(1, 1, 1)\n net2 = torch.nn.Conv2d(1, 1, 1)\n\n parameters = itertools.chain(net1.parameters(), net2.parameters())\n opt = torch.optim.SGD(parameters, lr=0.1)\n\n # This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'\n named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())\n try:\n hvd.DistributedOptimizer(opt, named_parameters=named_parameters)\n assert False, 'hvd.DistributedOptimizer did not throw error'\n except ValueError:\n pass\n\n def test_dynamic_requires_grad(self):\n \"\"\"Test that makes sure that gradients can be turned off/on dynamically.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n gen = torch.nn.Conv2d(1, 10, 1)\n disc = torch.nn.Conv2d(10, 1, 1)\n inp = torch.rand([1, 1, 100, 100])\n\n gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)\n gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())\n\n disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)\n disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())\n\n def train_step(train_generator=False, train_discriminator=False):\n for p in gen.parameters():\n p.requires_grad_(train_generator)\n for p in disc.parameters():\n p.requires_grad_(train_discriminator)\n\n gen_opt.zero_grad()\n disc_opt.zero_grad()\n\n loss = disc(gen(inp)).sum()\n loss.backward()\n\n for p in gen.parameters():\n assert train_generator == (p.grad is not None and p.grad.max().is_nonzero()), \\\n 'Gradient for generator is zero but it should be trained or vice versa.'\n for p in disc.parameters():\n assert train_discriminator == (p.grad is not None and p.grad.max().is_nonzero()), \\\n 'Gradient for discriminator is zero but it should be trained or vice versa.'\n\n if train_generator:\n gen_opt.step()\n if train_discriminator:\n disc_opt.step()\n\n for x in range(10):\n # Step 1: train generator.\n train_step(train_generator=True)\n\n # Step 2: 
train discriminator.\n train_step(train_discriminator=True)\n\n def test_gradient_clipping(self):\n \"\"\"Test gradient clipping example.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n x = torch.ones(1, 1).requires_grad_()\n y = torch.ones(1, 1).requires_grad_()\n\n model = torch.nn.Linear(1, 1)\n model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)\n model.bias = torch.nn.Parameter(torch.zeros(1))\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters())\n\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.synchronize()\n prior_grad = model.weight.grad.item()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\n clipped_grad = model.weight.grad.item()\n assert abs(prior_grad) > abs(clipped_grad)\n with optimizer.skip_synchronize():\n optimizer.step()\n\n def test_synchronize_step_warning(self):\n \"\"\"\n Test that .synchronize() followed by .step() without\n optimizer.skip_synchronize() context will produce a warning.\n \"\"\"\n hvd.init()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n x = torch.zeros(1, 1).requires_grad_()\n y = torch.ones(1, 1).requires_grad_()\n\n model = torch.nn.Linear(1, 1)\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters())\n\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.synchronize()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\n with warnings.catch_warnings(record=True) as ws:\n optimizer.step()\n assert len(ws) == 1\n assert 'optimizer.step() called without optimizer.skip_synchronize()' \\\n in str(ws[0].message)\n\n def test_no_named_parameters(self):\n \"\"\"Test that leaving the default named_parameters=None will not throw an error.\"\"\"\n hvd.init()\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = torch.nn.Conv2d(1, 100, 1)\n self.conv2 = torch.nn.Conv2d(100, 1, 1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n model = Net()\n inp = torch.rand([1, 1, 1000, 1000])\n\n opt = torch.optim.SGD(model.parameters(), lr=0.1)\n opt = hvd.DistributedOptimizer(opt)\n\n loss = model(inp).sum()\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n def test_missing_named_parameters(self):\n \"\"\"Test that naming half of the model parameters will throw an error.\"\"\"\n hvd.init()\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = torch.nn.Conv2d(1, 100, 1)\n self.conv2 = torch.nn.Conv2d(100, 1, 1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n model = Net()\n opt = torch.optim.SGD(model.parameters(), lr=0.1)\n try:\n hvd.DistributedOptimizer(opt,\n named_parameters=list(model.named_parameters())[0:1])\n assert False, 'hvd.DistributedOptimizer did not throw error'\n except ValueError:\n pass\n\n def test_horovod_join_allreduce(self):\n \"\"\"Test Join op with allreduce.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = 
[torch.IntTensor, torch.LongTensor,\n torch.FloatTensor, torch.DoubleTensor]\n if torch.cuda.is_available():\n dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,\n torch.cuda.FloatTensor, torch.cuda.DoubleTensor,\n torch.cuda.HalfTensor]\n\n integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor]\n\n dims = [1, 2, 3]\n first_join_ranks = [0, 1]\n cachings = [False, True]\n for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings):\n torch.manual_seed(1234)\n\n def div(t, s):\n if _1_5_api and dtype in integral_types:\n return t.floor_divide(s)\n return t / s\n\n # Use two tensors to test fusion\n tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100)\n tensor_a = self.cast_and_place(tensor_a, dtype)\n tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100)\n tensor_b = self.cast_and_place(tensor_b, dtype)\n\n if caching:\n handle_a = hvd.allreduce_async(tensor_a, name=\"tensor_a\", average=True)\n handle_b = hvd.allreduce_async(tensor_b, name=\"tensor_b\", average=True)\n averaged_a = hvd.synchronize(handle_a)\n averaged_b = hvd.synchronize(handle_b)\n\n if rank == first_join_rank:\n if dtype.is_cuda:\n ret = hvd.join(hvd.local_rank())\n else:\n ret = hvd.join()\n else:\n handle_a = hvd.allreduce_async(tensor_a, name=\"tensor_a\", average=True)\n handle_b = hvd.allreduce_async(tensor_b, name=\"tensor_b\", average=True)\n averaged_a = hvd.synchronize(handle_a)\n averaged_b = hvd.synchronize(handle_b)\n if dtype.is_cuda:\n ret = hvd.join(hvd.local_rank())\n else:\n ret = hvd.join()\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in integral_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n assert torch.allclose(averaged_a, div(tensor_a * (size - 1), size), threshold), \\\n 'hvd.join with hvd.allreduce produces incorrect results'\n assert torch.allclose(averaged_b, div(tensor_b * (size - 1), size), threshold), \\\n 'hvd.join with hvd.allreduce produces incorrect results'\n\n self.assertNotEqual(ret, first_join_rank,\n msg=\"The return value of hvd.join() may not be equal to first_join_rank\")\n ret_values = hvd.allgather_object(ret)\n self.assertSequenceEqual(ret_values, [ret] * size,\n msg=\"hvd.join() did not return the same value on each rank\")\n\n def test_horovod_join_allgather(self):\n \"\"\"Test Join op with allgather.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dims = [17] * 3\n tensor = torch.FloatTensor(*dims)\n\n if rank == 0:\n if torch.cuda.is_available():\n ret = hvd.join(hvd.local_rank())\n else:\n ret = hvd.join()\n else:\n try:\n hvd.allgather(tensor)\n assert False, 'hvd.allgather did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n ret = hvd.join(hvd.local_rank())\n\n self.assertNotEqual(ret, 0,\n msg=\"The return value of hvd.join() may not be equal to 0 because that would be the first rank to join\")\n ret_values = hvd.allgather_object(ret)\n self.assertSequenceEqual(ret_values, [ret] * size,\n msg=\"hvd.join() did not return the same value on each rank\")\n\n def test_horovod_join_broadcast(self):\n \"\"\"Test Join op with broadcast.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is 
only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dims = [17] * 3\n tensor = torch.FloatTensor(*dims)\n\n if rank == 0:\n ret = hvd.join(hvd.local_rank())\n else:\n try:\n broadcasted_tensor = hvd.broadcast(tensor, 1, name=\"test_horovod_join_broadcast\")\n assert False, 'hvd.broadcast did not throw error'\n except (torch.FatalError, RuntimeError):\n pass\n\n if torch.cuda.is_available():\n ret = hvd.join(hvd.local_rank())\n else:\n ret = hvd.join()\n\n self.assertNotEqual(ret, 0,\n msg=\"The return value of hvd.join() may not be equal to 0 because that would be the first rank to join\")\n ret_values = hvd.allgather_object(ret)\n self.assertSequenceEqual(ret_values, [ret] * size,\n msg=\"hvd.join() did not return the same value on each rank\")\n\n def test_horovod_sync_batch_norm(self):\n \"\"\"Tests Horovod version of SyncBatchNorm.\"\"\"\n if not torch.cuda.is_available():\n self.skipTest(\"No GPUs available\")\n\n hvd.init()\n\n ts_list = [\n torch.stack([\n torch.tensor([\n [r, r + 1],\n [r * 2, r * 2 + 1],\n [r * 3, r * 3 + 1],\n [r * 4, r * 4 + 1]\n ])\n for r in range(hvd.size())\n ]),\n torch.stack([\n torch.tensor([\n [r + 1],\n [r * 2 + 1],\n [r * 3 + 1],\n [r * 4 + 1]\n ])\n for r in range(hvd.size())\n ]),\n ]\n\n for ts in ts_list:\n sync_bn = hvd.SyncBatchNorm(num_features=4)\n sync_bn.cuda(hvd.local_rank())\n\n bn = torch.nn.BatchNorm1d(num_features=4)\n bn.cuda(hvd.local_rank())\n\n ts = ts.cuda(hvd.local_rank()).float()\n ts1 = ts.clone().requires_grad_()\n ts2 = ts.clone().requires_grad_()\n\n # Training\n sync_bn_out = sync_bn(ts1[hvd.rank()].unsqueeze(0))\n bn_out = bn(ts2)\n assert torch.allclose(sync_bn_out, bn_out[hvd.rank()].unsqueeze(0), 1e-6)\n assert torch.allclose(sync_bn.running_mean, bn.running_mean, 1e-6)\n assert torch.allclose(sync_bn.running_var, bn.running_var, 1e-6)\n\n # Gradients\n sync_bn_out.sum().backward()\n bn_out.mean(dim=0).sum().backward()\n assert torch.allclose(hvd.allreduce(sync_bn.weight.grad, name='sync_bn.weight.grad'), bn.weight.grad, 1e-6)\n assert torch.allclose(hvd.allreduce(sync_bn.bias.grad, name='sync_bn.bias.grad'), bn.bias.grad, 1e-6)\n assert torch.allclose(hvd.allreduce(ts1.grad, name='ts1.grad'), ts2.grad, 1e-6)\n\n @pytest.mark.skip(reason='https://github.com/horovod/horovod/issues/2496')\n def test_timeline_api(self):\n hvd.init()\n\n def check_file(fname, check_cycle=True):\n if hvd.rank() == 0:\n with open(fname, 'r') as timeline_file:\n timeline_text = timeline_file.read()\n assert 'allreduce.test_allreduce' in timeline_text, timeline_text\n assert 'start_time_since_epoch_in_micros' in timeline_text, timeline_text\n assert 'NEGOTIATE_ALLREDUCE' in timeline_text, timeline_text\n assert 'ALLREDUCE' in timeline_text, timeline_text\n json_obj = json.loads(timeline_text)\n assert json_obj is not None\n if check_cycle:\n assert 'CYCLE_START' in timeline_text, timeline_text\n\n with temppath() as fname1:\n hvd.start_timeline(fname1, mark_cycles=True)\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();\n # stop timeline will immediately stop events to be registered in timeline. 
We are providing some time\n # before calling stop so that mark_cycle events can be registered in timeline file.\n time.sleep(0.2)\n hvd.stop_timeline()\n\n check_file(fname1)\n\n # Test resuming with a different filename.\n with temppath() as fname2:\n hvd.start_timeline(fname2, mark_cycles=True)\n time.sleep(0.2)\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();\n # stop timeline will immediately stop events to be registered in timeline. We are providing some time\n # before calling stop so that cycle events can be registered in timeline file.\n time.sleep(0.2)\n hvd.stop_timeline()\n check_file(fname2)\n\n # Test resuming with a different filename, but mark_cycles=False\n with temppath() as fname3:\n # Make sure that last stop timeline has been processed.\n hvd.start_timeline(fname3, mark_cycles=False)\n time.sleep(0.2)\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();\n # stop timeline will immediately stop events to be registered in timeline. We are providing some time\n # before calling stop so that events can be registered in timeline file.\n hvd.stop_timeline()\n check_file(fname3, check_cycle=False)\n\n # Test resuming with a different filename, but mark_cycles=True\n with temppath() as fname4:\n # Make sure that last stop timeline has been processed.\n hvd.start_timeline(fname4, mark_cycles=True)\n time.sleep(0.2)\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();\n # stop timeline will immediately stop events to be registered in timeline. We are providing some time\n # before calling stop so that cycle events can be registered in timeline file.\n time.sleep(0.2)\n hvd.stop_timeline()\n check_file(fname4, check_cycle=True)\n\n with temppath() as fname5:\n # Make sure that last stop timeline has been processed.\n hvd.start_timeline(fname5, mark_cycles=False)\n hvd.start_timeline(fname5, mark_cycles=False)\n time.sleep(0.2)\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()\n hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()\n time.sleep(0.2)\n hvd.stop_timeline()\n check_file(fname5, check_cycle=False)\n\n hvd.shutdown()\n\n def test_optimizer_no_named_parameters(self):\n hvd.init()\n\n model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 10))\n optimizer = torch.optim.SGD(\n [{\"params\": model[0].parameters()}, {\"params\": model[1].parameters()}, ],\n lr=0.001,\n )\n optimizer = hvd.DistributedOptimizer(optimizer)\n\n params = optimizer._parameter_names\n self.assertEqual(len(params), len(set(params.values())))\n\n # Make sure all workers have the same set of parameter names\n all_param_names = hvd.allgather_object(set(params.values()))\n self.assertEqual(len(all_param_names), hvd.size())\n for param_names in all_param_names:\n self.assertEqual(all_param_names[0], param_names)\n\n def test_sparse_embeddings(self):\n \"\"\"Test that Horovod will correctly aggregate sparse gradients.\"\"\"\n hvd.init()\n\n for sparse_as_dense in [False, True]:\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.embedding = nn.Embedding(10, 3, sparse=True)\n\n def forward(self, x):\n x = self.embedding(x)\n return x\n\n model = Net()\n\n if hvd.rank() == 0:\n inp = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])\n else:\n inp = torch.LongTensor([[1, 3, 4], [4, 7, 9]])\n\n # list() see: https://github.com/pytorch/pytorch/issues/47594\n opt = 
torch.optim.SparseAdam(list(model.parameters()), lr=0.1)\n opt = hvd.DistributedOptimizer(opt, sparse_as_dense=sparse_as_dense)\n\n loss = model(inp).sum()\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n def test_async_sparse_allreduce(self):\n \"\"\"Test that allgather over indices and values is equivalent to allreduce.\"\"\"\n hvd.init()\n\n # Generate random tensors, then convert them to sparse\n def random_sparse_tensor(*shape):\n t = torch.rand(*shape)\n t[t < 0.8] = 0\n return t.to_sparse()\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]\n allreduced_tensors = [hvd.allreduce(t.to_dense()) for t in tensors]\n\n handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i))\n for i, t in enumerate(tensors)]\n allgathered_tensors = [handle() for handle in handles]\n\n for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):\n assert torch.allclose(reduced, gathered.to_dense(), 1e-6)\n\n def test_async_sparse_allreduce_process_sets(self):\n \"\"\"Test that allgather over indices and values is equivalent to allreduce if restricted to process sets.\"\"\"\n hvd.init()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if hvd.size() == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n if hvd.rank() in even_ranks:\n set_ranks = even_ranks\n this_set = even_set\n elif hvd.rank() in odd_ranks:\n set_ranks = odd_ranks\n this_set = odd_set\n\n # Generate random tensors, then convert them to sparse\n def random_sparse_tensor(*shape):\n t = torch.rand(*shape)\n t[t < 0.8] = 0\n return t.to_sparse()\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]\n allreduced_tensors = [hvd.allreduce(t.to_dense(), process_set=this_set) for t in tensors]\n\n handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i), process_set=this_set)\n for i, t in enumerate(tensors)]\n allgathered_tensors = [handle() for handle in handles]\n\n for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):\n assert torch.allclose(reduced, gathered.to_dense(), 1e-6)\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n def test_optimizer_process_sets(self):\n \"\"\"Test DistributedOptimizer restricted to a process set for an entire model.\n\n Note that this test makes the most sense when running with > 2 processes.\"\"\"\n hvd.init()\n\n if hvd.ccl_built():\n self.skipTest(\"Multiple process sets currently do not support CCL.\")\n\n # This test does not apply if there is only one worker.\n if hvd.size() == 1:\n self.skipTest(\"Only one worker available\")\n\n even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]\n odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]\n even_set = hvd.add_process_set(even_ranks)\n odd_set = hvd.add_process_set(odd_ranks)\n if hvd.rank() in even_ranks:\n this_set = even_set\n elif hvd.rank() in odd_ranks:\n this_set = odd_set\n\n N, D_in, H, D_out = 64, 100, 10, 10\n torch.manual_seed(hvd.rank())\n x = torch.randn(N, D_in).requires_grad_()\n y = torch.randn(N, D_out).requires_grad_()\n\n def new_optimizer(cls, opt_params, model):\n p = {\n k: v for k, v 
in opt_params.items()\n if k in inspect.getargspec(cls.__init__).args\n }\n return cls(model.parameters(), **p)\n\n def create_model(opt_class, opt_params, process_set):\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n\n optimizer = new_optimizer(opt_class, opt_params, model)\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters(),\n process_set=process_set)\n\n return model, optimizer\n\n model, optimizer = create_model(torch.optim.SGD, dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),\n even_set)\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n\n y_pred = model(x)\n loss = F.mse_loss(y_pred, y, size_average=False)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n v = model.state_dict()[\"2.weight\"]\n all_v = hvd.allgather(v, process_set=this_set)\n if this_set == even_set:\n for start in range(0, all_v.numel(), v.numel()):\n assert torch.allclose(v.flatten(), all_v.flatten()[start:start+v.numel()])\n else:\n for start in range(0, all_v.numel(), v.numel()):\n if start // v.numel() == this_set.rank():\n continue\n # They might randomly agree by chance, but that's extremely unlikely:\n assert not torch.allclose(v.flatten(), all_v.flatten()[start:start + v.numel()])\n\n hvd.remove_process_set(odd_set)\n hvd.remove_process_set(even_set)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.set_default_tensor_type", "torch.cat", "torch.zeros", "torch.load", "torch.nn.Embedding", "torch.FloatTensor", "torch.cuda.is_available", "torch.allclose", "torch.save", "torch.ones", "torch.eq", "torch.randn", "torch.tensor", "torch.nn.functional.relu", "torch.rand", "torch.optim.SGD", "torch.nn.BatchNorm1d", "torch.LongTensor", "torch.nn.Parameter", "torch.empty", "torch.nn.Conv2d", "torch.is_tensor", "torch.cuda.FloatTensor", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.cuda.device_count", "torch.Tensor", "numpy.random.seed", "torch.manual_seed", "numpy.linalg.norm", "numpy.ones", "torch.optim.Optimizer.__subclasses__", "torch.IntTensor", "numpy.random.uniform", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AI-App/Python-Control
[ "c2f6f8ab94bbc8b5ef1deb33c3d2df39e00d22bf", "c2f6f8ab94bbc8b5ef1deb33c3d2df39e00d22bf" ]
[ "control/matlab/wrappers.py", "control/tests/descfcn_test.py" ]
[ "\"\"\"\nWrappers for the MATLAB compatibility module\n\"\"\"\n\nimport numpy as np\nfrom ..statesp import ss\nfrom ..xferfcn import tf\nfrom ..ctrlutil import issys\nfrom ..exception import ControlArgument\nfrom scipy.signal import zpk2tf\nfrom warnings import warn\n\n__all__ = ['bode', 'nyquist', 'ngrid', 'dcgain']\n\ndef bode(*args, **kwargs):\n \"\"\"bode(syslist[, omega, dB, Hz, deg, ...])\n\n Bode plot of the frequency response\n\n Plots a bode gain and phase diagram\n\n Parameters\n ----------\n sys : LTI, or list of LTI\n System for which the Bode response is plotted and give. Optionally\n a list of systems can be entered, or several systems can be\n specified (i.e. several parameters). The sys arguments may also be\n interspersed with format strings. A frequency argument (array_like)\n may also be added, some examples:\n * >>> bode(sys, w) # one system, freq vector\n * >>> bode(sys1, sys2, ..., sysN) # several systems\n * >>> bode(sys1, sys2, ..., sysN, w)\n * >>> bode(sys1, 'plotstyle1', ..., sysN, 'plotstyleN') # + plot formats\n omega: freq_range\n Range of frequencies in rad/s\n dB : boolean\n If True, plot result in dB\n Hz : boolean\n If True, plot frequency in Hz (omega must be provided in rad/sec)\n deg : boolean\n If True, return phase in degrees (else radians)\n plot : boolean\n If True, plot magnitude and phase\n\n Examples\n --------\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> mag, phase, omega = bode(sys)\n\n .. todo::\n\n Document these use cases\n\n * >>> bode(sys, w)\n * >>> bode(sys1, sys2, ..., sysN)\n * >>> bode(sys1, sys2, ..., sysN, w)\n * >>> bode(sys1, 'plotstyle1', ..., sysN, 'plotstyleN')\n \"\"\"\n from ..freqplot import bode_plot\n\n # If first argument is a list, assume python-control calling format\n if hasattr(args[0], '__iter__'):\n return bode_plot(*args, **kwargs)\n\n # Parse input arguments\n syslist, omega, args, other = _parse_freqplot_args(*args)\n kwargs.update(other)\n\n # Call the bode command\n return bode_plot(syslist, omega, *args, **kwargs)\n\n\ndef nyquist(*args, **kwargs):\n \"\"\"nyquist(syslist[, omega])\n\n Nyquist plot of the frequency response\n\n Plots a Nyquist plot for the system over a (optional) frequency range.\n\n Parameters\n ----------\n sys1, ..., sysn : list of LTI\n List of linear input/output systems (single system is OK).\n omega : array_like\n Set of frequencies to be evaluated, in rad/sec.\n\n Returns\n -------\n real : ndarray (or list of ndarray if len(syslist) > 1))\n real part of the frequency response array\n imag : ndarray (or list of ndarray if len(syslist) > 1))\n imaginary part of the frequency response array\n omega : ndarray (or list of ndarray if len(syslist) > 1))\n frequencies in rad/s\n\n \"\"\"\n from ..freqplot import nyquist_plot\n\n # If first argument is a list, assume python-control calling format\n if hasattr(args[0], '__iter__'):\n return nyquist_plot(*args, **kwargs)\n\n # Parse arguments\n syslist, omega, args, other = _parse_freqplot_args(*args)\n kwargs.update(other)\n\n # Call the nyquist command\n kwargs['return_contour'] = True\n _, contour = nyquist_plot(syslist, omega, *args, **kwargs)\n\n # Create the MATLAB output arguments\n freqresp = syslist(contour)\n real, imag = freqresp.real, freqresp.imag\n return real, imag, contour.imag\n\n\ndef _parse_freqplot_args(*args):\n \"\"\"Parse arguments to frequency plot routines (bode, nyquist)\"\"\"\n syslist, plotstyle, omega, other = [], [], None, {}\n i = 0;\n while i < len(args):\n # Check to see if this is a 
system of some sort\n if issys(args[i]):\n # Append the system to our list of systems\n syslist.append(args[i])\n i += 1\n\n # See if the next object is a plotsytle (string)\n if (i < len(args) and isinstance(args[i], str)):\n plotstyle.append(args[i])\n i += 1\n\n # Go on to the next argument\n continue\n\n # See if this is a frequency list\n elif isinstance(args[i], (list, np.ndarray)):\n omega = args[i]\n i += 1\n break\n\n # See if this is a frequency range\n elif isinstance(args[i], tuple) and len(args[i]) == 2:\n other['omega_limits'] = args[i]\n i += 1\n\n else:\n raise ControlArgument(\"unrecognized argument type\")\n\n # Check to make sure that we processed all arguments\n if (i < len(args)):\n raise ControlArgument(\"not all arguments processed\")\n\n # Check to make sure we got the same number of plotstyles as systems\n if (len(plotstyle) != 0 and len(syslist) != len(plotstyle)):\n raise ControlArgument(\n \"number of systems and plotstyles should be equal\")\n\n # Warn about unimplemented plotstyles\n #! TODO: remove this when plot styles are implemented in bode()\n #! TODO: uncomment unit test code that tests this out\n if (len(plotstyle) != 0):\n warn(\"Warning (matlab.bode): plot styles not implemented\");\n\n if len(syslist) == 0:\n raise ControlArgument(\"no systems specified\")\n elif len(syslist) == 1:\n # If only one system given, retun just that system (not a list)\n syslist = syslist[0]\n\n return syslist, omega, plotstyle, other\n\n\nfrom ..nichols import nichols_grid\ndef ngrid():\n return nichols_grid()\nngrid.__doc__ = nichols_grid.__doc__\n\n\ndef dcgain(*args):\n '''\n Compute the gain of the system in steady state.\n\n The function takes either 1, 2, 3, or 4 parameters:\n\n Parameters\n ----------\n A, B, C, D: array-like\n A linear system in state space form.\n Z, P, k: array-like, array-like, number\n A linear system in zero, pole, gain form.\n num, den: array-like\n A linear system in transfer function form.\n sys: LTI (StateSpace or TransferFunction)\n A linear system object.\n\n Returns\n -------\n gain: ndarray\n The gain of each output versus each input:\n :math:`y = gain \\\\cdot u`\n\n Notes\n -----\n This function is only useful for systems with invertible system\n matrix ``A``.\n\n All systems are first converted to state space form. The function then\n computes:\n\n .. 
math:: gain = - C \\\\cdot A^{-1} \\\\cdot B + D\n '''\n #Convert the parameters to state space form\n if len(args) == 4:\n A, B, C, D = args\n return ss(A, B, C, D).dcgain()\n elif len(args) == 3:\n Z, P, k = args\n num, den = zpk2tf(Z, P, k)\n return tf(num, den).dcgain()\n elif len(args) == 2:\n num, den = args\n return tf(num, den).dcgain()\n elif len(args) == 1:\n sys, = args\n return sys.dcgain()\n else:\n raise ValueError(\"Function ``dcgain`` needs either 1, 2, 3 or 4 \"\n \"arguments.\")\n", "\"\"\"descfcn_test.py - test describing functions and related capabilities\n\nRMM, 23 Jan 2021\n\nThis set of unit tests covers the various operatons of the descfcn module, as\nwell as some of the support functions associated with static nonlinearities.\n\n\"\"\"\n\nimport pytest\n\nimport numpy as np\nimport control as ct\nimport math\nfrom control.descfcn import saturation_nonlinearity, \\\n friction_backlash_nonlinearity, relay_hysteresis_nonlinearity\n\n\n# Static function via a class\nclass saturation_class:\n # Static nonlinear saturation function\n def __call__(self, x, lb=-1, ub=1):\n return np.clip(x, lb, ub)\n\n # Describing function for a saturation function\n def describing_function(self, a):\n if -1 <= a and a <= 1:\n return 1.\n else:\n b = 1/a\n return 2/math.pi * (math.asin(b) + b * math.sqrt(1 - b**2))\n\n\n# Static function without a class\ndef saturation(x):\n return np.clip(x, -1, 1)\n\n\n# Static nonlinear system implementing saturation\[email protected]\ndef satsys():\n satfcn = saturation_class()\n def _satfcn(t, x, u, params):\n return satfcn(u)\n return ct.NonlinearIOSystem(None, outfcn=_satfcn, input=1, output=1)\n\n\ndef test_static_nonlinear_call(satsys):\n # Make sure that the saturation system is a static nonlinearity\n assert satsys._isstatic()\n\n # Make sure the saturation function is doing the right computation\n input = [-2, -1, -0.5, 0, 0.5, 1, 2]\n desired = [-1, -1, -0.5, 0, 0.5, 1, 1]\n for x, y in zip(input, desired):\n np.testing.assert_allclose(satsys(x), y)\n\n # Test squeeze properties\n assert satsys(0.) 
== 0.\n assert satsys([0.], squeeze=True) == 0.\n np.testing.assert_allclose(satsys([0.]), [0.])\n\n # Test SIMO nonlinearity\n def _simofcn(t, x, u, params):\n return np.array([np.cos(u), np.sin(u)])\n simo_sys = ct.NonlinearIOSystem(None, outfcn=_simofcn, input=1, output=2)\n np.testing.assert_allclose(simo_sys([0.]), [1, 0])\n np.testing.assert_allclose(simo_sys([0.], squeeze=True), [1, 0])\n\n # Test MISO nonlinearity\n def _misofcn(t, x, u, params={}):\n return np.array([np.sin(u[0]) * np.cos(u[1])])\n miso_sys = ct.NonlinearIOSystem(None, outfcn=_misofcn, input=2, output=1)\n np.testing.assert_allclose(miso_sys([0, 0]), [0])\n np.testing.assert_allclose(miso_sys([0, 0], squeeze=True), [0])\n\n\n# Test saturation describing function in multiple ways\ndef test_saturation_describing_function(satsys):\n satfcn = saturation_class()\n\n # Store the analytic describing function for comparison\n amprange = np.linspace(0, 10, 100)\n df_anal = [satfcn.describing_function(a) for a in amprange]\n\n # Compute describing function for a static function\n df_fcn = ct.describing_function(saturation, amprange)\n np.testing.assert_almost_equal(df_fcn, df_anal, decimal=3)\n\n # Compute describing function for a describing function nonlinearity\n df_fcn = ct.describing_function(satfcn, amprange)\n np.testing.assert_almost_equal(df_fcn, df_anal, decimal=3)\n\n # Compute describing function for a static I/O system\n df_sys = ct.describing_function(satsys, amprange)\n np.testing.assert_almost_equal(df_sys, df_anal, decimal=3)\n\n # Compute describing function on an array of values\n df_arr = ct.describing_function(satsys, amprange)\n np.testing.assert_almost_equal(df_arr, df_anal, decimal=3)\n\n # Evaluate static function at a negative amplitude\n with pytest.raises(ValueError, match=\"cannot evaluate\"):\n ct.describing_function(saturation, -1)\n\n # Create describing function nonlinearity w/out describing_function method\n # and make sure it drops through to the underlying computation\n class my_saturation(ct.DescribingFunctionNonlinearity):\n def __call__(self, x):\n return saturation(x)\n satfcn_nometh = my_saturation()\n df_nometh = ct.describing_function(satfcn_nometh, amprange)\n np.testing.assert_almost_equal(df_nometh, df_anal, decimal=3)\n\n\[email protected](\"fcn, amin, amax\", [\n [saturation_nonlinearity(1), 0, 10],\n [friction_backlash_nonlinearity(2), 1, 10],\n [relay_hysteresis_nonlinearity(1, 1), 3, 10],\n ])\ndef test_describing_function(fcn, amin, amax):\n # Store the analytic describing function for comparison\n amprange = np.linspace(amin, amax, 100)\n df_anal = [fcn.describing_function(a) for a in amprange]\n\n # Compute describing function on an array of values\n df_arr = ct.describing_function(\n fcn, amprange, zero_check=False, try_method=False)\n np.testing.assert_almost_equal(df_arr, df_anal, decimal=1)\n\n # Make sure the describing function method also works\n df_meth = ct.describing_function(fcn, amprange, zero_check=False)\n np.testing.assert_almost_equal(df_meth, df_anal)\n\n # Make sure that evaluation at negative amplitude generates an exception\n with pytest.raises(ValueError, match=\"cannot evaluate\"):\n ct.describing_function(fcn, -1)\n\n\ndef test_describing_function_plot():\n # Simple linear system with at most 1 intersection\n H_simple = ct.tf([1], [1, 2, 2, 1])\n omega = np.logspace(-1, 2, 100)\n\n # Saturation nonlinearity\n F_saturation = ct.descfcn.saturation_nonlinearity(1)\n amp = np.linspace(1, 4, 10)\n\n # No intersection\n xsects = 
ct.describing_function_plot(H_simple, F_saturation, amp, omega)\n assert xsects == []\n\n # One intersection\n H_larger = H_simple * 8\n xsects = ct.describing_function_plot(H_larger, F_saturation, amp, omega)\n for a, w in xsects:\n np.testing.assert_almost_equal(\n H_larger(1j*w),\n -1/ct.describing_function(F_saturation, a), decimal=5)\n\n # Multiple intersections\n H_multiple = H_simple * ct.tf(*ct.pade(5, 4)) * 4\n omega = np.logspace(-1, 3, 50)\n F_backlash = ct.descfcn.friction_backlash_nonlinearity(1)\n amp = np.linspace(0.6, 5, 50)\n xsects = ct.describing_function_plot(H_multiple, F_backlash, amp, omega)\n for a, w in xsects:\n np.testing.assert_almost_equal(\n -1/ct.describing_function(F_backlash, a),\n H_multiple(1j*w), decimal=5)\n\ndef test_describing_function_exceptions():\n # Describing function with non-zero bias\n with pytest.warns(UserWarning, match=\"asymmetric\"):\n saturation = ct.descfcn.saturation_nonlinearity(lb=-1, ub=2)\n assert saturation(-3) == -1\n assert saturation(3) == 2\n\n # Turn off the bias check\n bias = ct.describing_function(saturation, 0, zero_check=False)\n\n # Function should evaluate to zero at zero amplitude\n f = lambda x: x + 0.5\n with pytest.raises(ValueError, match=\"must evaluate to zero\"):\n bias = ct.describing_function(f, 0, zero_check=True)\n\n # Evaluate at a negative amplitude\n with pytest.raises(ValueError, match=\"cannot evaluate\"):\n ct.describing_function(saturation, -1)\n\n # Describing function with bad label\n H_simple = ct.tf([8], [1, 2, 2, 1])\n F_saturation = ct.descfcn.saturation_nonlinearity(1)\n amp = np.linspace(1, 4, 10)\n with pytest.raises(ValueError, match=\"formatting string\"):\n ct.describing_function_plot(H_simple, F_saturation, amp, label=1)\n" ]
[ [ "scipy.signal.zpk2tf" ], [ "numpy.linspace", "numpy.clip", "numpy.logspace", "numpy.cos", "numpy.sin", "numpy.testing.assert_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mjpolak/K3D-jupyter
[ "539c53cab580d55b8841bb87589ab3d4cf95bdb0", "539c53cab580d55b8841bb87589ab3d4cf95bdb0", "539c53cab580d55b8841bb87589ab3d4cf95bdb0" ]
[ "k3d/objects.py", "k3d/test/test_visual_points.py", "docs/source/showcase/lines.py" ]
[ "import warnings\n\nimport ipywidgets as widgets\nimport numpy as np\nfrom traitlets import (\n Any,\n Bool,\n Bytes,\n Dict,\n Float,\n Int,\n Integer,\n List,\n TraitError,\n Unicode,\n Union,\n validate,\n)\nfrom traittypes import Array\n\nfrom ._version import __version__ as version\nfrom .helpers import (\n array_serialization_wrap,\n callback_serialization_wrap,\n get_bounding_box_point,\n get_bounding_box_points,\n get_bounding_box,\n shape_validation,\n validate_sparse_voxels,\n)\nfrom .validation.stl import AsciiStlData, BinaryStlData\n\nEPSILON = np.finfo(np.float32).eps\n\n\nclass TimeSeries(Union):\n def __init__(self, trait):\n if isinstance(trait, list):\n Union.__init__(self, trait + [Dict(t) for t in trait])\n else:\n Union.__init__(self, [trait, Dict(trait)])\n\n\nclass ListOrArray(List):\n _cast_types = (tuple, np.ndarray)\n\n def __init__(self, *args, **kwargs):\n self._empty_ok = kwargs.pop(\"empty_ok\", False)\n List.__init__(self, *args, **kwargs)\n\n def validate_elements(self, obj, value):\n if self._empty_ok and len(value) == 0:\n return list(value)\n return super(ListOrArray, self).validate_elements(obj, value)\n\n\nclass VoxelChunk(widgets.Widget):\n \"\"\"\n Voxel chunk class for selective updating voxels\n \"\"\"\n\n _model_name = Unicode(\"ChunkModel\").tag(sync=True)\n _model_module = Unicode(\"k3d\").tag(sync=True)\n _model_module_version = Unicode(version).tag(sync=True)\n\n id = Int().tag(sync=True)\n voxels = Array(dtype=np.uint8).tag(sync=True, **array_serialization_wrap(\"voxels\"))\n coord = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap(\"coord\"))\n multiple = Int().tag(sync=True)\n compression_level = Integer().tag(sync=True)\n\n def push_data(self, field):\n self.notify_change({\"name\": field, \"type\": \"change\"})\n\n def __init__(self, **kwargs):\n self.id = id(self)\n super(VoxelChunk, self).__init__(**kwargs)\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n\nclass Drawable(widgets.Widget):\n \"\"\"\n Base class for drawable objects and groups.\n \"\"\"\n\n _model_name = Unicode(\"ObjectModel\").tag(sync=True)\n _model_module = Unicode(\"k3d\").tag(sync=True)\n _model_module_version = Unicode(version).tag(sync=True)\n\n id = Integer().tag(sync=True)\n name = Unicode(default_value=None, allow_none=True).tag(sync=True)\n visible = TimeSeries(Bool(True)).tag(sync=True)\n compression_level = Integer().tag(sync=True)\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n def __init__(self, **kwargs):\n self.id = id(self)\n\n super(Drawable, self).__init__(**kwargs)\n\n def __iter__(self):\n return (self,).__iter__()\n\n def __add__(self, other):\n return Group(self, other)\n\n def fetch_data(self, field):\n \"\"\"Request updating the value of a field modified in browser.\n\n For data modified in the widget on the browser side, this triggers an asynchronous\n update of the value in the Python kernel.\n\n Only specific features require this mechanism, e.g. the in-browser editing of voxels.\n\n Arguments:\n field: `str`.\n The field name.\"\"\"\n self.send({\"msg_type\": \"fetch\", \"field\": field})\n\n def push_data(self, field):\n \"\"\"Request updating the value of a field modified in backend.\n\n For data modified in the backend side, this triggers an asynchronous\n update of the value in the browser widget.\n\n Only specific features require this mechanism, e.g. 
the in-browser editing of voxels.\n\n Arguments:\n field: `str`.\n The field name.\"\"\"\n self.notify_change({\"name\": field, \"type\": \"change\"})\n\n def _ipython_display_(self, **kwargs):\n \"\"\"Called when `IPython.display.display` is called on the widget.\"\"\"\n import k3d\n\n plot = k3d.plot()\n plot += self\n plot.display()\n\n\nclass DrawableWithVoxelCallback(Drawable):\n \"\"\"\n Base class for drawable with voxels callback handling\n \"\"\"\n\n click_callback = None\n hover_callback = None\n\n def __init__(self, **kwargs):\n super(DrawableWithVoxelCallback, self).__init__(**kwargs)\n\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get(\"msg_type\", \"\") == \"click_callback\":\n if self.click_callback is not None:\n self.click_callback(\n content[\"coord\"][\"x\"], content[\"coord\"][\"y\"], content[\"coord\"][\"z\"]\n )\n\n if content.get(\"msg_type\", \"\") == \"hover_callback\":\n if self.hover_callback is not None:\n self.hover_callback(\n content[\"coord\"][\"x\"], content[\"coord\"][\"y\"], content[\"coord\"][\"z\"]\n )\n\n\nclass DrawableWithCallback(Drawable):\n \"\"\"\n Base class for drawable with callback handling\n \"\"\"\n\n click_callback = Any(default_value=None, allow_none=True).tag(\n sync=True, **callback_serialization_wrap(\"click_callback\")\n )\n hover_callback = Any(default_value=None, allow_none=True).tag(\n sync=True, **callback_serialization_wrap(\"hover_callback\")\n )\n\n def __init__(self, **kwargs):\n super(DrawableWithCallback, self).__init__(**kwargs)\n\n self.on_msg(self._handle_custom_msg)\n\n def _handle_custom_msg(self, content, buffers):\n if content.get(\"msg_type\", \"\") == \"click_callback\":\n if self.click_callback is not None:\n self.click_callback(content)\n\n if content.get(\"msg_type\", \"\") == \"hover_callback\":\n if self.hover_callback is not None:\n self.hover_callback(content)\n\n\nclass Group(Drawable):\n \"\"\"\n An aggregated group of Drawables, itself a Drawable.\n\n It can be inserted or removed from a Plot including all members.\n \"\"\"\n\n __objs = None\n\n def __init__(self, *args):\n self.__objs = tuple(\n self.__assert_drawable(drawable)\n for drawables in args\n for drawable in drawables\n )\n\n def __iter__(self):\n return self.__objs.__iter__()\n\n def __setattr__(self, key, value):\n \"\"\"Special method override which allows for setting model matrix for all members of the group.\"\"\"\n if key == \"model_matrix\":\n for d in self:\n d.model_matrix = value\n else:\n super(Group, self).__setattr__(key, value)\n\n @staticmethod\n def __assert_drawable(arg):\n assert isinstance(arg, Drawable)\n\n return arg\n\n\n# DRAWABLE OBJECTS\n\n\nclass Line(Drawable):\n \"\"\"\n A path (polyline) made up of line segments.\n\n Attributes:\n vertices: `array_like`.\n An array with (x, y, z) coordinates of segment endpoints.\n colors: `array_like`.\n Same-length array of (`int`) packed RGB color of the points (0xff0000 is red, 0xff is blue).\n color: `int`.\n Packed RGB color of the lines (0xff0000 is red, 0xff is blue) when `colors` is empty.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each vertex.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n width: `float`.\n The thickness of the lines.\n shader: `str`.\n Display style (name of the shader used) of the lines.\n Legal values are:\n\n :`simple`: simple lines,\n\n :`thick`: thick lines,\n\n :`mesh`: high precision triangle mesh of segments (high quality and GPU load).\n radial_segments: 'int':\n Number of segmented faces around the circumference of the tube.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n\n vertices = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"vertices\")\n )\n colors = TimeSeries(Array(dtype=np.uint32)).tag(\n sync=True, **array_serialization_wrap(\"colors\")\n )\n color = TimeSeries(Int(min=0, max=0xFFFFFF)).tag(sync=True)\n width = TimeSeries(Float(min=EPSILON, default_value=0.01)).tag(sync=True)\n attribute = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"attribute\")\n )\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n shader = TimeSeries(Unicode()).tag(sync=True)\n radial_segments = TimeSeries(Int()).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Line, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Line\")\n\n def get_bounding_box(self):\n return get_bounding_box_points(self.vertices, self.model_matrix)\n\n @validate(\"colors\")\n def _validate_colors(self, proposal):\n if type(proposal[\"value\"]) is dict or type(self.vertices) is dict:\n return proposal[\"value\"]\n\n required = self.vertices.size // 3 # (x, y, z) triplet per 1 color\n actual = proposal[\"value\"].size\n if actual != 0 and required != actual:\n raise TraitError(\n \"colors has wrong size: %s (%s required)\" % (actual, required)\n )\n return proposal[\"value\"]\n\n\nclass MarchingCubes(DrawableWithCallback):\n \"\"\"\n An isosurface in a scalar field obtained through Marching Cubes algorithm.\n\n The default domain of the scalar field is -0.5 < x, y, z < 0.5.\n If the domain should be different, the bounding box needs to be transformed using the model_matrix.\n\n Attributes:\n scalar_field: `array_like`.\n A 3D scalar field of values.\n level: `float`.\n Value at the computed isosurface.\n spacings_x: `array_like`.\n A spacings in x axis. Should match to scalar_field shape.\n spacings_y: `array_like`.\n A spacings in y axis. Should match to scalar_field shape.\n spacings_z: `array_like`.\n A spacings in z axis. 
Should match to scalar_field shape.\n color: `int`.\n Packed RGB color of the isosurface (0xff0000 is red, 0xff is blue).\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n opacity: `float`.\n Opacity of mesh.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n scalar_field = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"scalar_field\")\n )\n spacings_x = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"spacings_x\")\n )\n spacings_y = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"spacings_y\")\n )\n spacings_z = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"spacings_z\")\n )\n level = Float().tag(sync=True)\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n def __init__(self, **kwargs):\n super(MarchingCubes, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"MarchingCubes\")\n\n\nclass Mesh(DrawableWithCallback):\n \"\"\"\n A 3D triangles mesh.\n\n Attributes:\n vertices: `array_like`.\n Array of triangle vertices: float (x, y, z) coordinate triplets.\n indices: `array_like`.\n Array of vertex indices: int triplets of indices from vertices array.\n color: `int`.\n Packed RGB color of the mesh (0xff0000 is red, 0xff is blue) when not using color maps.\n colors: `array_like`.\n Same-length array of (`int`) packed RGB color of the points (0xff0000 is red, 0xff is blue).\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each vertex.\n triangles_attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each triangle.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n opacity: `float`.\n Opacity of mesh.\n volume: `array_like`.\n 3D array of `float`\n volume_bounds: `array_like`.\n 6-element tuple specifying the bounds of the volume data (x0, x1, y0, y1, z0, z1)\n texture: `bytes`.\n Image data in a specific format.\n texture_file_format: `str`.\n Format of the data, it should be the second part of MIME format of type 'image/',\n for example 'jpeg', 'png', 'gif', 'tiff'.\n uvs: `array_like`.\n Array of float uvs for the texturing, coresponding to each vertex.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n vertices = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"vertices\")\n )\n indices = TimeSeries(Array(dtype=np.uint32)).tag(\n sync=True, **array_serialization_wrap(\"indices\")\n )\n color = TimeSeries(Int(min=0, max=0xFFFFFF)).tag(sync=True)\n colors = TimeSeries(Array(dtype=np.uint32)).tag(\n sync=True, **array_serialization_wrap(\"colors\")\n )\n attribute = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"attribute\")\n )\n triangles_attribute = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"triangles_attribute\")\n )\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n wireframe = TimeSeries(Bool()).tag(sync=True)\n flat_shading = TimeSeries(Bool()).tag(sync=True)\n side = TimeSeries(Unicode()).tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n volume = TimeSeries(Array()).tag(sync=True, **array_serialization_wrap(\"volume\"))\n volume_bounds = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"volume_bounds\")\n )\n texture = Bytes(allow_none=True).tag(sync=True, **array_serialization_wrap(\"texture\"))\n texture_file_format = Unicode(allow_none=True).tag(sync=True)\n uvs = TimeSeries(Array()).tag(sync=True, **array_serialization_wrap(\"uvs\"))\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacity_function\")\n )\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Mesh, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Mesh\")\n\n @validate(\"colors\")\n def _validate_colors(self, proposal):\n if type(proposal[\"value\"]) is dict or type(self.vertices) is dict:\n return proposal[\"value\"]\n\n required = self.vertices.size // 3 # (x, y, z) triplet per 1 color\n actual = proposal[\"value\"].size\n if actual != 0 and required != actual:\n raise TraitError(\n \"colors has wrong size: %s (%s required)\" % (actual, required)\n )\n return proposal[\"value\"]\n\n @validate(\"volume\")\n def _validate_volume(self, proposal):\n if type(proposal[\"value\"]) is dict:\n return proposal[\"value\"]\n\n if type(proposal[\"value\"]) is np.ndarray and proposal[\n \"value\"\n 
].dtype is np.dtype(object):\n return proposal[\"value\"].tolist()\n\n if proposal[\"value\"].shape == (0,):\n return np.array(proposal[\"value\"], dtype=np.float32)\n\n required = [np.float16, np.float32]\n actual = proposal[\"value\"].dtype\n\n if actual not in required:\n warnings.warn(\"wrong dtype: %s (%s required)\" % (actual, required))\n\n return proposal[\"value\"].astype(np.float32)\n\n return proposal[\"value\"]\n\n def get_bounding_box(self):\n return get_bounding_box_points(self.vertices, self.model_matrix)\n\n\nclass Points(Drawable):\n \"\"\"\n A point cloud.\n\n Attributes:\n positions: `array_like`.\n Array with (x, y, z) coordinates of the points.\n colors: `array_like`.\n Same-length array of (`int`) packed RGB color of the points (0xff0000 is red, 0xff is blue).\n color: `int`.\n Packed RGB color of the points (0xff0000 is red, 0xff is blue) when `colors` is empty.\n point_size: `float`.\n Diameter of the balls representing the points in 3D space.\n point_sizes: `array_like`.\n Same-length array of `float` sizes of the points.\n shader: `str`.\n Display style (name of the shader used) of the points.\n Legal values are:\n\n :`flat`: simple circles with uniform color,\n\n :`dot`: simple dot with uniform color,\n\n :`3d`: little 3D balls,\n\n :`3dSpecular`: little 3D balls with specular lightning,\n\n :`mesh`: high precision triangle mesh of a ball (high quality and GPU load).\n mesh_detail: `int`.\n Default is 2. Setting this to a value greater than 0 adds more vertices making it no longer an\n icosahedron. When detail is greater than 1, it's effectively a sphere. Only valid if shader='mesh'\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each point.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n opacity_function: `array`.\n A list of float tuples (attribute value, opacity), sorted by attribute value. 
The first\n tuples should have value 0.0, the last 1.0; opacity is in the range 0.0 to 1.0.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n positions = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"positions\")\n )\n colors = TimeSeries(Array(dtype=np.uint32)).tag(\n sync=True, **array_serialization_wrap(\"colors\")\n )\n color = TimeSeries(Int(min=0, max=0xFFFFFF)).tag(sync=True)\n point_size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n point_sizes = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"point_sizes\")\n )\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n opacities = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacities\")\n )\n shader = TimeSeries(Unicode()).tag(sync=True)\n mesh_detail = TimeSeries(Int(min=0, max=8)).tag(sync=True)\n attribute = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"attribute\")\n )\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacity_function\")\n )\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Points, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Points\")\n\n @validate(\"colors\")\n def _validate_colors(self, proposal):\n if type(proposal[\"value\"]) is dict or type(self.positions) is dict:\n return proposal[\"value\"]\n\n required = self.positions.size // 3 # (x, y, z) triplet per 1 color\n actual = proposal[\"value\"].size\n if actual != 0 and required != actual:\n raise TraitError(\n \"colors has wrong size: %s (%s required)\" % (actual, required)\n )\n return proposal[\"value\"]\n\n def get_bounding_box(self):\n return get_bounding_box_points(self.positions, self.model_matrix)\n\n\nclass STL(Drawable):\n \"\"\"\n A STereoLitograpy 3D geometry.\n\n STL is a popular format introduced for 3D printing. 
There are two sub-formats - ASCII and binary.\n\n Attributes:\n text: `str`.\n STL data in text format (ASCII STL).\n binary: `bytes`.\n STL data in binary format (Binary STL).\n The `text` attribute should be set to None when using Binary STL.\n color: `int`.\n Packed RGB color of the resulting mesh (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = AsciiStlData(allow_none=True, default_value=None).tag(sync=True)\n binary = BinaryStlData(allow_none=True,\n default_value=None).tag(sync=True, **array_serialization_wrap(\"binary\"))\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(STL, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"STL\")\n\n def get_bounding_box(self):\n warnings.warn(\"STL bounding box is still not supported\")\n return [-1, 1, -1, 1, -1, 1]\n\n\nclass Surface(DrawableWithCallback):\n \"\"\"\n Surface plot of a 2D function z = f(x, y).\n\n The default domain of the scalar field is -0.5 < x, y < 0.5.\n If the domain should be different, the bounding box needs to be transformed using the model_matrix.\n\n Attributes:\n heights: `array_like`.\n 2D scalar field of Z values.\n color: `int`.\n Packed RGB color of the resulting mesh (0xff0000 is red, 0xff is blue).\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n flat_shading: `bool`.\n Whether mesh should display with flat shading.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each vertex.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n heights = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"heights\")\n )\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n wireframe = Bool().tag(sync=True)\n flat_shading = Bool().tag(sync=True)\n attribute = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"attribute\")\n )\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Surface, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Surface\")\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass Text(Drawable):\n \"\"\"\n Text rendered using KaTeX with a 3D position.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Coordinates (x, y, z) of the text's position.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n is_html: `Boolean`.\n Whether text should be interpreted as HTML insted of KaTeX.\n on_top: `Boolean`.\n Render order with 3d object\n reference_point: `str`.\n Two-letter string representing the text's alignment.\n\n First letter: 'l', 'c' or 'r': left, center or right\n\n Second letter: 't', 'c' or 'b': top, center or bottom.\n size: `float`.\n Font size in 'em' HTML units.\n label_box: `Boolean`.\n Label background box.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=3, maxlen=3)).tag(sync=True)\n is_html = Bool(False).tag(sync=True)\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n reference_point = Unicode().tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n on_top = Bool().tag(sync=True)\n label_box = Bool().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(Text, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Text\")\n\n def get_bounding_box(self):\n return get_bounding_box_point(self.position)\n\n\nclass Text2d(Drawable):\n \"\"\"\n Text rendered using KaTeX with a fixed 2D position, independent of camera settings.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Ratios (r_x, r_y) of the text's position in range (0, 1) - relative to canvas size.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n is_html: `Boolean`.\n Whether text should be interpreted as HTML insted of KaTeX.\n reference_point: `str`.\n Two-letter string representing the text's alignment.\n\n First letter: 'l', 'c' or 'r': left, center or right\n\n Second letter: 't', 'c' or 'b': top, center or bottom.\n size: `float`.\n Font size in 'em' HTML units.\n label_box: `Boolean`.\n Label background box.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n 
is_html = Bool(False).tag(sync=True)\n reference_point = Unicode().tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=2, maxlen=2)).tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n label_box = Bool().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(Text2d, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Text2d\")\n\n def get_bounding_box(self):\n return get_bounding_box_point(self.position)\n\n\nclass Label(Drawable):\n \"\"\"\n Label rendered using KaTeX with a 3D position.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Coordinates (x, y, z) of the text's position.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n on_top: `Boolean`.\n Render order with 3d object\n label_box: `Boolean`.\n Label background box.\n mode: `str`.\n Label node. Can be 'dynamic', 'local' or 'side'.\n is_html: `Boolean`.\n Whether text should be interpreted as HTML insted of KaTeX.\n max_length: `float`.\n Maximum length of line in % of half screen size.\n size: `float`.\n Font size in 'em' HTML units.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n mode = Unicode().tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n is_html = Bool(False).tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=3, maxlen=3)).tag(sync=True)\n color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n max_length = Float(min=0, max=1.0).tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n on_top = Bool().tag(sync=True)\n label_box = Bool().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(Label, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Label\")\n\n def get_bounding_box(self):\n return get_bounding_box_point(self.position)\n\n\nclass Texture(DrawableWithCallback):\n \"\"\"\n A 2D image displayed as a texture.\n\n By default, the texture image is mapped into the square: -0.5 < x, y < 0.5, z = 1.\n If the size (scale, aspect ratio) or position should be different then the texture should be transformed\n using the model_matrix.\n\n Attributes:\n binary: `bytes`.\n Image data in a specific format.\n file_format: `str`.\n Format of the data, it should be the second part of MIME format of type 'image/',\n for example 'jpeg', 'png', 'gif', 'tiff'.\n attribute: `array_like`.\n Array of float attribute for the color mapping, coresponding to each pixels.\n color_map: `list`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n opacity_function: `array`.\n A list of float tuples (attribute value, opacity), sorted by attribute value. The first\n tuples should have value 0.0, the last 1.0; opacity is in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n interpolation: `bool`.\n Whether data should be interpolatedor not.\n puv: `list`.\n A list of float triplets (x,y,z). 
The first triplet mean a position of left-bottom corner of texture.\n Second and third triplets means a base of coordinate system for texture.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n binary = Bytes(allow_none=True).tag(sync=True, **array_serialization_wrap(\"binary\"))\n file_format = Unicode(allow_none=True).tag(sync=True)\n attribute = Array().tag(sync=True, **array_serialization_wrap(\"attribute\"))\n puv = Array(dtype=np.float32).tag(sync=True, **array_serialization_wrap(\"puv\"))\n color_map = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n color_range = ListOrArray(minlen=2, maxlen=2, empty_ok=True).tag(sync=True)\n interpolation = TimeSeries(Bool()).tag(sync=True)\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacity_function\")\n )\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Texture, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Texture\")\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass TextureText(Drawable):\n \"\"\"\n A text in the 3D space rendered using a texture.\n\n Compared to Text and Text2d this drawable has less features (no KaTeX support), but the labels are located\n in the GPU memory, and not the browser's DOM tree. This has performance consequences, and may be preferable when\n many simple labels need to be displayed.\n\n Attributes:\n text: `str`.\n Content of the text.\n position: `list`.\n Coordinates (x, y, z) of the text's position.\n color: `int`.\n Packed RGB color of the text (0xff0000 is red, 0xff is blue).\n size: `float`.\n Size of the texture sprite containing the text.\n font_face: `str`.\n Name of the font to use for rendering the text.\n font_weight: `int`.\n Thickness of the characters in HTML-like units from the range (100, 900), where\n 400 is normal and 600 is bold font.\n font_size: `int`.\n The font size inside the sprite texture in px units. This does not affect the size of the\n text in the scene, only the accuracy and raster size of the texture.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n text = TimeSeries(Unicode()).tag(sync=True)\n position = TimeSeries(ListOrArray(minlen=3, maxlen=3)).tag(sync=True)\n color = TimeSeries(Int(min=0, max=0xFFFFFF)).tag(sync=True)\n size = TimeSeries(Float(min=EPSILON, default_value=1.0)).tag(sync=True)\n font_face = Unicode().tag(sync=True)\n font_weight = Int().tag(sync=True)\n font_size = Int().tag(sync=True)\n\n def __init__(self, **kwargs):\n super(TextureText, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"TextureText\")\n\n def get_bounding_box(self):\n return get_bounding_box_point(self.position)\n\n\nclass VectorField(Drawable):\n \"\"\"\n A dense 3D or 2D vector field.\n\n By default, the origins of the vectors are assumed to be a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n or -0.5 < x, y < 0.5 square, regardless of the passed vector field shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n The color of the vectors is a gradient from origin_color to head_color. Heads, when used, have uniform head_color.\n\n For sparse (i.e. 
not forming a grid) 3D vectors, use the `Vectors` drawable.\n\n Attributes:\n vectors: `array_like`.\n Vector field of shape (L, H, W, 3) for 3D fields or (H, W, 2) for 2D fields.\n colors: `array_like`.\n Twice the length of vectors array of int: packed RGB colors\n (0xff0000 is red, 0xff is blue).\n The array has consecutive pairs (origin_color, head_color) for vectors in row-major order.\n origin_color: `int`.\n Packed RGB color of the origins (0xff0000 is red, 0xff is blue) when `colors` is empty.\n head_color: `int`.\n Packed RGB color of the vector heads (0xff0000 is red, 0xff is blue) when `colors` is empty.\n use_head: `bool`.\n Whether vectors should display an arrow head.\n head_size: `float`.\n The size of the arrow heads.\n scale: `float`.\n Scale factor for the vector lengths, for artificially scaling the vectors in place.\n line_width: `float`.\n Width of the vector segments.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n vectors = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"vectors\")\n )\n colors = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap(\"colors\"))\n origin_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n head_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n use_head = Bool().tag(sync=True)\n head_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n scale = Float().tag(sync=True)\n line_width = Float(min=EPSILON, default_value=0.01).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(VectorField, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"VectorField\")\n\n @validate(\"vectors\")\n def _validate_vectors(self, proposal):\n shape = proposal[\"value\"].shape\n if len(shape) not in (3, 4) or len(shape) != shape[-1] + 1:\n raise TraitError(\n \"Vector field has invalid shape: {}, \"\n \"expected (L, H, W, 3) for a 3D or (H, W, 2) for a 2D field\".format(\n shape\n )\n )\n return np.array(proposal[\"value\"], np.float32)\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass Vectors(Drawable):\n \"\"\"\n 3D vectors.\n\n The color of the vectors is a gradient from origin_color to head_color. Heads, when used, have uniform head_color.\n\n For dense (i.e. 
forming a grid) 3D or 2D vectors, use the `VectorField` drawable.\n\n Attributes:\n vectors: `array_like`.\n The vectors as (dx, dy, dz) float triples.\n origins: `array_like`.\n Same-size array of (x, y, z) coordinates of vector origins.\n colors: `array_like`.\n Twice the length of vectors array of int: packed RGB colors\n (0xff0000 is red, 0xff is blue).\n The array has consecutive pairs (origin_color, head_color) for vectors in row-major order.\n origin_color: `int`.\n Packed RGB color of the origins (0xff0000 is red, 0xff is blue), default: same as color.\n head_color: `int`.\n Packed RGB color of the vector heads (0xff0000 is red, 0xff is blue), default: same as color.\n use_head: `bool`.\n Whether vectors should display an arrow head.\n head_size: `float`.\n The size of the arrow heads.\n labels: `list` of `str`.\n Captions to display next to the vectors.\n label_size: `float`.\n Label font size in 'em' HTML units.\n line_width: `float`.\n Width of the vector segments.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n origins = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"origins\")\n )\n vectors = Array(dtype=np.float32).tag(\n sync=True, **array_serialization_wrap(\"vectors\")\n )\n colors = Array(dtype=np.uint32).tag(sync=True, **array_serialization_wrap(\"colors\"))\n origin_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n head_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n use_head = Bool().tag(sync=True)\n head_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n labels = List().tag(sync=True)\n label_size = Float(min=EPSILON, default_value=1.0).tag(sync=True)\n line_width = Float(min=EPSILON, default_value=0.01).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Vectors, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Vectors\")\n\n def get_bounding_box(self):\n return get_bounding_box_points(\n np.stack([self.origins, self.vectors]), self.model_matrix\n )\n\n\nclass Volume(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the volume are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n\n Attributes:\n volume: `array_like`.\n 3D array of `float`.\n color_map: `array_like`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n opacity_function: `array`.\n A list of float tuples (attribute value, opacity), sorted by attribute value. 
The first\n typles should have value 0.0, the last 1.0; opacity is in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n samples: `float`.\n Number of iteration per 1 unit of space.\n alpha_coef: `float`.\n Alpha multiplier.\n shadow: `str`.\n Type of shadow on volume.\n\n Legal values are:\n\n :`off`: shadow disabled,\n\n :`on_demand`: update shadow map on demand ( self.shadow_map_update() ),\n\n :`dynamic`: update shadow map automaticaly every shadow_delay.\n shadow_delay: `float`.\n Minimum number of miliseconds between shadow map updates.\n shadow_res: `int`.\n Resolution of shadow map.\n interpolation: `bool`.\n Whether volume raycasting should interpolate data or not.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n volume = TimeSeries(Array()).tag(sync=True, **array_serialization_wrap(\"volume\"))\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacity_function\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n samples = TimeSeries(Float()).tag(sync=True)\n alpha_coef = TimeSeries(Float()).tag(sync=True)\n gradient_step = TimeSeries(Float()).tag(sync=True)\n shadow = TimeSeries(Unicode()).tag(sync=True)\n shadow_res = TimeSeries(Int(min=31, max=513, default_value=128)).tag(sync=True)\n shadow_delay = TimeSeries(Float()).tag(sync=True)\n ray_samples_count = TimeSeries(Int(min=1, max=128, default_value=16)).tag(sync=True)\n focal_length = TimeSeries(Float()).tag(sync=True)\n focal_plane = TimeSeries(Float()).tag(sync=True)\n interpolation = TimeSeries(Bool()).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Volume, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Volume\")\n\n @validate(\"volume\")\n def _validate_volume(self, proposal):\n if type(proposal[\"value\"]) is dict:\n return proposal[\"value\"]\n\n if type(proposal[\"value\"]) is np.ndarray and proposal[\n \"value\"\n ].dtype is np.dtype(object):\n return proposal[\"value\"].tolist()\n\n required = [np.float16, np.float32]\n actual = proposal[\"value\"].dtype\n\n if actual not in required:\n warnings.warn(\"wrong dtype: %s (%s required)\" % (actual, required))\n\n return proposal[\"value\"].astype(np.float32)\n\n return proposal[\"value\"]\n\n def shadow_map_update(self, direction=None):\n \"\"\"Request updating the shadow map in browser.\"\"\"\n\n self.send({\"msg_type\": \"shadow_map_update\", \"direction\": direction})\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass MIP(Drawable):\n \"\"\"\n 3D volumetric data.\n\n By default, the volume are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n\n Attributes:\n volume: `array_like`.\n 3D array of `float`.\n color_map: `array_like`.\n A list of float quadruplets (attribute value, R, G, B), sorted by attribute value. 
The first\n quadruplet should have value 0.0, the last 1.0; R, G, B are RGB color components in the range 0.0 to 1.0.\n opacity_function: `array`.\n A list of float tuples (attribute value, opacity), sorted by attribute value. The first\n typles should have value 0.0, the last 1.0; opacity is in the range 0.0 to 1.0.\n color_range: `list`.\n A pair [min_value, max_value], which determines the levels of color attribute mapped\n to 0 and 1 in the color map respectively.\n samples: `float`.\n Number of iteration per 1 unit of space.\n gradient_step: `float`\n Gradient light step.\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n volume = TimeSeries(Array()).tag(sync=True, **array_serialization_wrap(\"volume\"))\n color_map = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n opacity_function = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"opacity_function\")\n )\n color_range = TimeSeries(ListOrArray(minlen=2, maxlen=2, empty_ok=True)).tag(\n sync=True\n )\n gradient_step = TimeSeries(Float()).tag(sync=True)\n samples = TimeSeries(Float()).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(MIP, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"MIP\")\n\n @validate(\"volume\")\n def _validate_volume(self, proposal):\n if type(proposal[\"value\"]) is dict:\n return proposal[\"value\"]\n\n if type(proposal[\"value\"]) is np.ndarray and proposal[\n \"value\"\n ].dtype is np.dtype(object):\n return proposal[\"value\"].tolist()\n\n required = [np.float16, np.float32]\n actual = proposal[\"value\"].dtype\n\n if actual not in required:\n warnings.warn(\"wrong dtype: %s (%s required)\" % (actual, required))\n\n return proposal[\"value\"].astype(np.float32)\n\n return proposal[\"value\"]\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass Voxels(DrawableWithVoxelCallback):\n \"\"\"\n 3D volumetric data.\n\n Different grid size, shape and rotation can be obtained using model_matrix.\n\n Attributes:\n voxels: `array_like`.\n 3D array of `int` in range (0, 255).\n 0 means empty voxel, 1 and above refer to consecutive color_map entries.\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n\n The color defined at index i is for voxel value (i+1), e.g.:\n\n | color_map = [0xff, 0x00ff]\n | voxels =\n | [\n | 0, # empty voxel\n | 1, # blue voxel\n | 2 # red voxel\n | ]\n\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n voxels = Array(dtype=np.uint8).tag(sync=True, **array_serialization_wrap(\"voxels\"))\n color_map = Array(dtype=np.uint32).tag(\n sync=True, **array_serialization_wrap(\"voxels\")\n )\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, 
**array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(Voxels, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"Voxels\")\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass SparseVoxels(DrawableWithVoxelCallback):\n \"\"\"\n 3D volumetric data.\n\n By default, the voxels are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n Attributes:\n sparse_voxels: `array_like`.\n 2D array of `coords` in format [[x,y,z,v],[x,y,z,v]].\n v = 0 means empty voxel, 1 and above refer to consecutive color_map entries.\n space_size: `array_like`.\n Width, Height, Length of space\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n sparse_voxels = (\n Array(dtype=np.uint16)\n .tag(sync=True, **array_serialization_wrap(\"sparse_voxels\"))\n .valid(validate_sparse_voxels)\n )\n space_size = (\n Array(dtype=np.uint32)\n .tag(sync=True, **array_serialization_wrap(\"space_size\"))\n .valid(shape_validation(3))\n )\n color_map = Array(dtype=np.uint32).tag(\n sync=True, **array_serialization_wrap(\"color_map\")\n )\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(SparseVoxels, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"SparseVoxels\")\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n\n\nclass VoxelsGroup(DrawableWithVoxelCallback):\n \"\"\"\n 3D volumetric data.\n\n By default, the voxels are a grid inscribed in the -0.5 < x, y, z < 0.5 cube\n regardless of the passed voxel array shape (aspect ratio etc.).\n Different grid size, shape and rotation can be obtained using the model_matrix.\n\n Attributes:\n voxels_group: `array_like`.\n List of `chunks` in format {voxels: np.array, coord: [x,y,z], multiple: number}.\n space_size: `array_like`.\n Width, Height, Length of space\n color_map: `array_like`.\n Flat array of `int` packed RGB colors (0xff0000 is red, 0xff is blue).\n model_matrix: `array_like`.\n 4x4 model transform matrix.\n wireframe: `bool`.\n Whether mesh should display as wireframe.\n opacity: `float`.\n Opacity of voxels.\n outlines: `bool`.\n Whether mesh should display with outlines.\n outlines_color: `int`.\n Packed RGB color of the resulting outlines (0xff0000 is red, 0xff is blue)\n \"\"\"\n\n type = Unicode(read_only=True).tag(sync=True)\n\n _hold_remeshing = Bool(default_value=False).tag(sync=True)\n\n voxels_group = List().tag(sync=True, **array_serialization_wrap(\"voxels_group\"))\n chunks_ids = List().tag(sync=True)\n\n space_size = Array(dtype=np.uint32).tag(\n sync=True, **array_serialization_wrap(\"space_size\")\n )\n color_map = Array(dtype=np.uint32).tag(\n sync=True, 
**array_serialization_wrap(\"color_map\")\n )\n wireframe = Bool().tag(sync=True)\n outlines = Bool().tag(sync=True)\n outlines_color = Int(min=0, max=0xFFFFFF).tag(sync=True)\n opacity = TimeSeries(Float(min=0.0, max=1.0, default_value=1.0)).tag(sync=True)\n model_matrix = TimeSeries(Array(dtype=np.float32)).tag(\n sync=True, **array_serialization_wrap(\"model_matrix\")\n )\n\n def __init__(self, **kwargs):\n super(VoxelsGroup, self).__init__(**kwargs)\n\n self.set_trait(\"type\", \"VoxelsGroup\")\n\n def get_bounding_box(self):\n return get_bounding_box(self.model_matrix)\n", "import k3d\nimport numpy as np\nimport pytest\nfrom .plot_compare import *\nimport math\n\nv = []\ns = []\no = []\n\nfor i in range(10):\n for fi in np.arange(0, 2 * math.pi, 0.1):\n v.append([\n math.sin(fi * 4 + i),\n math.cos(fi * 7 + i),\n math.cos(fi * 3) + fi * 1.5\n ])\n s.append(math.sin(fi * i))\n o.append(abs(math.cos(fi * i)))\n\nv = np.array(v, dtype=np.float32)\ns = np.array(s, dtype=np.float32)\no = np.array(o, dtype=np.float32)\n\n\ndef test_points_flat():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='flat', point_size=0.1, opacities=o,\n color=0xff0000)\n\n pytest.plot += points\n\n compare('points_flat')\n\n\ndef test_points_3d():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='3d', point_size=0.2, opacities=o,\n color=0xff0000)\n\n pytest.plot += points\n\n compare('points_3d')\n\n\ndef test_points_3d_clipping_plane():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='3d', point_size=0.2, opacities=o,\n color=0xff0000)\n pytest.plot.clipping_planes = [\n [1, 1, 0, 0]\n ]\n pytest.plot += points\n\n compare('points_3d_clipping_plane')\n\n\ndef test_points_3dSpecular():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='3dSpecular', point_size=0.2, opacities=o,\n color=0xff0000)\n\n pytest.plot += points\n\n compare('points_3dSpecular')\n\n\ndef test_points_3dSpecular_sizes():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='3dSpecular', opacities=o,\n point_sizes=np.linspace(0, 0.2, v.shape[0]), color=0xff0000)\n\n pytest.plot += points\n\n compare('points_3dSpecular_sizes')\n\n\ndef test_points_mesh_sizes():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='mesh', opacities=o,\n point_sizes=np.linspace(0, 0.2, v.shape[0]), color=0xff0000)\n\n pytest.plot += points\n\n compare('points_mesh_sizes')\n\n\ndef test_points_mesh():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='mesh', point_size=0.2, opacities=o,\n color=0xff0000)\n\n pytest.plot += points\n\n compare('points_mesh')\n\n\ndef test_points_mesh_clipping_plane():\n global v, s, o\n\n prepare()\n\n points = k3d.points(v, shader='mesh', point_size=0.2, opacities=o,\n color=0xff0000)\n pytest.plot.clipping_planes = [\n [1, 1, 0, 0]\n ]\n pytest.plot += points\n\n compare('points_mesh_clipping_plane')\n\n\ndef test_points_mesh_low_detail():\n prepare()\n\n points = k3d.points(np.array([[0, 0, 0], [1, 0, 0]]), shader='mesh', point_size=0.3,\n mesh_detail=1, color=0xff0000)\n\n pytest.plot += points\n\n compare('points_mesh_low_detail')\n\n\ndef test_points_mesh_high_detail():\n prepare()\n\n points = k3d.points(np.array([[0, 0, 0], [1, 0, 0]]), shader='mesh', point_size=0.3,\n mesh_detail=8, color=0xff0000)\n\n pytest.plot += points\n\n compare('points_mesh_high_detail')\n\n\ndef test_points_dot():\n global v, s\n\n prepare()\n\n points = k3d.points(v, shader='dot', point_size=3, opacities=o,\n color=0xff0000)\n\n pytest.plot 
+= points\n\n compare('points_dot')\n", "import numpy as np\nimport k3d\nfrom k3d.headless import k3d_remote, get_headless_driver\n\n\ndef generate():\n plot = k3d.plot(screenshot_scale=1.0)\n headless = k3d_remote(plot, get_headless_driver(), width=320, height=226)\n\n theta = np.linspace(-4 * np.pi, 4 * np.pi, 100, dtype=np.float32)\n z = np.linspace(-2, 2, 100, dtype=np.float32)\n r = z ** 2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n line = k3d.line(np.vstack([x, y, z]).T, width=0.2, scaling=[1, 1, 2])\n\n plot += line\n\n headless.sync(hold_until_refreshed=True)\n headless.camera_reset(1.0)\n\n screenshot = headless.get_screenshot()\n headless.close()\n\n return screenshot\n" ]
[ [ "numpy.dtype", "numpy.array", "numpy.stack", "numpy.finfo" ], [ "numpy.arange", "numpy.array", "numpy.linspace" ], [ "numpy.vstack", "numpy.cos", "numpy.linspace", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
richiurb/physics
[ "46e8a44790a7c6c29af11f29a846057026348cd2" ]
[ "heat_distribution_in_the_rod/graphic.py" ]
[ "import tkinter\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom config import Config\nfrom matrix import Matrix\n\n\nclass Graphic:\n TITLE = \"Heat distribution in the rod(t = \"\n\n def __init__(self, a, u):\n self.root = tkinter.Tk()\n self.root.title(self.TITLE + str(0) + \")\")\n self.a = a\n self.u = u\n self.click = 0\n self.button = tkinter.Button(\n text=\"Next\",\n background=\"#555\",\n foreground=\"#ccc\",\n padx=\"20\",\n pady=\"8\",\n font=\"16\",\n command=self.button\n )\n\n def get(self):\n self.button.pack()\n\n self.root.mainloop()\n\n def button(self):\n if self.click != 0:\n self.root.title(self.TITLE + str(self.click * Config.DELTA_T) + \")\")\n\n fig, ax = plt.subplots()\n print(\"t = \" + str(self.click * Config.DELTA_T))\n print(self.u)\n print(\"--------\")\n for i in range(len(self.u)):\n current_x = i * Config.DELTA_X\n ax.scatter(current_x, self.u[i])\n\n ax.set_xlim([0, Config.LENGTH])\n ax.set_ylim([-1, 1])\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(0.2))\n ax.grid()\n plt.show()\n\n self.click += 1\n\n self.u = Matrix(self.u, self.a).solve()\n" ]
[ [ "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lucasiscovici/cvopt
[ "b74dcdc07a66456c1a52dc4a13df20f2d5fe7071", "b74dcdc07a66456c1a52dc4a13df20f2d5fe7071", "b74dcdc07a66456c1a52dc4a13df20f2d5fe7071" ]
[ "cvopt_study/model_selection/_search.py", "cvopt_study/model_selection/_forest.py", "cvopt_study/utils/_logger.py" ]
[ "import numpy as np\n\nfrom hyperopt import fmin, tpe, hp\nfrom GPyOpt.methods import BayesianOptimization\nimport GPyOpt#.optimization.optimizer as GPyOO\nfrom GPyOpt.optimization.optimizer import OptLbfgs, OptDirect, OptCma, Optimizer\nfrom GPyOpt.core.errors import InvalidVariableNameError\nfrom ._base import BaseSearcher, fit_and_score, mk_feature_select_index, mk_objfunc\nfrom ._ga import gamin\nfrom ..utils._base import compress\nfrom ..utils._logger import CVSummarizer, NoteBookVisualizer\nfrom ._forest import RFModel, ETModel\nfrom ._gbrt import GBRTModel\nfrom ._hyperband import Hyperband\nfrom hyperopt.pyll.stochastic import sample\nfrom GPyOpt.core.task.cost import CostModel\n#ADD optSampling to GPYopt\nclass OptSampling(Optimizer):\n '''\nOptSampling\n '''\n def __init__(self, bounds, maxiter=1000):\n super(OptSampling, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n #X[np.argmin(values)]\n return np.atleast_2d(x0), np.atleast_2d(f(x0))\n\ndef choose_optimizer2(optimizer_name, bounds):\n \"\"\"\n Selects the type of local optimizer\n \"\"\"\n if optimizer_name == 'lbfgs':\n optimizer = OptLbfgs(bounds)\n\n elif optimizer_name == 'DIRECT':\n optimizer = OptDirect(bounds)\n\n elif optimizer_name == 'CMA':\n optimizer = OptCma(bounds)\n\n elif optimizer_name == 'sampling':\n optimizer = OptSampling(bounds)\n else:\n if hasattr(optimizer_name,\"optimize\") :\n optimizer=optimizer_name\n else:\n raise InvalidVariableNameError('Invalid optimizer selected.')\n return optimizer\nfrom GPyOpt.optimization import AcquisitionOptimizer\nfrom GPyOpt.optimization.optimizer import apply_optimizer\nfrom GPyOpt.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator, ThompsonSamplingAnchorPointsGenerator\nmax_objective_anchor_points_logic = \"max_objective\"\nthompson_sampling_anchor_points_logic = \"thompson_sampling\"\nsobol_design_type = \"sobol\"\nrandom_design_type = \"random\"\nclass AcquisitionOptimizer2(AcquisitionOptimizer):\n def optimize(self, f=None, df=None, f_df=None, duplicate_manager=None):\n \"\"\"\n Optimizes the input function.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n self.f = f\n self.df = df\n self.f_df = f_df\n # raise Exception([self.f])\n\n ## --- Update the optimizer, in case context has beee passed.\n self.optimizer = choose_optimizer2(self.optimizer_name, self.context_manager.noncontext_bounds)\n\n ## --- Selecting the anchor points and removing duplicates\n if self.type_anchor_points_logic == max_objective_anchor_points_logic:\n anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f)\n elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:\n anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)\n\n ## -- Select the anchor points (with context)\n anchor_points = anchor_points_generator.get(duplicate_manager=duplicate_manager, context_manager=self.context_manager)\n\n ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)\n optimized_points = [apply_optimizer(self.optimizer, a, 
f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]\n x_min, fx_min = min(optimized_points, key=lambda t:t[1])\n\n #x_min, fx_min = min([apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points], key=lambda t:t[1])\n\n return x_min, fx_min\n\n\n\nclass SimpleoptCV():\n \"\"\"\n Each cross validation optimizer class's wrapper.\n\n This class allow unified handling in different type backend.\n\n For each backend optimizer class, refer to each class`s page.\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n\n backend: str, default=\"hyperopt\".\n backend optimeizer. 
Supports the following back ends.\n\n * `hyperopt`: Sequential Model Based Global Optimization\n\n * `bayesopt`: Bayesian Optimization\n\n * `gaopt`: Genetic Algorithm\n\n * `randomopt`: Random Search\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\" \n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, \n cloner=\"sklearn\", refit=True, backend=\"hyperopt\", **kwargs): \n if backend == \"hyperopt\":\n self.optcv = HyperoptCV(estimator, param_distributions, \n scoring=scoring, cv=cv, max_iter=max_iter, random_state=random_state, \n n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose, logdir=logdir, \n save_estimator=save_estimator, saver=saver, model_id=model_id, \n cloner=cloner, refit=refit, \n **kwargs)\n elif backend == \"bayesopt\":\n self.optcv = BayesoptCV(estimator, param_distributions, \n scoring=scoring, cv=cv, max_iter=max_iter, random_state=random_state, \n n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose, logdir=logdir, \n save_estimator=save_estimator, saver=saver, model_id=model_id, refit=refit, \n cloner=cloner, \n **kwargs)\n elif backend == \"gaopt\":\n self.optcv = GAoptCV(estimator, param_distributions, \n scoring=scoring, cv=cv, max_iter=max_iter, random_state=random_state, \n n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose, logdir=logdir, \n save_estimator=save_estimator, saver=saver, model_id=model_id, refit=refit, \n cloner=cloner, \n **kwargs)\n elif backend == \"randomopt\":\n self.optcv = RandomoptCV(estimator, param_distributions, \n scoring=scoring, cv=cv, max_iter=max_iter, random_state=random_state, \n n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose, logdir=logdir, \n save_estimator=save_estimator, saver=saver, model_id=model_id, refit=refit, \n cloner=cloner, \n **kwargs)\n elif backend == \"hyperbandopt\":\n self.optcv = HyperbandoptCV(estimator, param_distributions, \n scoring=scoring, cv=cv, max_iter=max_iter, random_state=random_state, \n n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose, logdir=logdir, \n save_estimator=save_estimator, saver=saver, model_id=model_id, refit=refit, \n cloner=cloner, \n **kwargs)\n else:\n raise Exception(\"`backend` \"+str(backend)+\" is not supported.\")\n\n self.backend = backend\n \n def __getattr__(self, name):\n if name.startswith('__') and name.endswith('__'):\n return super().__getattr__(name)\n return getattr(self.optcv, name)\n\nclass HyperoptCV(BaseSearcher):\n \"\"\"\n Cross validation optimize by Hyperopt(Sequential Model Based Global Optimization).\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, 
default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n\n algo: hyperopt search algorithm class, default=tpe.suggest.\n Hyperopt's parameter. 
Search algorithm.\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\"\n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, refit=True, \n cloner=\"sklearn\", algo=tpe.suggest):\n super().__init__(estimator=estimator, param_distributions=param_distributions, \n scoring=scoring, cv=cv, n_jobs=n_jobs, pre_dispatch=pre_dispatch, \n verbose=verbose, logdir=logdir, save_estimator=save_estimator, saver=saver, \n model_id=model_id, refit=refit, cloner=\"sklearn\", backend=\"hyperopt\")\n\n self.max_iter = max_iter\n self.algo = algo\n if random_state is None:\n self.random_state = random_state\n else:\n self.random_state = np.random.RandomState(int(random_state))\n self.search_algo = \"hyperopt\"\n\n def fit(self, X, y=None, validation_data=None, groups=None, \n feature_groups=None, min_n_features=2, *args, **kwargs):\n \"\"\"\n Run fit.\n\n Parameters\n ---------- \n X :numpy.array, pandas.DataFrame or scipy.sparse, shape(axis=0) = (n_samples)\n Features. Detail depends on estimator.\n\n y: np.ndarray or pd.core.frame.DataFrame, shape(axis=0) = (n_samples) or None, default=None.\n Target variable. detail depends on estimator.\n\n validation_data: tuple(X, y) or None, default=None.\n Data to compute validation score. detail depends on estimator.\n When validation_data is None, computing validation score is not run.\n\n groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the samples used while splitting the dataset into train/test set.\n (input of scikit-learn cross-validator)\n\n feature_groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the features used while fearture select.\n When feature_groups is None, fearture selection is not run.\n\n When feature_group's value is -1, this group's features always are used.\n\n min_n_features: int, default=2.\n When number of X's feature cols is less than min_n_features, return search failure.\n \n e.g. 
If estimator has columns sampling function, use this option to avoid X become too small and error.\n \"\"\"\n X, y, Xvalid, yvalid, cv, param_distributions = self._preproc_fit(X=X, y=y, validation_data=validation_data, feature_groups=feature_groups)\n\n obj = mk_objfunc(X=X, y=y, groups=groups, feature_groups=feature_groups, feature_axis=BaseSearcher.feature_axis, \n estimator=self.estimator, scoring=self.scoring, cv=cv, \n param_distributions=param_distributions, backend=self.backend, failedscore=np.nan, \n saver=self.saver, cloner=self._cloner, score_summarizer=BaseSearcher.score_summarizer, \n Xvalid=Xvalid, yvalid=yvalid, n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch, \n cvsummarizer=self._cvs, save_estimator=self.save_estimator, min_n_features=min_n_features)\n\n try :\n fmin(obj, param_distributions, algo=self.algo, max_evals=self.max_iter, rstate=self.random_state, *args, **kwargs)\n except KeyboardInterrupt:\n pass\n\n self._postproc_fit(X=X, y=y, feature_groups=feature_groups, \n best_params=self._cvs.best_params_, best_score=self._cvs.best_score_)\n return self\n\n\n\nclass BayesoptCV(BaseSearcher):\n \"\"\"\n Cross validation optimizer by Gpyopt.BayesianOptimization.\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. 
Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n\n max_time: float, default=numpy.inf.\n GpyOpt`s parameter. Maximum exploration horizon in seconds.\n\n model_type: str, default=\"GP\".\n GpyOpt`s parameter. Type of model to use as surrogate.\n\n * 'GP', standard Gaussian process.\n\n * 'GP_MCMC', Gaussian process with prior in the hyper-parameters.\n\n * 'sparseGP', sparse Gaussian process.\n\n * 'warperdGP', warped Gaussian process.\n\n * 'InputWarpedGP', input warped Gaussian process\n\n * 'RF', random forest (scikit-learn).\n\n initial_params: numpy.array or None, default=None.\n GpyOpt`s parameter. Initial inputs of the Gpy model.\n\n initial_score: numpy.array or None, default=None.\n GpyOpt`s parameter. Initial outputs of the Gpy model.\n\n initial_design_numdata: int, default=5.\n GpyOpt`s parameter. Number of initial points that are collected jointly before start running the optimization.\n\n initial_design_type: str, default=\"random\".\n GpyOpt`s parameter. Type of initial design.\n\n * 'random', to collect points in random locations.\n\n * 'latin', to collect points in a Latin hypercube (discrete variables are sampled randomly.)\n\n acquisition_type: str, default=\"EI\".\n GpyOpt`s parameter. Type of acquisition function to use.\n\n * 'EI', expected improvement.\n\n * 'EI_MCMC', integrated expected improvement (requires GP_MCMC model).\n\n * 'MPI', maximum probability of improvement.\n\n * 'MPI_MCMC', maximum probability of improvement (requires GP_MCMC model).\n\n * 'LCB', GP-Lower confidence bound.\n\n * 'LCB_MCMC', integrated GP-Lower confidence bound (requires GP_MCMC model).\n\n normalize_Y: bool, default=True.\n GpyOpt`s parameter. Whether to normalize the outputs before performing any optimization.\n\n exact_feval: bool, default=False.\n GpyOpt`s parameter. Whether the outputs are exact.\n\n acquisition_optimizer_type: str. default=\"lbfgs\".\n GpyOpt`s parameter. Type of acquisition function to use.\n\n * 'lbfgs': L-BFGS.\n\n * 'DIRECT': Dividing Rectangles.\n\n * 'CMA': covariance matrix adaptation.\n\n model_update_interval: int. default=1.\n GpyOpt`s parameter. Interval of collected observations after which the model is updated.\n\n evaluator_type: str, default=\"sequential\".\n GpyOpt`s parameter. Determines the way the objective is evaluated (all methods are equivalent if the batch size is one).\n\n * 'sequential', sequential evaluations.\n\n * 'random': synchronous batch that selects the first element as in a sequential policy and the rest randomly.\n\n * 'local_penalization': batch method proposed in (Gonzalez et al. 2016).\n\n * 'thompson_sampling': batch method using Thompson sampling.\n\n batch_size: int, default=1. \n GpyOpt`s parameter. 
Size of the batch in which the objective is evaluated.\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\"\n\n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, \n cloner=\"sklearn\", refit=True, \n max_time=np.inf, model=None, model_type=\"GP\", initial_params=None, initial_score=None, \n initial_design_numdata=5, initial_design_type=\"random\", \n acquisition_type=\"EI\", normalize_Y=True, exact_feval=False, \n acquisition_optimizer_type=\"lbfgs\", model_update_interval=1, \n evaluator_type=\"sequential\", batch_size=1,modelXargs={},customFun=lambda a:None,**blabla):\n\n super().__init__(estimator=estimator, param_distributions=param_distributions, \n scoring=scoring, cv=cv, n_jobs=n_jobs, pre_dispatch=pre_dispatch, \n verbose=verbose, logdir=logdir, save_estimator=save_estimator, saver=saver, \n model_id=model_id, cloner= cloner, refit=refit, backend=\"bayesopt\")\n \n self.random_state = random_state\n self.max_iter = max_iter\n self.max_time = max_time\n self.model_type = model_type\n self.model = model\n self.initial_params = initial_params\n self.initial_score = initial_score\n self.initial_design_numdata = initial_design_numdata\n self.initial_design_type = initial_design_type\n self.acquisition_type = acquisition_type\n self.normalize_Y = normalize_Y\n self.exact_feval = exact_feval\n self.acquisition_optimizer_type = acquisition_optimizer_type\n self.model_update_interval = model_update_interval\n self.evaluator_type = evaluator_type\n self.batch_size = batch_size\n self.blabla=blabla\n self.failedscore = None\n self.modelXargs=modelXargs\n self.search_algo = \"bayesopt\"\n self.acquisition_optimizer=None\n self.find_model()\n customFun(self)\n # self.check_acq_opimizer()\n\n def find_model(self):\n if self.model is None and self.model_type in [\"RF\",\"ET\",\"GBRT\"]:\n if self.model_type in [\"RF\"]:\n self.model = RFModel\n elif self.model_type in [\"ET\"]:\n self.model = ETModel\n elif self.model_type in [\"GBRT\"]:\n self.model=GBRTModel\n\n self.model=self.model(**self.modelXargs)\n self.model_type=None\n self.acquisition_optimizer_type = \"sampling\"\n self.acquisition_optimizer=AcquisitionOptimizer2\n\n\n def fit(self, X, y=None, validation_data=None, groups=None, \n feature_groups=None, min_n_features=2, methodArgs={}, *args, **kwargs):\n \"\"\"\n Run fit.\n\n Parameters\n ---------- \n X :numpy.array, pandas.DataFrame or scipy.sparse, shape(axis=0) = (n_samples)\n Features. Detail depends on estimator.\n\n y: np.ndarray or pd.core.frame.DataFrame, shape(axis=0) = (n_samples) or None, default=None.\n Target variable. detail depends on estimator.\n\n validation_data: tuple(X, y) or None, default=None.\n Data to compute validation score. 
detail depends on estimator.\n When validation_data is None, computing validation score is not run.\n\n groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the samples used while splitting the dataset into train/test set.\n (input of scikit-learn cross-validator)\n\n feature_groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the features used while fearture select.\n When feature_groups is None, fearture selection is not run.\n\n When feature_group's value is -1, this group's features always are used.\n\n min_n_features: int, default=2.\n When number of X's feature cols is less than min_n_features, return search failure.\n \n e.g. If estimator has columns sampling function, use this option to avoid X become too small and error.\n \"\"\"\n X, y, Xvalid, yvalid, cv, param_distributions = self._preproc_fit(X=X, y=y, validation_data=validation_data, feature_groups=feature_groups)\n np.random.seed(self.random_state)\n\n if self.failedscore is None:\n # If search is failed, Return random score.\n # random score is fixed at first fit.\n self.failedscore = self._random_scoring(X, y)\n\n obj = mk_objfunc(X=X, y=y, groups=groups, feature_groups=feature_groups, feature_axis=BaseSearcher.feature_axis, \n estimator=self.estimator, scoring=self.scoring, cv=cv, \n param_distributions=param_distributions, backend=self.backend, failedscore=self.failedscore, \n saver=self.saver, cloner=self._cloner, score_summarizer=BaseSearcher.score_summarizer, \n Xvalid=Xvalid, yvalid=yvalid, n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch, \n cvsummarizer=self._cvs, save_estimator=self.save_estimator, min_n_features=min_n_features)\n self.opt = BayesianOptimization(obj, domain=param_distributions, constraints=None, cost_withGradients=None, \n model_type=self.model_type,model=self.model, X=self.initial_params, Y=self.initial_score,\n initial_design_numdata=self.initial_design_numdata, \n initial_design_type=self.initial_design_type, \n acquisition_type=self.acquisition_type, normalize_Y=self.normalize_Y,\n exact_feval=self.exact_feval, acquisition_optimizer_type=self.acquisition_optimizer_type, \n model_update_interval=self.model_update_interval, evaluator_type=self.evaluator_type, \n batch_size=self.batch_size, num_cores=self.n_jobs, verbosity=False, verbosity_model=False, \n maximize=False, de_duplication=False,**self.blabla,**methodArgs) \n\n if self.acquisition_optimizer is not None:\n self.opt.cost = CostModel(None)\n self.opt.acquisition_optimizer = self.acquisition_optimizer(self.opt.space, self.opt.acquisition_optimizer_type, model=self.opt.model ) ## more arguments may come here\n \n if 'acquisition' not in methodArgs and \"acquisition\" not in self.blabla:\n self.opt.acquisition = self.opt._acquisition_chooser()\n self.opt.evaluator = self.opt._evaluator_chooser()\n super(BayesianOptimization,self.opt).__init__( model = self.opt.model,\n space = self.opt.space,\n objective = self.opt.objective,\n acquisition = self.opt.acquisition,\n evaluator = self.opt.evaluator,\n X_init = self.opt.X,\n Y_init = self.opt.Y,\n cost = self.opt.cost,\n normalize_Y = self.opt.normalize_Y,\n model_update_interval = self.opt.model_update_interval,\n de_duplication = self.opt.de_duplication)\n\n try :\n self.opt.run_optimization(max_iter=self.max_iter, max_time=self.max_time, *args, **kwargs)\n except KeyboardInterrupt:\n pass\n \n self._postproc_fit(X=X, y=y, feature_groups=feature_groups, \n best_params=self._cvs.best_params_, best_score=self._cvs.best_score_)\n 
return self\n\n\nclass GAoptCV(BaseSearcher):\n \"\"\"\n Cross validation optimizer by Genetic Algorithm.\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n\n iter_pergeneration: int, default=8.\n Genetic algorithm's parameter. Number of iteration per generation (it corresponds to number of population.).\n\n param_crossover_proba: float or function, default=0.5.\n Genetic algorithm's parameter. Probability which a certain parameter becomes another parent value.\n\n If this value 0 or 1, paramaters is not changed by crossover.\n\n Function whose variable is number of generation could be passed to this variable.\n Number of generation' s start is 0. But create population by random sampling in generation 0, so this function is used from generation 1.\n\n Examples\n --------\n >>> def f(generaion):\n >>> return 0.5 / generaion\n\n param_mutation_proba: float or function, default=0.01.\n Genetic algorithm's parameter. 
Probability which a certain parameter is mutated.\n\n Function whose variable is number of generation Could be passed to this variable.\n\n random_sampling_proba: float or function, default=0.01.\n Genetic algorithm's parameter. In a certain generation, probability which individual is created by random sampling.\n\n Function whose variable is number of generation Could be passed to this variable.\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\"\n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, \n cloner=\"sklearn\", refit=True, \n iter_pergeneration=8, param_crossover_proba=0.5, param_mutation_proba=0.01, \n random_sampling_proba=0.01):\n super().__init__(estimator=estimator, param_distributions=param_distributions, \n scoring=scoring, cv=cv, n_jobs=n_jobs, pre_dispatch=pre_dispatch, \n verbose=verbose, logdir=logdir, save_estimator=save_estimator, saver=saver, \n model_id=model_id, cloner=cloner, refit=refit, backend=\"gaopt\")\n\n self.max_iter = max_iter\n self.iter_pergeneration = iter_pergeneration\n self.param_crossover_proba = param_crossover_proba\n self.param_mutation_proba = param_mutation_proba\n self.random_sampling_proba = random_sampling_proba\n if random_state is None:\n self.random_state = random_state\n else:\n self.random_state = np.random.RandomState(int(random_state))\n self.search_algo = \"gaopt\"\n\n def fit(self, X, y=None, validation_data=None, groups=None, \n feature_groups=None, min_n_features=2, *args, **kwargs):\n \"\"\"\n Run fit.\n\n Parameters\n ---------- \n X :numpy.array, pandas.DataFrame or scipy.sparse, shape(axis=0) = (n_samples)\n Features. Detail depends on estimator.\n\n y: np.ndarray or pd.core.frame.DataFrame, shape(axis=0) = (n_samples) or None, default=None.\n Target variable. detail depends on estimator.\n\n validation_data: tuple(X, y) or None, default=None.\n Data to compute validation score. detail depends on estimator.\n When validation_data is None, computing validation score is not run.\n\n groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the samples used while splitting the dataset into train/test set.\n (input of scikit-learn cross-validator)\n\n feature_groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the features used while fearture select.\n When feature_groups is None, fearture selection is not run.\n\n When feature_group's value is -1, this group's features always are used.\n\n min_n_features: int, default=2.\n When number of X's feature cols is less than min_n_features, return search failure.\n \n e.g. 
If estimator has columns sampling function, use this option to avoid X become too small and error.\n \"\"\"\n X, y, Xvalid, yvalid, cv, param_distributions = self._preproc_fit(X=X, y=y, validation_data=validation_data, feature_groups=feature_groups)\n np.random.seed(self.random_state)\n\n obj = mk_objfunc(X=X, y=y, groups=groups, feature_groups=feature_groups, feature_axis=BaseSearcher.feature_axis, \n estimator=self.estimator, scoring=self.scoring, cv=cv, \n param_distributions=param_distributions, backend=self.backend, failedscore=np.nan, \n saver=self.saver, cloner=self._cloner, score_summarizer=BaseSearcher.score_summarizer, \n Xvalid=Xvalid, yvalid=yvalid, n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch, \n cvsummarizer=self._cvs, save_estimator=self.save_estimator, min_n_features=min_n_features)\n\n try :\n gamin(obj, param_distributions, max_iter=self.max_iter, iter_pergeneration=self.iter_pergeneration, \n param_crossover_proba=self.param_crossover_proba, param_mutation_proba=self.param_mutation_proba, \n random_sampling_proba=self.random_sampling_proba, cvsummarizer=self._cvs, *args, **kwargs)\n except KeyboardInterrupt:\n pass\n\n self._postproc_fit(X=X, y=y, feature_groups=feature_groups, \n best_params=self._cvs.best_params_, best_score=self._cvs.best_score_)\n return self\n\n\n\nclass RandomoptCV(BaseSearcher):\n \"\"\"\n Cross validation optimizer by Random Search.\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. 
Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\"\n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, \n cloner=\"sklearn\", refit=True):\n super().__init__(estimator=estimator, param_distributions=param_distributions, \n scoring=scoring, cv=cv, n_jobs=n_jobs, pre_dispatch=pre_dispatch, \n verbose=verbose, logdir=logdir, save_estimator=save_estimator, saver=saver, \n model_id=model_id, cloner=cloner, refit=refit, backend=\"gaopt\")\n\n self.max_iter = max_iter\n if random_state is None:\n self.random_state = random_state\n else:\n self.random_state = np.random.RandomState(int(random_state))\n self.search_algo = \"randomopt\"\n\n def fit(self, X, y=None, validation_data=None, groups=None, \n feature_groups=None, min_n_features=2, *args, **kwargs):\n \"\"\"\n Run fit.\n\n Parameters\n ---------- \n X :numpy.array, pandas.DataFrame or scipy.sparse, shape(axis=0) = (n_samples)\n Features. Detail depends on estimator.\n\n y: np.ndarray or pd.core.frame.DataFrame, shape(axis=0) = (n_samples) or None, default=None.\n Target variable. detail depends on estimator.\n\n validation_data: tuple(X, y) or None, default=None.\n Data to compute validation score. detail depends on estimator.\n When validation_data is None, computing validation score is not run.\n\n groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the samples used while splitting the dataset into train/test set.\n (input of scikit-learn cross-validator)\n\n feature_groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the features used while fearture select.\n When feature_groups is None, fearture selection is not run.\n\n When feature_group's value is -1, this group's features always are used.\n\n min_n_features: int, default=2.\n When number of X's feature cols is less than min_n_features, return search failure.\n \n e.g. 
If estimator has columns sampling function, use this option to avoid X become too small and error.\n \"\"\"\n X, y, Xvalid, yvalid, cv, param_distributions = self._preproc_fit(X=X, y=y, validation_data=validation_data, feature_groups=feature_groups)\n np.random.seed(self.random_state)\n\n obj = mk_objfunc(X=X, y=y, groups=groups, feature_groups=feature_groups, feature_axis=BaseSearcher.feature_axis, \n estimator=self.estimator, scoring=self.scoring, cv=cv, \n param_distributions=param_distributions, backend=self.backend, failedscore=np.nan, \n saver=self.saver, cloner=self._cloner, score_summarizer=BaseSearcher.score_summarizer, \n Xvalid=Xvalid, yvalid=yvalid, n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch, \n cvsummarizer=self._cvs, save_estimator=self.save_estimator, min_n_features=min_n_features)\n\n try :\n gamin(obj, param_distributions, max_iter=self.max_iter, iter_pergeneration=1, \n param_crossover_proba=0, param_mutation_proba=0, \n random_sampling_proba=1, cvsummarizer=self._cvs, *args, **kwargs)\n except KeyboardInterrupt:\n pass\n\n self._postproc_fit(X=X, y=y, feature_groups=feature_groups, \n best_params=self._cvs.best_params_, best_score=self._cvs.best_score_)\n return self\n\nclass HyperbandoptCV(BaseSearcher):\n \"\"\"\n Cross validation optimizer by HyperbandoptCV.\n\n Parameters\n ----------\n estimator\n scikit-learn estimator like.\n\n param_distributions: dict.\n Search space.\n\n scoring: string or sklearn.metrics.make_scorer.\n Evaluation index of search.\n When scoring is None, use stimator default scorer and this score greater is better.\n \n cv: scikit-learn cross-validator or int(number of folds), default=5.\n Cross validation setting.\n\n max_iter: int, default=32.\n Number of search.\n\n random_state: int or None, default=None.\n The seed used by the random number generator.\n\n n_jobs: int, default=1.\n Number of jobs to run in parallel.\n\n pre_dispatch: int or string, default=\"2*n_jobs\".\n Controls the number of jobs that get dispatched during parallel.\n\n verbose: int(0, 1 or 2), default=0.\n Controls the verbosity\n \n 0: don't display status.\n\n 1: display status by stdout.\n \n 2: display status by graph.\n\n logdir: str or None, default=None.\n Path of directory to save log file.\n When logdir is None, log is not saved.\n \n [directory structure]\n \n logdir\n \n |-cv_results\n \n | |-{model_id}.csv : search log\n \n | ...\n\n |-cv_results_graph\n \n | |-{model_id}.html : search log(graph)\n \n | ...\n \n |-estimators_{model_id}\n \n |-{model_id}_index{search count}_split{fold count}.pkl: an estimator which is fitted fold train data\n \n ...\n \n |-{model_id}_index{search count}_test.pkl : an estimator which is fitted whole train data.\n\n save_estimator: int, default=0.\n estimator save setting.\n \n 0: An estimator is not saved.\n \n 1: An estimator which is fitted fold train data is saved per cv-fold.\n \n 2: In addition to 1, an estimator which is fitted whole train data is saved per cv.\n\n saver: str or function, default=\"sklearn\".\n estimator`s saver.\n \n * `sklearn`: use `sklearn.externals.joblib.dump`. 
Basically for scikit-learn.\n\n * function: function whose variable are model class and save path.\n\n Examples\n --------\n >>> def saver(model, path):\n >>> save_model(model, path+\".h5\")\n\n model_id: str or None, default=None.\n This is used to log filename.\n When model_id is None, this is generated by date time.\n\n cloner: str or function, default=\"sklearn\".\n estimator`s cloner.\n \n * `sklearn`: use try:`sklearn.base.clone`, except:`copy.deepcopy`. Basically for scikit-learn.\n\n * function: function whose variable is model.\n\n Examples\n --------\n >>> def cloner(model):\n >>> clone_model(model)\n\n refit: bool, default=True.\n Refit an estimator using the best found parameters on all train data(=X).\n \n eta : float, default=3\n The inverse of the proportion of configurations that are discarded\n in each round of hyperband.\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search.\n\n best_score_ : float\n Cross-validated score of the best_estimator.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n \"\"\"\n def __init__(self, estimator, param_distributions, \n scoring=None, cv=5, max_iter=32, \n random_state=None, n_jobs=1, pre_dispatch=\"2*n_jobs\", \n verbose=0, logdir=None, save_estimator=0, saver=\"sklearn\", model_id=None, \n cloner=\"sklearn\", refit=True,eta=3):\n\n super().__init__(estimator=estimator, param_distributions=param_distributions, \n scoring=scoring, cv=cv, n_jobs=n_jobs, pre_dispatch=pre_dispatch, \n verbose=verbose, logdir=logdir, save_estimator=save_estimator, saver=saver, \n model_id=model_id, cloner=cloner, refit=refit, backend=\"hyperbandopt\")\n\n self.max_iter = max_iter\n self.eta=eta\n if random_state is None:\n self.random_state = random_state\n else:\n self.random_state = np.random.RandomState(int(random_state))\n self.search_algo = \"hyperbandopt\"\n\n\n def fit(self, X, y=None, validation_data=None, groups=None, \n feature_groups=None, min_n_features=2,skip_last = 0, dry_run = False , *args, **kwargs):\n \"\"\"\n Run fit.\n\n Parameters\n ---------- \n X :numpy.array, pandas.DataFrame or scipy.sparse, shape(axis=0) = (n_samples)\n Features. Detail depends on estimator.\n\n y: np.ndarray or pd.core.frame.DataFrame, shape(axis=0) = (n_samples) or None, default=None.\n Target variable. detail depends on estimator.\n\n validation_data: tuple(X, y) or None, default=None.\n Data to compute validation score. detail depends on estimator.\n When validation_data is None, computing validation score is not run.\n\n groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the samples used while splitting the dataset into train/test set.\n (input of scikit-learn cross-validator)\n\n feature_groups: array-like, shape = (n_samples,) or None, default=None.\n Group labels for the features used while fearture select.\n When feature_groups is None, fearture selection is not run.\n\n When feature_group's value is -1, this group's features always are used.\n\n min_n_features: int, default=2.\n When number of X's feature cols is less than min_n_features, return search failure.\n \n e.g. If estimator has columns sampling function, use this option to avoid X become too small and error.\n\n skip_last : int, default=0\n The number of last rounds to skip. 
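(A rough sketch, added for illustration, of the bracket arithmetic behind ``eta`` and
of the "rounds" that ``skip_last`` refers to; this is standard Hyperband bookkeeping
and an assumption about the backend used here.)

>>> import math
>>> eta, max_iter = 3, 81
>>> s_max = int(math.log(max_iter) / math.log(eta))    # most aggressive bracket: 4
>>> B = (s_max + 1) * max_iter                         # budget assigned to each bracket
>>> n = int(math.ceil(B / max_iter * eta ** s_max / (s_max + 1)))   # 81 configs start it
>>> r = max_iter * eta ** (-s_max)                     # each gets 1 unit of budget; survivors get eta x more per round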
For example, this can be used\n to skip the last round of hyperband, which is standard randomized\n search. \n dry_run = False \n \"\"\"\n X, y, Xvalid, yvalid, cv, param_distributions = self._preproc_fit(X=X, y=y, validation_data=validation_data, feature_groups=feature_groups)\n np.random.seed(self.random_state)\n\n\n obj = mk_objfunc(X=X, y=y, groups=groups, feature_groups=feature_groups, feature_axis=BaseSearcher.feature_axis, \n estimator=self.estimator, scoring=self.scoring, cv=cv, \n param_distributions=param_distributions, backend=self.backend, failedscore=np.nan, \n saver=self.saver, cloner=self._cloner, score_summarizer=BaseSearcher.score_summarizer, \n Xvalid=Xvalid, yvalid=yvalid, n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch, \n cvsummarizer=self._cvs, save_estimator=self.save_estimator, min_n_features=min_n_features)\n\n\n\n self.opt = Hyperband(lambda: sample(param_distributions),\n lambda nb,params: obj(params),max_iter=self.max_iter,eta=self.eta,verbose=self.verbose)\n \n try :\n self.opt.run(X,y, skip_last=skip_last, dry_run=dry_run)\n # gamin(obj, param_distributions, max_iter=self.max_iter, iter_pergeneration=1, \n # param_crossover_proba=0, param_mutation_proba=0, \n # random_sampling_proba=1, cvsummarizer=self._cvs, *args, **kwargs)\n except KeyboardInterrupt:\n pass\n\n self._postproc_fit(X=X, y=y, feature_groups=feature_groups, \n best_params=self._cvs.best_params_, best_score=self._cvs.best_score_)\n return self\n", "### SOURCE FROM scipy-optimize\n\n\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor as _sk_RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor as _sk_ExtraTreesRegressor\nfrom GPyOpt.models.base import BOModel\n\ndef _return_std(X, trees, predictions, min_variance):\n \"\"\"\n Returns `std(Y | X)`.\n Can be calculated by E[Var(Y | Tree)] + Var(E[Y | Tree]) where\n P(Tree) is `1 / len(trees)`.\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_features)\n Input data.\n trees : list, shape=(n_estimators,)\n List of fit sklearn trees as obtained from the ``estimators_``\n attribute of a fit RandomForestRegressor or ExtraTreesRegressor.\n predictions : array-like, shape=(n_samples,)\n Prediction of each data point as returned by RandomForestRegressor\n or ExtraTreesRegressor.\n Returns\n -------\n std : array-like, shape=(n_samples,)\n Standard deviation of `y` at `X`. If criterion\n is set to \"mse\", then `std[i] ~= std(y | X[i])`.\n \"\"\"\n # This derives std(y | x) as described in 4.3.2 of arXiv:1211.0906\n std = np.zeros(len(X))\n\n for tree in trees:\n var_tree = tree.tree_.impurity[tree.apply(X)]\n\n # This rounding off is done in accordance with the\n # adjustment done in section 4.3.3\n # of http://arxiv.org/pdf/1211.0906v2.pdf to account\n # for cases such as leaves with 1 sample in which there\n # is zero variance.\n var_tree[var_tree < min_variance] = min_variance\n mean_tree = tree.predict(X)\n std += var_tree + mean_tree ** 2\n\n std /= len(trees)\n std -= predictions ** 2.0\n std[std < 0.0] = 0.0\n std = std ** 0.5\n return std\n\n\nclass RandomForestRegressor(_sk_RandomForestRegressor):\n \"\"\"\n RandomForestRegressor that supports conditional std computation.\n Parameters\n ----------\n n_estimators : integer, optional (default=10)\n The number of trees in the forest.\n criterion : string, optional (default=\"mse\")\n The function to measure the quality of a split. 
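(Side note, added for illustration, not part of the original docstring: the standard
deviation produced by ``_return_std`` above is only defined for the "mse" criterion;
it applies the law of total variance over the trees,
Var(y|x) = E_tree[Var(y|x, tree)] + Var_tree(E[y|x, tree]), clipping each leaf
variance at ``min_variance``. A two-tree toy calculation of the same quantity:)

>>> import numpy as np
>>> leaf_var = np.array([0.04, 0.01])    # Var(y | x, tree), read from tree_.impurity
>>> leaf_mean = np.array([1.0, 1.4])     # E[y | x, tree], i.e. each tree's prediction
>>> std = np.sqrt(leaf_var.mean() + leaf_mean.var())   # ~0.255, matching what _return_std reports for this point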
Supported criteria\n are \"mse\" for the mean squared error, which is equal to variance\n reduction as feature selection criterion, and \"mae\" for the mean\n absolute error.\n max_features : int, float, string or None, optional (default=\"auto\")\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n .. note::\n The search for a split does not stop until at least one\n valid partition of the node samples is found, even if it\n requires to effectively inspect more than ``max_features``\n features.\n max_depth : integer or None, optional (default=None)\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n min_samples_split : int, float, optional (default=2)\n The minimum number of samples required to split an internal node:\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a percentage and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n min_samples_leaf : int, float, optional (default=1)\n The minimum number of samples required to be at a leaf node:\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a percentage and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. 
Samples have\n equal weight when sample_weight is not provided.\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n min_impurity_decrease : float, optional (default=0.)\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n The weighted impurity decrease equation is the following::\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n bootstrap : boolean, optional (default=True)\n Whether bootstrap samples are used when building trees.\n oob_score : bool, optional (default=False)\n whether to use out-of-bag samples to estimate\n the R^2 on unseen data.\n n_jobs : integer, optional (default=1)\n The number of jobs to run in parallel for both `fit` and `predict`.\n If -1, then the number of jobs is set to the number of cores.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n verbose : int, optional (default=0)\n Controls the verbosity of the tree building process.\n warm_start : bool, optional (default=False)\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just fit a whole\n new forest.\n Attributes\n ----------\n estimators_ : list of DecisionTreeRegressor\n The collection of fitted sub-estimators.\n feature_importances_ : array of shape = [n_features]\n The feature importances (the higher, the more important the feature).\n n_features_ : int\n The number of features when ``fit`` is performed.\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n oob_score_ : float\n Score of the training dataset obtained using an out-of-bag estimate.\n oob_prediction_ : array of shape = [n_samples]\n Prediction computed with out-of-bag estimate on the training set.\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data,\n ``max_features=n_features`` and ``bootstrap=False``, if the improvement\n of the criterion is identical for several splits enumerated during the\n search of the best split. To obtain a deterministic behaviour during\n fitting, ``random_state`` has to be fixed.\n References\n ----------\n .. [1] L. 
Breiman, \"Random Forests\", Machine Learning, 45(1), 5-32, 2001.\n \"\"\"\n\n def __init__(self, n_estimators=10, criterion='mse', max_depth=None,\n min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features='auto',\n max_leaf_nodes=None, min_impurity_decrease=0.,\n bootstrap=True, oob_score=False,\n n_jobs=1, random_state=None, verbose=0, warm_start=False,\n min_variance=0.0):\n self.min_variance = min_variance\n super(RandomForestRegressor, self).__init__(\n n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features, max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n bootstrap=bootstrap, oob_score=oob_score,\n n_jobs=n_jobs, random_state=random_state,\n verbose=verbose, warm_start=warm_start)\n\n\n def predict(self, X, return_std=False):\n \"\"\"Predict continuous output for X.\n Parameters\n ----------\n X : array of shape = (n_samples, n_features)\n Input data.\n return_std : boolean\n Whether or not to return the standard deviation.\n Returns\n -------\n predictions : array-like of shape = (n_samples,)\n Predicted values for X. If criterion is set to \"mse\",\n then `predictions[i] ~= mean(y | X[i])`.\n std : array-like of shape=(n_samples,)\n Standard deviation of `y` at `X`. If criterion\n is set to \"mse\", then `std[i] ~= std(y | X[i])`.\n \"\"\"\n mean = super(RandomForestRegressor, self).predict(X)\n\n if return_std:\n if self.criterion != \"mse\":\n raise ValueError(\n \"Expected impurity to be 'mse', got %s instead\"\n % self.criterion)\n std = _return_std(X, self.estimators_, mean, self.min_variance)\n return mean, std\n return mean\n\n\nclass RFModel(BOModel):\n \"\"\"\n General class for handling a Random Forest in GPyOpt.\n .. Note:: The model has beed wrapper 'as it is' from Scikit-learn. 
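A minimal usage sketch (synthetic data, added for illustration) of the
``RandomForestRegressor`` subclass defined above; the ``return_std`` path is only
available with ``criterion="mse"``, as enforced in ``predict``:

>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = rng.uniform(-3, 3, size=(200, 1))
>>> y = np.sin(X).ravel() + 0.1 * rng.randn(200)
>>> forest = RandomForestRegressor(n_estimators=50, criterion="mse", random_state=0).fit(X, y)
>>> mean, std = forest.predict(X[:5], return_std=True)   # per-sample mean and uncertainty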
Check\n http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n for further details.\n \"\"\"\n\n analytical_gradient_prediction = False\n\n def __init__(self, n_estimators=10, criterion='mse', max_depth=None,\n min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features='auto',\n max_leaf_nodes=None, min_impurity_decrease=0.,\n bootstrap=True, oob_score=False,\n n_jobs=1, random_state=None, verbose=0, warm_start=False,\n min_variance=0.0):\n\n self.bootstrap = bootstrap\n self.criterion = criterion\n self.max_depth = max_depth\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_samples_leaf = min_samples_leaf\n self.min_samples_split = min_samples_split\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.n_estimators = n_estimators\n self.n_jobs = n_jobs\n self.oob_score = oob_score\n self.random_state = random_state\n self.verbose = verbose\n self.warm_start = warm_start\n self.min_variance = min_variance\n \n self.model = None\n\n def _create_model(self, X, Y):\n \"\"\"\n Creates the model given some input data X and Y.\n \"\"\"\n self.X = X\n self.Y = Y\n self.model = RandomForestRegressor(bootstrap = self.bootstrap,\n criterion = self.criterion,\n max_depth = self.max_depth,\n max_features = self.max_features,\n max_leaf_nodes = self.max_leaf_nodes,\n min_samples_leaf = self.min_samples_leaf,\n min_samples_split = self.min_samples_split,\n min_weight_fraction_leaf = self.min_weight_fraction_leaf,\n n_estimators = self.n_estimators,\n n_jobs = self.n_jobs,\n oob_score = self.oob_score,\n random_state = self.random_state,\n verbose = self.verbose,\n warm_start = self.warm_start,\n min_variance = self.min_variance\n )\n\n self.model.fit(X,Y.flatten())\n\n\n def updateModel(self, X_all, Y_all, X_new, Y_new):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X_all\n self.Y = Y_all\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.fit(X_all, Y_all.flatten())\n\n def predict(self, X):\n \"\"\"\n Predictions with the model. Returns posterior means and standard deviations at X.\n \"\"\"\n rep=self.model.predict(X,return_std=True)\n return np.reshape(rep[0],(-1,1)),np.reshape(rep[1],(-1,1))\n\n\n def get_fmin(self):\n rep= self.model.predict(self.X).min()\n # print(rep,np.shape(rep))\n return rep\n \n def get_model_parameters(self):\n \"\"\"\n Returns a 2D numpy array with the parameters of the model\n \"\"\"\n return np.atleast_2d(list(self.model.get_params().values()))\n \n def get_model_parameters_names(self):\n \"\"\"\n Returns a list with the names of the parameters of the model\n \"\"\"\n return list(self.model.get_params().keys())\n\nclass ExtraTreesRegressor(_sk_ExtraTreesRegressor):\n \"\"\"\n ExtraTreesRegressor that supports conditional standard deviation.\n Parameters\n ----------\n n_estimators : integer, optional (default=10)\n The number of trees in the forest.\n criterion : string, optional (default=\"mse\")\n The function to measure the quality of a split. 
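A usage sketch for the ``RFModel`` wrapper defined above (synthetic data, added for
illustration; handing it to the optimizer through the ``model`` argument is an
assumption based on how ``BayesianOptimization`` is instantiated in this repository):

>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X0 = rng.rand(8, 2)
>>> Y0 = np.sum(X0 ** 2, axis=1, keepdims=True)
>>> surrogate = RFModel(n_estimators=25, random_state=0)
>>> surrogate.updateModel(X0, Y0, None, None)     # fits the underlying forest
>>> mu, sigma = surrogate.predict(X0[:3])         # posterior mean / std, each shaped (3, 1)
>>> fmin = surrogate.get_fmin()                   # smallest predicted value over observed X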
Supported criteria\n are \"mse\" for the mean squared error, which is equal to variance\n reduction as feature selection criterion, and \"mae\" for the mean\n absolute error.\n max_features : int, float, string or None, optional (default=\"auto\")\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n .. note::\n The search for a split does not stop until at least one\n valid partition of the node samples is found, even if it\n requires to effectively inspect more than ``max_features``\n features.\n max_depth : integer or None, optional (default=None)\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n min_samples_split : int, float, optional (default=2)\n The minimum number of samples required to split an internal node:\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a percentage and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n min_samples_leaf : int, float, optional (default=1)\n The minimum number of samples required to be at a leaf node:\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a percentage and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. 
Samples have\n equal weight when sample_weight is not provided.\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n min_impurity_decrease : float, optional (default=0.)\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n The weighted impurity decrease equation is the following::\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n bootstrap : boolean, optional (default=True)\n Whether bootstrap samples are used when building trees.\n oob_score : bool, optional (default=False)\n whether to use out-of-bag samples to estimate\n the R^2 on unseen data.\n n_jobs : integer, optional (default=1)\n The number of jobs to run in parallel for both `fit` and `predict`.\n If -1, then the number of jobs is set to the number of cores.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n verbose : int, optional (default=0)\n Controls the verbosity of the tree building process.\n warm_start : bool, optional (default=False)\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just fit a whole\n new forest.\n Attributes\n ----------\n estimators_ : list of DecisionTreeRegressor\n The collection of fitted sub-estimators.\n feature_importances_ : array of shape = [n_features]\n The feature importances (the higher, the more important the feature).\n n_features_ : int\n The number of features when ``fit`` is performed.\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n oob_score_ : float\n Score of the training dataset obtained using an out-of-bag estimate.\n oob_prediction_ : array of shape = [n_samples]\n Prediction computed with out-of-bag estimate on the training set.\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data,\n ``max_features=n_features`` and ``bootstrap=False``, if the improvement\n of the criterion is identical for several splits enumerated during the\n search of the best split. To obtain a deterministic behaviour during\n fitting, ``random_state`` has to be fixed.\n References\n ----------\n .. [1] L. 
Breiman, \"Random Forests\", Machine Learning, 45(1), 5-32, 2001.\n \"\"\"\n def __init__(self, n_estimators=10, criterion='mse', max_depth=None,\n min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features='auto',\n max_leaf_nodes=None, min_impurity_decrease=0.,\n bootstrap=False, oob_score=False,\n n_jobs=1, random_state=None, verbose=0, warm_start=False,\n min_variance=0.0):\n self.min_variance = min_variance\n super(ExtraTreesRegressor, self).__init__(\n n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features, max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n bootstrap=bootstrap, oob_score=oob_score,\n n_jobs=n_jobs, random_state=random_state,\n verbose=verbose, warm_start=warm_start)\n\n\n\n def predict(self, X, return_std=False):\n \"\"\"\n Predict continuous output for X.\n Parameters\n ----------\n X : array-like of shape=(n_samples, n_features)\n Input data.\n return_std : boolean\n Whether or not to return the standard deviation.\n Returns\n -------\n predictions : array-like of shape=(n_samples,)\n Predicted values for X. If criterion is set to \"mse\",\n then `predictions[i] ~= mean(y | X[i])`.\n std : array-like of shape=(n_samples,)\n Standard deviation of `y` at `X`. If criterion\n is set to \"mse\", then `std[i] ~= std(y | X[i])`.\n \"\"\"\n mean = super(ExtraTreesRegressor, self).predict(X)\n\n if return_std:\n if self.criterion != \"mse\":\n raise ValueError(\n \"Expected impurity to be 'mse', got %s instead\"\n % self.criterion)\n std = _return_std(X, self.estimators_, mean, self.min_variance)\n return mean, std\n\n return mean\n \n \nclass ETModel(BOModel):\n \"\"\"\n General class for handling a Extra Tree in GPyOpt.\n .. Note:: The model has beed wrapper 'as it is' from Scikit-learn. 
Check\n http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n for further details.\n \"\"\"\n\n analytical_gradient_prediction = False\n\n def __init__(self, n_estimators=10, criterion='mse', max_depth=None,\n min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features='auto',\n max_leaf_nodes=None, min_impurity_decrease=0.,\n bootstrap=False, oob_score=False,\n n_jobs=1, random_state=None, verbose=0, warm_start=False,\n min_variance=0.0):\n\n self.bootstrap = bootstrap\n self.criterion = criterion\n self.max_depth = max_depth\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_samples_leaf = min_samples_leaf\n self.min_samples_split = min_samples_split\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.n_estimators = n_estimators\n self.n_jobs = n_jobs\n self.oob_score = oob_score\n self.random_state = random_state\n self.verbose = verbose\n self.warm_start = warm_start\n self.min_variance = min_variance\n \n self.model = None\n\n def _create_model(self, X, Y):\n \"\"\"\n Creates the model given some input data X and Y.\n \"\"\"\n self.X = X\n self.Y = Y\n self.model = ExtraTreesRegressor(bootstrap = self.bootstrap,\n criterion = self.criterion,\n max_depth = self.max_depth,\n max_features = self.max_features,\n max_leaf_nodes = self.max_leaf_nodes,\n min_samples_leaf = self.min_samples_leaf,\n min_samples_split = self.min_samples_split,\n min_weight_fraction_leaf = self.min_weight_fraction_leaf,\n n_estimators = self.n_estimators,\n n_jobs = self.n_jobs,\n oob_score = self.oob_score,\n random_state = self.random_state,\n verbose = self.verbose,\n warm_start = self.warm_start,\n min_variance = self.min_variance)\n\n self.model.fit(X,Y.flatten())\n\n\n def updateModel(self, X_all, Y_all, X_new, Y_new):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X_all\n self.Y = Y_all\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.fit(X_all, Y_all.flatten())\n\n def predict(self, X):\n \"\"\"\n Predictions with the model. 
Returns posterior means and standard deviations at X.\n \"\"\"\n rep=self.model.predict(X,return_std=True)\n return np.reshape(rep[0],(-1,1)),np.reshape(rep[1],(-1,1))\n\n def get_fmin(self):\n return self.model.predict(self.X).min()\n \n def get_model_parameters(self):\n \"\"\"\n Returns a 2D numpy array with the parameters of the model\n \"\"\"\n return np.atleast_2d(list(self.model.get_params().values()))\n \n def get_model_parameters_names(self):\n \"\"\"\n Returns a list with the names of the parameters of the model\n \"\"\"\n return list(self.model.get_params().keys())\n", "import os, sys, warnings, time, copy\nimport pandas as pd, numpy as np\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta, timezone\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom bokeh import layouts\nfrom bokeh.io import output_file, output_notebook, push_notebook, show, save\nfrom bokeh.plotting import figure, ColumnDataSource, curdoc\nfrom bokeh.resources import INLINE\nfrom bokeh.models import HoverTool, SaveTool, WheelZoomTool, ResetTool, PanTool, BoxZoomTool, LabelSet, CustomJS\nfrom bokeh.models.ranges import DataRange1d, FactorRange\nfrom bokeh.models.widgets import Div, PreText\n\nfrom tzlocal import get_localzone\n\nfrom ._base import mk_dir\nfrom ..model_selection import _setting as st\nfrom ._html import arrang_graph_file\nfrom ..utils import _htmlsrc as hs\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length \"\"\"\n import string, random\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\nclass OrderedDict2(OrderedDict):\n \n def __setstate__(self,state):\n import json\n data=json.loads(state)\n self.update(data)\n return self\n \n def __getstate__(self):\n import json\n return json.dumps(list(self.items()))\n\nclass CVSummarizer:\n \"\"\"\n Summarize cross validation results.\n\n Parameters\n ----------\n cvsize: int.\n Number of folds.\n\n valid: bool\n Flag whether validation data is input or not.\n\n sign: 1 or -1.\n Attribute of sklearn.metrics.make_scorer .\n Flag whether greater is better or not.\n \"\"\"\n def __init__(self, paraname_list, cvsize, score_summarizer, score_summarizer_name, valid, \n sign, model_id, search_algo, verbose, save_estimator,nbTot=0, logdir=None):\n self.score_summarizer = score_summarizer\n self.score_summarizer_name = str(score_summarizer_name)\n self.valid = valid\n self.sign = sign\n self.model_id = str(model_id)\n self.search_algo = str(search_algo)\n\n self.verbose = verbose\n self.save_estimator = save_estimator\n self.logdir = logdir\n self.nbTot=nbTot\n self.save_path = None\n self.save_graph_path = None\n if self.logdir is not None:\n path = os.path.join(self.logdir, \"cv_results\")\n mk_dir(path, error_level=0)\n self.save_path = os.path.join(path, str(self.model_id)+\".csv\")\n\n if self.verbose == 2:\n path = os.path.join(self.logdir, \"cv_results_graph\")\n mk_dir(path, error_level=0)\n self.save_graph_path = os.path.join(path, str(self.model_id)+\".html\")\n \n if self.save_estimator > 0:\n mk_dir(os.path.join(self.logdir, \"estimators\", self.model_id), \n error_level=1, msg=\"save in this directory.\")\n \n\n self.params_keys = [\"param_\" + str(i) for i in paraname_list]\n self.train_score_keys = [\"split\"+str(i)+\"_train_score\" for i in range(cvsize)]\n self.test_score_keys = [\"split\"+str(i)+\"_test_score\" for i in range(cvsize)]\n self.cv_results_ = 
OrderedDict2({\"index\":[], \"params\":[]})\n self.next_elapsed_time = np.nan\n self.nbv = None\n\n self.best_params_ = None\n self.best_score_ = np.nan\n\n def __call__(self):\n return self.cv_results_\n\n def _store(self, key, value):\n if key in self.cv_results_:\n self.cv_results_[key].append(value)\n else:\n self.cv_results_[key] = [value]\n\n def _save(self):\n if self.logdir is not None: \n if len(pd.DataFrame(self.cv_results_)) == 1:\n if os.path.isfile(self.save_path):\n warnings.warn(\"A log file(%s) is already exist. cv result is append to this file\" %self.save_path)\n pd.DataFrame(self.cv_results_).iloc[[-1]].to_csv(self.save_path, index=False, encoding=\"cp932\", \n mode=\"a\", header=(len(pd.DataFrame(self.cv_results_))==1))\n\n def _init_score(self, cv_train_scores, cv_test_scores, train_score, validation_score):\n if self.sign == 1:\n return cv_train_scores, cv_test_scores, train_score, validation_score\n else:\n cv_train_scores = list(-1*np.array(cv_train_scores))\n cv_test_scores = list(-1*np.array(cv_test_scores))\n train_score = -1*train_score\n validation_score = -1*validation_score\n return cv_train_scores, cv_test_scores, train_score, validation_score\n\n def _update_best(self):\n if not np.isnan(self.cv_results_[self.score_summarizer_name+\"_test_score\"]).all():\n if self.sign == 1:\n index = np.nanargmax(self.cv_results_[self.score_summarizer_name+\"_test_score\"])\n else:\n index = np.nanargmin(self.cv_results_[self.score_summarizer_name+\"_test_score\"])\n\n self.best_params_ = self.cv_results_[\"params\"][index]\n self.best_score_ = self.cv_results_[self.score_summarizer_name+\"_test_score\"][index]\n \n def store_cv_result(self, cv_train_scores, cv_test_scores, params, fit_times, score_times, \n feature_select, X_shape, start_time,\n end_time, train_score, validation_score):\n cv_train_scores, cv_test_scores, train_score, validation_score = self._init_score(cv_train_scores, cv_test_scores, train_score, validation_score)\n\n # Summary\n self._store(\"index\", len(self.cv_results_[\"index\"]))\n self._store(\"params\", params)\n self._store(\"start_time\", start_time)\n self._store(\"end_time\", end_time)\n\n self._store(self.score_summarizer_name+\"_train_score\", self.score_summarizer(cv_train_scores))\n self._store(\"std_train_score\", np.std(cv_train_scores))\n self._store(self.score_summarizer_name+\"_test_score\", self.score_summarizer(cv_test_scores))\n self._store(\"std_test_score\", np.std(cv_test_scores))\n self._store(\"train_score(whole)\", train_score)\n self._store(\"validation_score\", validation_score)\n\n # Score details\n for i , key in enumerate(self.train_score_keys):\n self._store(key, cv_train_scores[i])\n for i , key in enumerate (self.test_score_keys):\n self._store(key, cv_test_scores[i])\n\n # Parameter details\n self._store(\"X_shape\", X_shape)\n self._store(\"feature_select\", feature_select)\n for key in self.params_keys:\n self._store(key, params[key.split(\"param_\")[1]])\n\n # Time details\n self._store(\"mean_fit_time\", np.mean(fit_times))\n self._store(\"mean_score_time\", np.mean(score_times))\n self._store(\"std_fit_time\", np.std(fit_times))\n self._store(\"std_score_time\", np.std(score_times))\n self._store(\"elapsed_time_sec(estimated)\", self.next_elapsed_time)\n if isinstance(start_time, datetime) & isinstance(end_time, datetime):\n self._store(\"elapsed_time_sec\", (end_time-start_time).seconds)\n else:\n self._store(\"elapsed_time_sec\", np.nan)\n\n self._store(\"model_id\", self.model_id)\n 
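# --- Illustrative aside (added note, not part of the original method) -------------
# _save() above appends one row per finished search iteration to a single CSV file,
# writing the header only when the results frame holds its very first row. The
# standalone equivalent with pandas (file name and columns are placeholders):
import pandas as pd
_demo_results = {"index": [0], "mean_test_score": [0.81], "model_id": ["demo"]}
_demo_df = pd.DataFrame(_demo_results)
_demo_df.iloc[[-1]].to_csv("cv_log_demo.csv", index=False, mode="a",
                           header=(len(_demo_df) == 1))
# -----------------------------------------------------------------------------------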
self._store(\"search_algo\", self.search_algo)\n self._save()\n self._update_best()\n def __getstate__(self):\n rep=self.__dict__.copy()\n rep[\"nbv\"]=\"<notPickable\"\n return rep\n def _estimate_time_sec(self, params):\n df = pd.DataFrame(self.cv_results_[\"params\"]+[params])\n\n strcols = df.columns[df.dtypes==object].tolist()\n df[strcols] = df[strcols].fillna(\"none\")\n le = LabelEncoder()\n for strcol in strcols:\n df.loc[:, strcol] = le.fit_transform(df.loc[:, strcol].tolist())\n\n Xtrain = df.iloc[:df.index.max()].dropna(axis=0, inplace=False)\n Xtest = df.iloc[[df.index.max()]].dropna(axis=0, inplace=False)\n\n if (len(Xtrain) ==0) or (len(Xtest)==0):\n self.next_elapsed_time = np.nan\n else:\n try:\n estimator = RandomForestRegressor()\n estimator.fit(Xtrain, self.cv_results_[\"elapsed_time_sec\"])\n self.next_elapsed_time = int(estimator.predict(Xtest))\n except:\n self.next_elapsed_time=np.nan\n\n def display_status(self, params, start_time=None):\n if self.verbose > 0:\n self._estimate_time_sec(params)\n if start_time is None:\n start_time = datetime.now()\n\n n_search = len(self.cv_results_[\"params\"])\n if np.isnan(self.next_elapsed_time):\n estimated_end_time = np.nan\n else:\n estimated_end_time = start_time + timedelta(seconds=self.next_elapsed_time)\n estimated_end_time = estimated_end_time.astimezone(get_localzone()).strftime(\"%m/%d %H:%M\")\n\n if self.verbose == 1:\n start_time = start_time.astimezone(get_localzone()).strftime(\"%m/%d %H:%M\")\n sys.stdout.write(\"\\rNum_of_search:%s Start:%s End(estimated):%s Best_score:%s\" \n %(n_search, start_time, estimated_end_time, np.round(self.best_score_, 2)))\n elif self.verbose == 2:\n if self.nbv is None:\n if n_search > 0:\n self.nbv = NoteBookVisualizer(cv_results_cols=self.cv_results_.keys(), sign=self.sign, valid=self.valid, \n model_id=self.model_id, savepath=self.save_graph_path,nbTot=self.nbTot)\n else:\n self.nbv.fit(cv_results=self.cv_results_, estimeted_end_time=estimated_end_time)\n \n \n\nclass NoteBookVisualizer():\n \"\"\"\n Visualize cross validation results.\n \"\"\"\n time_col = \"end_time\"\n score_cols = dict(train=\"mean_train_score\", test=\"mean_test_score\", valid=\"validation_score\")\n score_std_cols = dict(train=\"std_train_score\", test=\"std_test_score\")\n colors = dict(train=\"#1f77b4\", test=\"#ff7f0e\", valid=\"#2ca02c\")\n display_width = 950\n n_col_param = 5\n stream_rollover = 256\n title = \"\"\"\n <div style=\"font-family:segoe ui, sans-serif; font-style:italic; color:#1987E5; padding:0px;\">\n <font style=\"font-size:xx-large;\"> Search Results</font>\n <font style=\"font-size:large;\"> - TEXT -</font></div>\n \"\"\"\n headline = \"\"\"\n <div style=\"font-family:segoe ui, sans-serif; font-style:italic; font-size:x-large; \n border-bottom:solid 2.5px #7f7f7f; color:#1987E5; padding-bottom: 3px;\">TEXT</div>\n \"\"\"\n\n def _update_cv_score_std_src(self, cv_score_std):\n patches = dict()\n for key in cv_score_std.keys():\n patches[key] = [(slice(NoteBookVisualizer.stream_rollover*2), cv_score_std[key])]\n self.cv_score_std_src.patch(patches)\n push_notebook(handle=self.bokeh_handle)\n \n def _update_param_srcs(self, param_dists):\n for key in param_dists.keys():\n new_data = dict()\n patches = dict()\n for inner_key in list(self.param_srcs[key].data.keys()):\n old_len = len(self.param_srcs[key].data[inner_key])\n new_len = len(param_dists[key][inner_key])\n patches[inner_key] = [(slice(old_len), param_dists[key][inner_key][:old_len])]\n if old_len == new_len:\n pass\n elif 
old_len < new_len:\n new_data[inner_key] = param_dists[key][inner_key][old_len:]\n else:\n raise Exception(\"Inner Error: param_dists[\", key, \"] 's length must not decrease.\")\n self.param_srcs[key].patch(patches)\n\n if len(new_data) > 0:\n self.param_srcs[key].stream(new_data)\n push_notebook(handle=self.bokeh_handle)\n \n def _mk_partial_dict(self, tgt_dict, tgt_keys):\n return dict(zip(tgt_keys, [tgt_dict[key] for key in tgt_keys]))\n \n def _mk_score_source(self, cv_results, xcol, score_cols, hover_cols=None):\n if isinstance(cv_results, pd.core.frame.DataFrame):\n cv_results = cv_results.to_dict(orient=\"list\")\n src_dict = self._mk_partial_dict(tgt_dict=cv_results, tgt_keys=[xcol]+score_cols)\n if hover_cols is None:\n return ColumnDataSource(src_dict)\n else:\n tooltips = []\n for col in hover_cols:\n tooltips.append((str(col) ,\"@\"+str(col)))\n src_dict[col] = cv_results[col]\n return ColumnDataSource(src_dict), HoverTool(tooltips=tooltips)\n \n\n def _add_line(self, p, xcol, ycol, score_source, score_std_source=None, color=\"black\", legend=None):\n if score_std_source is not None:\n p.patch(x=xcol, y=ycol, fill_alpha=0.5, line_alpha=0, source=score_std_source, \n fill_color=color, line_color=color, legend=legend) \n p.line(x=xcol, y=ycol, source=score_source, \n line_color=color, legend=legend, line_width=1)\n p.circle(x=xcol, y=ycol, source=score_source, \n line_color=color, legend=legend, fill_color=\"white\", size=4)\n return p\n\n def _arrange_fig(self, p):\n p.toolbar.logo = None\n p.xaxis.axis_label_text_font = \"segoe ui\"\n p.yaxis.axis_label_text_font = \"segoe ui\"\n p.yaxis.axis_label_text_font_style = \"normal\"\n p.axis.major_label_text_font = \"segoe ui\"\n p.legend.click_policy=\"hide\"\n p.background_fill_alpha = 0.7\n p.border_fill_alpha = 0.7\n p.legend.background_fill_alpha = 0.7\n return p\n \n def _init_cv_results(self, cv_results):\n cv_results = pd.DataFrame(cv_results)\n cv_results = cv_results[~cv_results[[NoteBookVisualizer.time_col, NoteBookVisualizer.score_cols[\"test\"], NoteBookVisualizer.score_std_cols[\"test\"]]].isnull().any(axis=1)]\n\n cv_results.reset_index(drop=True, inplace=True)\n if len(cv_results) == 0:\n return None, None, None\n\n cv_results.rename(columns=dict([(col, col.split(\"param_\")[-1]) for col in cv_results.columns if \"param_\" in col]), inplace=True)\n\n if self.sign == 1:\n cv_results[[\"best_train\", \"best_test\", \"best_valid\"]] = cv_results[[NoteBookVisualizer.score_cols[\"train\"], NoteBookVisualizer.score_cols[\"test\"], NoteBookVisualizer.score_cols[\"valid\"]]].cummax()\n else:\n cv_results[[\"best_train\", \"best_test\", \"best_valid\"]] = cv_results[[NoteBookVisualizer.score_cols[\"train\"], NoteBookVisualizer.score_cols[\"test\"], NoteBookVisualizer.score_cols[\"valid\"]]].cummin()\n \n cv_score_std = {NoteBookVisualizer.time_col:cv_results[NoteBookVisualizer.time_col].tolist()+cv_results[NoteBookVisualizer.time_col].tolist()[::-1],}\n for data_type in [\"train\", \"test\"]:\n cv_score_std[self.score_cols[data_type]] = (cv_results[self.score_cols[data_type]]+cv_results[self.score_std_cols[data_type]]).tolist()\n cv_score_std[self.score_cols[data_type]] += (cv_results[self.score_cols[data_type]]-cv_results[self.score_std_cols[data_type]]).iloc[::-1].tolist()\n \n # to support stream_rollover\n css_length = int(len(cv_score_std[NoteBookVisualizer.time_col])/2)\n if css_length > NoteBookVisualizer.stream_rollover:\n for key in cv_score_std.keys():\n cv_score_std[key] = cv_score_std[key][css_length - 
NoteBookVisualizer.stream_rollover :css_length + NoteBookVisualizer.stream_rollover]\n else:\n for key in cv_score_std.keys():\n cv_score_std[key].extend([\"nan\"]*((NoteBookVisualizer.stream_rollover - css_length)*2))\n\n param_dists = dict()\n if len(self.param_feature_cols) > 1:\n param_dists[st.FEATURE_SELECT_PARAMNAME_PREFIX] = dict(label=[i.split(st.FEATURE_SELECT_PARAMNAME_PREFIX)[-1] for i in self.param_feature_cols], \n x=[int(i.split(st.FEATURE_SELECT_PARAMNAME_PREFIX)[-1])-1 for i in self.param_feature_cols], \n top=cv_results[self.param_feature_cols].sum(0).values.tolist())\n for param_col in self.param_cols:\n if cv_results[param_col].dtypes == \"object\":\n vc = cv_results[param_col].value_counts(dropna=False).sort_index()\n obj_param_dist = dict(label=vc.index.fillna(\"none\").tolist(), top=vc.values.tolist())\n try:\n chk_dict = dict(zip(self.param_srcs[\"label\"], self.param_srcs[\"x\"]))\n for label in list(obj_param_dist[\"label\"]):\n if not(label in self.param_srcs[\"label\"]):\n chk_dict[label] = len(chk_dict[label]) - 1\n\n obj_param_dist[\"label\"] = list(chk_dict.keys())\n obj_param_dist[\"x\"] = list(chk_dict.values()) \n except (AttributeError, KeyError):\n obj_param_dist[\"x\"] = [i for i in range(len(obj_param_dist[\"label\"]))]\n param_dists[param_col] = copy.deepcopy(obj_param_dist)\n else:\n hist, edges = np.histogram(cv_results[param_col], density=False, bins=10)\n param_dists[param_col] = dict(left=list(edges[:-1]), right=list(edges[1:]), top=list(hist))\n \n cv_results[self.param_cols] = cv_results[self.param_cols].fillna(\"none\")\n \n return cv_results, cv_score_std, param_dists\n \n def __init__(self, cv_results_cols, sign, valid, model_id, savepath, nbTot=0):\n if valid:\n self.data_types = [\"train\", \"test\", \"valid\"]\n else:\n self.data_types = [\"train\", \"test\"]\n self.nbTot=nbTot\n self.param_feature_cols = [i.split(\"param_\")[-1] for i in cv_results_cols if(\"param_\"+st.FEATURE_SELECT_PARAMNAME_PREFIX in i)&(i!=\"param_\"+st.FEATURE_SELECT_PARAMNAME_PREFIX+str(st.ALWAYS_USED_FEATURE_GROUP_ID))]\n self.all_param_cols = [i.split(\"param_\")[-1] for i in cv_results_cols if(\"param_\" in i)&(i!=\"param_\"+st.FEATURE_SELECT_PARAMNAME_PREFIX+str(st.ALWAYS_USED_FEATURE_GROUP_ID))]\n self.param_cols = list(set(self.all_param_cols)-set(self.param_feature_cols))\n self.param_cols.sort()\n \n self.sign = sign\n self.cv_results_cols = list(cv_results_cols)\n self.valid = valid\n self.model_id = str(model_id)\n self.savepath = savepath\n\n self.bokeh_handle = None\n self.last=0\n \n def fit(self, cv_results, estimeted_end_time):\n cv_results, cv_score_std, param_dists = self._init_cv_results(cv_results)\n nbi=len(cv_results)\n tot=self.nbTot\n if self.bokeh_handle is None:\n if cv_results is None:\n return\n\n # mk bokeh source\n self.cv_src, cv_hover = self._mk_score_source(cv_results, xcol=NoteBookVisualizer.time_col, score_cols=[NoteBookVisualizer.score_cols[i] for i in self.data_types], \n hover_cols=self.all_param_cols)\n \n self.end_time_src = ColumnDataSource(data=dict(text=[\"This search end time(estimated): {}\".format(estimeted_end_time)]))\n self.cv_score_std_src = ColumnDataSource(data=cv_score_std)\n self.best_src = self._mk_score_source(cv_results, xcol=NoteBookVisualizer.time_col, \n score_cols=[\"best_\"+i for i in self.data_types])\n \n self.param_srcs = dict()\n for key in param_dists.keys():\n self.param_srcs[key] = ColumnDataSource(data= param_dists[key])\n \n\n # CV Score transition\n cv_p = figure(title=\"CV Score transition\", 
x_axis_label=\"time\", y_axis_label=\"score\", \n x_axis_type=\"datetime\", plot_width=int(NoteBookVisualizer.display_width/2), plot_height=275, \n toolbar_location=\"above\", \n tools=[SaveTool(), ResetTool(), PanTool(), WheelZoomTool()])\n\n for data_type in self.data_types:\n if data_type==\"valid\":\n cv_p = self._add_line(cv_p, xcol=NoteBookVisualizer.time_col, ycol=NoteBookVisualizer.score_cols[data_type], \n score_source=self.cv_src, \n color=NoteBookVisualizer.colors[data_type], legend=data_type)\n else:\n cv_p = self._add_line(cv_p, xcol=NoteBookVisualizer.time_col, ycol=NoteBookVisualizer.score_cols[data_type], \n score_source=self.cv_src, score_std_source=self.cv_score_std_src, \n color=NoteBookVisualizer.colors[data_type], legend=data_type)\n\n display_etime = LabelSet(x=0, y=0, x_offset=80, y_offset=20, \n x_units=\"screen\", y_units=\"screen\", render_mode=\"canvas\",\n text=\"text\", source=self.end_time_src, \n text_font=\"segoe ui\", text_font_style =\"italic\", \n background_fill_color=\"white\", background_fill_alpha=0.5)\n cv_p.add_layout(display_etime)\n\n cv_p.add_tools(cv_hover)\n cv_p.legend.location = \"top_left\"\n cv_p.xaxis.minor_tick_line_color = None\n cv_p.yaxis.minor_tick_line_color = None\n cv_p = self._arrange_fig(cv_p)\n \n \n # Best Score transition\n best_p = figure(title=\"Best Score transition\", x_axis_label=\"time\", y_axis_label=\"score\", \n x_range=cv_p.x_range, y_range=cv_p.y_range, \n x_axis_type=\"datetime\", plot_width=int(NoteBookVisualizer.display_width/2), plot_height=275, \n toolbar_location=\"above\", tools=[PanTool(), WheelZoomTool(), SaveTool(), ResetTool()])\n for data_type in self.data_types:\n best_p = self._add_line(best_p, xcol=NoteBookVisualizer.time_col, ycol=\"best_\"+data_type, \n score_source=self.best_src, color=NoteBookVisualizer.colors[data_type], legend=data_type)\n # best_p.add_tools(cv_hover) #TODO HOVER\n best_p.legend.location = \"top_left\"\n best_p.xaxis.minor_tick_line_color = None\n best_p.yaxis.minor_tick_line_color = None\n best_p = self._arrange_fig(best_p)\n\n \n # Param distributions\n param_vbar_ps = dict()\n param_hist_ps = dict()\n\n tmp = list(self.param_cols)\n if st.FEATURE_SELECT_PARAMNAME_PREFIX in self.param_srcs.keys():\n tmp = [st.FEATURE_SELECT_PARAMNAME_PREFIX] + tmp\n for param_col in tmp:\n if \"label\" in list(param_dists[param_col].keys()):\n # Bar graph\n param_vbar_ps[param_col] = figure(title=param_col, y_axis_label=\"frequency\", \n plot_width=int(NoteBookVisualizer.display_width/NoteBookVisualizer.n_col_param), \n plot_height=int(NoteBookVisualizer.display_width/NoteBookVisualizer.n_col_param), \n #x_range=FactorRange(factors=self.param_srcs[param_col].data[\"x\"]), \n y_range=DataRange1d(min_interval=1.0, start=0, default_span=1.0), \n toolbar_location=\"above\", \n tools=[SaveTool(), HoverTool(tooltips=[(\"label\",\"@label\"), (\"top\",\"@top\")])])\n param_vbar_ps[param_col].vbar(x=\"x\", top=\"top\", \n source=self.param_srcs[param_col], \n width=0.5, bottom=0, color=\"#9467bd\", fill_alpha=0.5)\n\n labels = LabelSet(x=\"x\", y=0, level=\"glyph\", text=\"label\", text_align=\"center\", \n text_font=\"segoe ui\", text_font_style=\"normal\", text_font_size=\"8pt\", \n x_offset=0, y_offset=0, source=self.param_srcs[param_col], render_mode=\"canvas\")\n param_vbar_ps[param_col].add_layout(labels)\n\n param_vbar_ps[param_col].xaxis.major_label_text_font_size = \"0pt\"\n param_vbar_ps[param_col].xaxis.major_tick_line_color = None\n param_vbar_ps[param_col].xaxis.minor_tick_line_color = 
None\n param_vbar_ps[param_col].yaxis.minor_tick_line_color = None\n param_vbar_ps[param_col] = self._arrange_fig(param_vbar_ps[param_col])\n else: \n # Histgram\n param_hist_ps[param_col] = figure(title=param_col, y_axis_label=\"frequency\", \n plot_width=int(NoteBookVisualizer.display_width/NoteBookVisualizer.n_col_param), \n plot_height=int(NoteBookVisualizer.display_width/NoteBookVisualizer.n_col_param), \n y_range=DataRange1d(min_interval=1.0, start=0), \n toolbar_location=\"above\", \n tools=[SaveTool(), HoverTool(tooltips=[(\"left\",\"@left\"), (\"right\",\"@right\"), (\"top\",\"@top\")])])\n param_hist_ps[param_col].quad(top=\"top\", bottom=0, left=\"left\", right=\"right\", \n source=self.param_srcs[param_col], \n color=\"#17becf\", fill_alpha=0.5)\n param_hist_ps[param_col].xaxis.minor_tick_line_color = None \n param_hist_ps[param_col].yaxis.minor_tick_line_color = None \n param_hist_ps[param_col] = self._arrange_fig(param_hist_ps[param_col])\n self.stri=randomString()\n title = Div(text=NoteBookVisualizer.title.replace(\"TEXT\", self.model_id), width=int(NoteBookVisualizer.display_width))\n scores_headline = Div(text=NoteBookVisualizer.headline.replace(\"TEXT\", \"<span id='ooi_{}'> Score History ({}/{})</span>\".format(self.stri,nbi,tot)), width=int(NoteBookVisualizer.display_width*0.9))\n params_headline = Div(text=NoteBookVisualizer.headline.replace(\"TEXT\", \" Parameter History\"), width=int(NoteBookVisualizer.display_width*0.9))\n self.p = layouts.layout([title, [scores_headline]]+[[cv_p, best_p]]+[[params_headline]]+\\\n [list(param_vbar_ps.values())[i:i+NoteBookVisualizer.n_col_param] for i in range(0, len(param_vbar_ps), NoteBookVisualizer.n_col_param)]+\\\n [list(param_hist_ps.values())[i:i+NoteBookVisualizer.n_col_param] for i in range(0, len(param_hist_ps), NoteBookVisualizer.n_col_param)])\n self.bokeh_handle = show(self.p, notebook_handle=True)\n else:\n # update bokeh src\n self.end_time_src.patch({\"text\":[(0, \"This search end time(estimated): {}\".format(estimeted_end_time))]})\n if len(cv_results) != len(self.cv_src.data[NoteBookVisualizer.time_col]):\n self.cv_src.stream(cv_results[list(self.cv_src.data.keys())].iloc[-1:].to_dict(orient=\"list\"), \n rollover=NoteBookVisualizer.stream_rollover)\n self.best_src.stream(cv_results[list(self.best_src.data.keys())].iloc[-1:].to_dict(orient=\"list\"), \n rollover=NoteBookVisualizer.stream_rollover)\n push_notebook(handle=self.bokeh_handle)\n\n self._update_cv_score_std_src(cv_score_std)\n self._update_param_srcs(param_dists)\n \n try:\n nbi=len(self.cv_src.data[NoteBookVisualizer.time_col])+1\n if nbi > self.last:\n from IPython.display import Javascript,display\n display(Javascript(\"\"\"\n document.getElementById(\"ooi_{}\").innerHTML=\" Score History ({}/{})\";\n \"\"\".format(self.stri,nbi,tot)))\n self.last=nbi\n except:\n pass\n \n if self.savepath is not None:\n self._save_graph(search_algo=str(cv_results[\"search_algo\"].iloc[0]), n_iter=int(cv_results[\"index\"].iloc[-1]))\n\n def _save_graph(self, search_algo, n_iter):\n save(self.p, filename=self.savepath, resources=INLINE)\n arrang_graph_file(graph=self.savepath, model_id=self.model_id, \n add_head=hs.additional_head, pjs=hs.pjs, \n search_algo=search_algo, n_iter=n_iter)\n\n def close(self, search_algo, n_iter):\n self.end_time_src.patch({\"text\":[(0, \"This search end time(estimated): finished\")]}) \n push_notebook(handle=self.bokeh_handle) \n try:\n from IPython.display import Javascript,display\n display(Javascript(\"\"\"\n 
document.getElementById(\"ooi_{}\").innerHTML=\" Score History\";\n \"\"\".format(self.stri)))\n except:\n pass\n if self.savepath is not None:\n self._save_graph(search_algo=search_algo, n_iter=n_iter)\n" ]
[ [ "numpy.atleast_2d", "numpy.random.seed" ], [ "numpy.reshape" ], [ "sklearn.ensemble.RandomForestRegressor", "numpy.nanargmax", "numpy.isnan", "pandas.DataFrame", "numpy.round", "numpy.std", "numpy.nanargmin", "numpy.mean", "numpy.array", "sklearn.preprocessing.LabelEncoder", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jhchung/summary-gwas-imputation
[ "f860475fdd714de90fa66a1c6e5e2ff59d631d97", "f860475fdd714de90fa66a1c6e5e2ff59d631d97" ]
[ "src/slice_gwas_by_region.py", "src/genomic_tools_lib/data_management/TextFileTools.py" ]
[ "#!/usr/bin/env python\n__author__ = \"alvaro barbeira\"\nimport os\nimport logging\n\nimport numpy\nimport pandas\n\nfrom genomic_tools_lib import Utilities, Logging\n\ndef run(args):\n if os.path.exists(args.output):\n logging.info(\"%s exists. Nope.\", args.output)\n return\n\n logging.info(\"Loading regions\")\n regions = pandas.read_table(args.region_file).rename(columns={\"chr\":\"chromosome\"})\n regions.dropna(inplace=True)\n regions.start = regions.start.astype(int)\n regions.stop = regions.stop.astype(int)\n\n logging.info(\"Loading gwas\")\n gwas = pandas.read_table(args.gwas_file, usecols=[\"panel_variant_id\", \"chromosome\", \"position\", \"zscore\"])\n gwas.dropna(inplace=True)\n\n logging.info(\"Processing\")\n sliced = []\n for i,region in enumerate(regions.itertuples()):\n logging.log(8, \"Processing region %d\", i+1)\n if numpy.isnan(region.start) or numpy.isnan(region.stop) or \\\n (type(region.chromosome) != str and numpy.isnan(region.chromosome)):\n logging.log(8, \"skipping incomplete region\")\n continue\n slice = gwas[(gwas.chromosome == region.chromosome) & (gwas.position >= region.start) & (gwas.position < region.stop)]\n slice = slice.sort_values(by = \"position\")\n if slice.shape[0] == 0:\n continue\n slice = slice.assign(region = \"region-{}-{}-{}\".format(region.chromosome, region.start, region.stop), r=i)\n\n slice = slice[[\"panel_variant_id\", \"region\", \"r\", \"zscore\"]]\n sliced.append(slice)\n\n sliced = pandas.concat(sliced).sort_values(by=\"r\")\n if args.output_format == \"dapg\":\n sliced.region = sliced.r.apply(lambda x: \"region{}\".format(x))\n sliced = sliced.drop([\"r\"], axis=1)\n Utilities.save_dataframe(sliced, args.output, header=False)\n elif args.output_format == \"gtex_eqtl\":\n sliced = sliced.assign(gene_id = sliced.region, variant_id=sliced.panel_variant_id, tss_distance = numpy.nan, ma_samples = numpy.nan, ma_count= numpy.nan, maf = numpy.nan, pval_nominal = numpy.nan, slope= sliced.zscore, slope_se=1)\n sliced = sliced[[\"gene_id\", \"variant_id\", \"tss_distance\", \"ma_samples\", \"ma_count\", \"maf\", \"pval_nominal\", \"slope\", \"slope_se\"]]\n Utilities.save_dataframe(sliced, args.output, header=True)\n logging.info(\"Finished slicing gwas\")\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\"Group gwas results by region\")\n parser.add_argument(\"-region_file\", help=\"Non-overlapping regions\")\n parser.add_argument(\"-gwas_file\", help=\"GWAS file, in fixed format (imputed) for now\")\n parser.add_argument(\"-output\", help=\"Where to save the result\")\n parser.add_argument(\"-parsimony\", help=\"How much logging to output\", type=int, default=10)\n parser.add_argument(\"--output_format\", default=\"dapg\")\n\n args = parser.parse_args()\n Logging.configure_logging(args.parsimony)\n run(args)", "__author__ = \"alvaro barbeira\"\n\nimport gzip\nimport logging\nimport re\n\nimport numpy\nimport pandas\n\nfrom .. import Utilities\nfrom ..Exceptions import ReportableException\nfrom .. 
import DataSink\n\ndef parse_spec(spec, order=None):\n if type(spec) == list:\n order = [x[1] for x in spec] if not order else order\n spec = {x[0]:x[1] for x in spec}\n else:\n raise RuntimeError(\"Unsupported datframe spec\")\n return spec, order\n\ndef load_list(path):\n entries=[]\n for i,line in Utilities.iterate_file(path):\n entries.append(line.strip())\n return entries\n\ndef load_column(path, column, unique_entries=True, white_list=None):\n r = set() if unique_entries else list()\n l = (lambda x: r.add(x)) if unique_entries else (lambda x: r.append(x))\n index = None\n for i,line in Utilities.iterate_file(path):\n comps = line.strip().split()\n\n if i==0:\n index = comps.index(column)\n continue\n v = comps[index]\n\n if white_list and not v in white_list:\n continue\n\n l(v)\n return r\n\ndef load_dataframe(path, spec=None, order=None, force_special_handling=False, skip_until_header=None, keys=None, key_column_name=None, separator=None, handle_empty_columns=False, additional_filter=None, columns=None):\n #TODO: think of this bash-python command line kink\n if separator == \"ANY_WHITESPACE\":\n separator = \"\\s+\"\n\n if force_special_handling or skip_until_header or handle_empty_columns or (keys and key_column_name) or additional_filter:\n d = dataframe_from_text_data_source(path, keys=keys, key_column_name=key_column_name,\n skip_until_header=skip_until_header, separator=separator, handle_empty_columns=handle_empty_columns, additional_filter=additional_filter,\n columns=columns)\n else:\n if separator is None: separator = \"\\t\"\n d = pandas.read_table(path, sep=separator, usecols=columns)\n\n if spec:\n spec, order = parse_spec(spec, order)\n d = d.rename(columns=spec)\n for c in order:\n if not c in d:\n d= d.assign(**{c:numpy.nan})\n d = d[order]\n\n return d\n\n#Very much like the previous one but faster, less flexible\ndef load_dataframe_2(path, keys, key_column_name, spec=None, order=None, to_numeric=None):\n index_column=None\n d = []\n for i, line in Utilities.iterate_file(path):\n if i==0:\n header = line.strip().split()\n if key_column_name and keys:\n index_column = header.index(key_column_name)\n continue\n comps = tuple(line.strip().split())\n\n if index_column:\n key = comps[index_column]\n if not key in keys:\n continue\n\n d.append(comps)\n\n d = Utilities.to_dataframe(data=d, columns=header, to_numeric=to_numeric)\n\n if spec:\n spec, order = parse_spec(spec, order)\n d = d.rename(columns=spec)\n for c in order:\n if not c in d:\n d= d.assign(**{c:numpy.nan})\n d = d[order]\n\n return d\n\ndef dataframe_from_text_data_source(path, keys=None, key_column_name=None, skip_until_header=None, separator=None, handle_empty_columns=False, sanitize=False, additional_filter=None, columns=None):\n if columns:\n columns = {x for x in columns}\n\n s = {}\n gz_ = \".gz\" in path\n o = gzip.open if gz_ else open\n with o(path) as file:\n header = None\n if skip_until_header:\n _s = skip_until_header if not gz_ else skip_until_header.encode()\n for line in file:\n if _s in line:\n header = _s\n c = line.split(_s)\n if len(c) > 1: header += c[1]\n break\n\n if header is None: raise ReportableException(\"Did not find specified header\")\n else:\n header = file.readline()\n\n if gz_: header = header.decode()\n\n header_comps = header.strip().split(separator)\n if columns:\n s = {c: [] for c in header_comps if c in columns}\n else:\n s = {c: [] for c in header_comps}\n\n index = -1\n if key_column_name:\n if not key_column_name in header_comps: raise ReportableException(\"Did not 
find key colum name\")\n index = header_comps.index(key_column_name)\n\n header_count = {k:header_comps.count(k) for k in header_comps}\n if len(header_count) < len(header_comps):\n duplicated = [k for k,v in header_count.items() if v>1]\n logging.info(\"The input GWAS has duplicated columns: %s, will only use the first one in each case\", str(duplicated))\n\n if handle_empty_columns:\n split_r = re.compile(separator) if separator is not None else re.compile(\"\\s\")\n\n for i,line in enumerate(file):\n if gz_: line = line.decode()\n if handle_empty_columns:\n line = line.replace(\"\\n\", \"\")\n comps = split_r.split(line)\n else:\n comps = line.strip().split(separator)\n\n if sanitize:\n comps = [sanitize_component(x) for x in comps]\n\n #Yeah, there are those kinds of files\n if not len(comps) == len(header_comps):\n logging.log(8, \"Found line with less components than headers, line %i\", i)\n continue\n\n if keys and not comps[index] in keys:\n continue\n\n if additional_filter and additional_filter(comps):\n continue\n\n # Load only the first column if in presence of duplicated columns. Yuck!\n sentinel=set()\n for i,c in enumerate(comps):\n comp = header_comps[i]\n if columns and not comp in columns: continue\n if comp in sentinel: continue\n sentinel.add(comp)\n s[comp].append(c)\n\n\n for c in header_comps:\n if columns and not c in columns: continue\n s[c] = numpy.array(pandas.to_numeric(s[c], errors='ignore'))\n\n return pandas.DataFrame(s)\n\nnon_en_number = re.compile(\"^[-\\+]?[0-9]*,{1}[0-9]+([eE]{1}[-\\+]?[0-9]+)?$\")\ndef sanitize_component(c):\n if non_en_number.match(c): c = c.replace(\",\",\".\")\n if c == \"\": c = None\n elif c == \"NA\": c = None\n elif c == \".\": c = None\n elif c == \"\\\\N\": c = None\n elif c == \"\": c= None\n elif c == \"-nan\": c = None\n\n return c\n\ndef sanitize_components(comps):\n return [sanitize_component(x) for x in comps]\n\ndef to_numeric(d, column):\n if column in d:\n a = [sanitize_component(x) for x in d[column]]\n d[column] = numpy.array(a, dtype=numpy.float64)\n\n#TODO: keep the file object open. Look into pandas code.\nclass TextDataFrameSink(DataSink.DataFrameSink):\n def __init__(self, path, write_header=True):\n self.path = path\n self.write_header = write_header\n self._wrote_header = False\n self.compression = Utilities._compression(path)\n\n def sink(self, d):\n header = self.write_header and not self._wrote_header\n mode = \"w\" if header else \"a\"\n d.to_csv(self.path, mode=mode, header=header, compression=self.compression, sep=\"\\t\", index=False)\n if not self._wrote_header and self.write_header:\n self._wrote_header = True\n\n def __enter__(self):\n self.initialize()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.finalize()\n\n def initialize(self):\n pass\n\n def finalize(self):\n pass\n\nclass TextDataSink(DataSink.DataSink):\n def __init__(self, path, header):\n self.path = path\n self.header = header\n self.file = None\n\n def initialize(self):\n self.file = gzip.open(self.path, \"w\")\n self.sink(self.header)\n\n def finalize(self):\n self.file.close()\n\n def sink(self, d):\n for _d in d:\n l = \"{}\\n\".format(\"\\t\".join(_d)).encode()\n self.file.write(l)\n\n def __enter__(self):\n self.initialize()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.finalize()" ]
[ [ "numpy.isnan", "pandas.read_table", "pandas.concat" ], [ "pandas.read_table", "numpy.array", "pandas.to_numeric", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
KiLJ4EdeN/DeepCOVID
[ "3df626f09658c4c0d8f91a00886d9ab04106154b" ]
[ "create_dataset.py" ]
[ "# for this to work download the dataset from the provided link.\n# then cd in the Images_Processed directory.\n\nimport os\nimport numpy as np\nimport cv2\nfrom scipy.io import savemat\n\nC = np.ones((349,))\nN = np.zeros((397,))\nlabels = np.concatenate((C, N), axis=0)\ncovid = os.listdir('CT_COVID')\nn_covid = os.listdir('CT_NonCOVID')\ndata=[]\nfor img_path in covid:\n img = cv2.imread('CT_COVID/'+img_path, cv2.IMREAD_COLOR)\n data.append(cv2.resize(img, (224, 224)))\n \n\nfor img_path in n_covid:\n img = cv2.imread('CT_NonCOVID/'+img_path, cv2.IMREAD_COLOR)\n data.append(cv2.resize(img, (224, 224)))\n \ndata = np.array(data)/255.\nprint(data.shape)\nprint(labels.shape)\n\nsavemat('images.mat', {'data': data,\n 'labels': labels})\n" ]
[ [ "numpy.ones", "numpy.concatenate", "scipy.io.savemat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
etalab-ia/pseudo_conseil_etat
[ "c2d8be0289049fe29c3cf5179415a8452605c22e" ]
[ "src/results/conll_evaluate_results.py" ]
[ "'''\nEvaluates the performance of a CoNLL format annotated file. Also shows the errors that were found in the file.\nThe file should have three columns (token, true tag, predicted tag).\n\nUsage:\n conll_evaluate_results.py <conll_file_path> <output_results_path> [options]\n\nArguments:\n <conll_file_path> Annotated CoNLL file using the model\n <output_results_path> Output text fiile where to save the analysis\n\n --window=<p> If equal to \"single\" print the single tokens that were misclassified. [default: single]\n If it is an int, show the previous and following n tokens around the error.\n --type_error=<n> What type of errors to show. For ex., \"B-PER,O\" will show the errors when\n the true label was B-PER but the predicted label is O (default: None)\n'''\n\nimport pandas as pd\nfrom argopt import argopt\n\nfrom seqeval.metrics import classification_report, f1_score\n\nfrom src.results.confusion_matrix_pretty_print import print_confusion_matrix\n\n\ndef print_results(y_true, y_pred):\n classif_report = classification_report(y_true, y_pred)\n print(classif_report)\n\n fscore = f1_score(y_true, y_pred)\n print(f\"F-score (micro): {fscore:.2f}\")\n fscore_str = f\"F-score (micro): {fscore:.2f}\"\n\n labels = list(set(y_true))\n labels.pop(labels.index(\"O\"))\n labels = sorted(labels, key=lambda x: (x[2:], x[0])) + [\"O\"]\n\n cm = print_confusion_matrix(y_true=y_true, y_pred=y_pred,\n labels=labels,\n return_string=True)\n print(cm)\n\n return classif_report, fscore_str, cm\n\n\ndef print_errors(results_df: pd.DataFrame, type_error=None, window=\"single\", return_string=False):\n \"\"\"\n Show the errors found in the read CoNLL file\n :param results_df: Input CoNLL file to test\n :param type_error: Dict containing the types of errors to show: ex.: {\"true\": \"B-PER_NOM\", \"pred\": \"O\"}.\n Show all the errors by default\n :param window: If \"single\", show the single misclassified token, if an int, show the previous and next n tokens\n :return_string: If True, print AND return a string with the results\n :return:\n \"\"\"\n from io import StringIO\n import sys\n\n errors_string = StringIO()\n old_stdout = sys.stdout\n if return_string:\n errors_string = StringIO()\n sys.stdout = errors_string\n\n results_df = results_df.fillna(\"\")\n results_df.index = range(1, len(results_df) + 1)\n if type_error:\n errors_idx = results_df[(results_df[\"true_tag\"] == type_error[\"true\"]) &\n (results_df[\"pred_tag\"] == type_error[\"pred\"])].index\n\n else:\n errors_idx = results_df[results_df[\"pred_tag\"] != results_df[\"true_tag\"]].index\n\n if window == \"single\":\n final_df = results_df.loc[errors_idx]\n print(final_df.to_string())\n elif isinstance(window, int):\n lower_bound, upper_bound = (-1, -1)\n for idx in errors_idx:\n if lower_bound < idx < upper_bound:\n continue\n lower_bound = max(0, idx - window)\n upper_bound = min(errors_idx.max(), idx + window)\n window_df = results_df.loc[lower_bound:upper_bound, :]\n print(f\"Line {idx} of the CoNLL file:\", end=\"\\n\\t\")\n print(window_df, end=\"\\n\\n\")\n\n if return_string:\n sys.stdout = old_stdout\n return errors_string.getvalue()\n\n\ndef main(conll_file_path, output_results_path, type_error, window):\n # Load conll file\n results_df = pd.read_csv(conll_file_path, delim_whitespace=True, names=[\"token\", \"true_tag\", \"pred_tag\"],\n skip_blank_lines=False)\n y_true = results_df[\"true_tag\"].dropna().values.tolist()\n y_pred = results_df[\"pred_tag\"].dropna().values.tolist()\n results = print_results(y_true=y_true, 
y_pred=y_pred)\n print()\n errors = print_errors(results_df=results_df, type_error=type_error, window=window, return_string=True)\n print(errors)\n results_errors = list(results) + [errors]\n\n with open(output_results_path, \"w\") as outo:\n for info in results_errors:\n outo.write(str(info))\n outo.write(\"\\n\\n\")\n\n\nif __name__ == '__main__':\n parser = argopt(__doc__).parse_args()\n conll_file_path = parser.conll_file_path\n output_results_path = parser.output_results_path\n window = parser.window\n if window.isdigit():\n window = int(window)\n\n if parser.type_error:\n type_error = parser.type_error.split(\",\")\n type_error = {\"true\": type_error[0], \"pred\": type_error[1]}\n else:\n type_error = parser.type_error\n\n main(conll_file_path=conll_file_path,\n output_results_path=output_results_path, type_error=type_error,\n window=window)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
NervanaSystems/ngraph-python
[ "5e5c9bb9f24d95aee190b914dd2d44122fc3be53", "5e5c9bb9f24d95aee190b914dd2d44122fc3be53" ]
[ "examples/deepspeech/data.py", "examples/video-c3d/video_c3d.py" ]
[ "from __future__ import division\nimport numpy as np\nfrom ngraph.frontends.neon.aeon_shim import AeonDataLoader\nfrom ngraph.util.persist import get_data_cache_or_nothing\n\n\ndef make_aeon_dataloader(manifest_filename, audio_length, transcript_length,\n sample_freq_hz=16000, frame_length=.025, frame_stride=.01,\n feature_type='mfsc', num_filters=13,\n alphabet=\"_'ABCDEFGHIJKLMNOPQRSTUVWXYZ \",\n batch_size=32, cache_root=None, num_batches=None,\n single_iteration=False, seed=None):\n\n \"\"\"\n Creates a custom dataloader for speech transcription.\n\n Arguments:\n manifest_filename (str): Path to manifest file\n audio_length (float): Length of longest audio clip (seconds)\n transcript_length (int): Length of longest transcription\n sample_freq_hz (int): Sample rate of audio files (hertz)\n frame_length (float): Length of window for spectrogram calculation (seconds)\n frame_stride (float): Stride for spectrogram calculation (seconds)\n feature_type (str): Feature space for audio\n num_filters (int): Number of mel-frequency bands\n alphabet (str): Alphabet for the character map\n batch_size (int): Size of a single batch\n cache_root (str): Path to dataloader cache directory\n num_batches (int): Number of batches to load. Defaults to infinite\n single_iteration (bool): Sets \"iteration_mode\" to \"ONCE\"\n seed (int): Random seed for dataloader. Also turns off shuffling.\n \"\"\"\n\n if cache_root is None:\n cache_root = get_data_cache_or_nothing('deepspeech2-cache/')\n\n feats_config = dict(type=\"audio\",\n sample_freq_hz=sample_freq_hz,\n max_duration=\"{} seconds\".format(audio_length),\n frame_length=\"{} seconds\".format(frame_length),\n frame_stride=\"{} seconds\".format(frame_stride),\n feature_type=feature_type,\n num_filters=num_filters,\n emit_length=True)\n\n # Transcript transformation parameters\n transcripts_config = dict(type=\"char_map\",\n alphabet=alphabet,\n max_length=transcript_length,\n emit_length=True)\n\n config = {'manifest_filename': manifest_filename,\n 'batch_size': batch_size,\n 'etl': [feats_config, transcripts_config],\n 'cache_directory': cache_root}\n\n if seed is not None:\n config[\"shuffle_enable\"] = False\n config[\"shuffle_manifest\"] = False\n config[\"random_seed\"] = seed\n\n if num_batches is not None:\n config[\"iteration_mode\"] = \"COUNT\"\n config[\"iteration_mode_count\"] = num_batches\n elif single_iteration is True:\n config[\"iteration_mode\"] = \"ONCE\"\n\n return SpeechTranscriptionLoader(config)\n\n\nclass SpeechTranscriptionLoader(AeonDataLoader):\n \"\"\"custom dataloader for speech transcription.\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n\n config_types = [etl[\"type\"] for etl in config[\"etl\"]]\n for etl_type in (\"audio\", \"char_map\"):\n if etl_type not in config_types:\n raise ValueError(\"SpeechTranscriptionLoader must have an etl configuration \"\n \"with type '{}'\".format(etl_type))\n super(SpeechTranscriptionLoader, self).__init__(config, *args, **kwargs)\n\n audio_config = config[\"etl\"][config_types.index(\"audio\")]\n self.sample_rate = audio_config[\"sample_freq_hz\"]\n self.duration = float(audio_config[\"max_duration\"].split(\" \")[0])\n\n def __next__(self):\n\n sample = super(SpeechTranscriptionLoader, self).__next__()\n return self._preprocess(sample)\n\n def _preprocess(self, sample):\n \"\"\"\n Preprocess samples to pack char_map for ctc, ensure dtypes,\n and convert audio length to percent of max.\n\n Arguments:\n sample (dict): A single sample dictionary with keys of\n \"audio\", 
\"audio_length\", \"char_map\", and\n \"char_map_length\"\n \"\"\"\n\n max_samples = self.sample_rate * self.duration\n\n def pack_for_ctc(arr, trans_lens):\n\n packed = np.zeros(np.prod(arr.shape), dtype=arr.dtype)\n start = 0\n for ii, trans_len in enumerate(trans_lens):\n packed[start: start + trans_len] = arr[ii, 0, :trans_len]\n start += trans_len\n\n return np.reshape(packed, arr.shape)\n\n sample[\"audio_length\"] = 100 * sample[\"audio_length\"].astype(\"float32\") / max_samples\n sample[\"audio_length\"] = np.clip(sample[\"audio_length\"], 0, 100).astype(np.int32)\n sample[\"char_map\"] = pack_for_ctc(sample[\"char_map\"],\n sample[\"char_map_length\"].ravel()).astype(np.int32)\n sample[\"char_map_length\"] = sample[\"char_map_length\"].astype(np.int32)\n sample[\"audio\"] = sample[\"audio\"].astype(np.float32)\n\n return sample\n", "#!/usr/bin/env python\n# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\n\"\"\"\nVideo-C3D model. Activity classification example from UCF-101 data set.\nusage: python video_c3d.py -b gpu\n\"\"\"\nimport os\nimport numpy as np\nimport ngraph as ng\n\nfrom ngraph.frontends.neon import GaussianInit, ConstantInit\nfrom ngraph.frontends.neon import Layer, Affine, Convolution, Pooling, Sequential\nfrom ngraph.frontends.neon import Softmax, Rectlin, Dropout, GradientDescentMomentum\nfrom ngraph.frontends.neon import ax, make_bound_computation\nfrom ngraph.frontends.neon import NgraphArgparser\n\nimport ngraph.transformers as ngt\nfrom tqdm import tqdm, trange\nfrom contextlib import closing\nfrom data import make_train_loader, make_validation_loader\nfrom plot import plot_logs\nimport pickle\n\n# TODO Issue raised to have the strides default to the size of the pooling layer\n# change the strides after issue is resolved\n# https://github.com/NervanaSystems/private-ngraph/issues/2309\n# TODO Data loader needs fixing to remove the .reset() calls on the data iterators\n# TODO Data loader needs to convert data into dictionary\n\n\ndef create_network():\n '''\n Define 3D convolutional network\n '''\n\n # Define for weight initialization\n g1 = GaussianInit(mean=0., var=0.01)\n g5 = GaussianInit(mean=0., var=0.005)\n c0 = ConstantInit(val=0.)\n c1 = ConstantInit(val=1.)\n ax.Y.length = 101\n\n padding = {'D': 1, 'H': 1, 'W': 1, 'C': 0}\n strides = {'D': 2, 'H': 2, 'W': 2, 'C': 1}\n\n layers = [\n Convolution((3, 3, 3, 64), padding=padding, filter_init=g1, bias_init=c0,\n activation=Rectlin()),\n Pooling((1, 2, 2), strides={'D': 1, 'H': 2, 'W': 2, 'C': 1}),\n Convolution((3, 3, 3, 128), padding=padding, filter_init=g1, bias_init=c1,\n activation=Rectlin()),\n Pooling((2, 2, 2), strides=strides),\n Convolution((3, 3, 3, 256), padding=padding, filter_init=g1, bias_init=c1,\n activation=Rectlin()),\n Pooling((2, 2, 2), strides=strides),\n Convolution((3, 3, 3, 256), 
padding=padding, filter_init=g1, bias_init=c1,\n activation=Rectlin()),\n Pooling((2, 2, 2), strides=strides),\n Convolution((3, 3, 3, 256), padding=padding, filter_init=g1, bias_init=c1,\n activation=Rectlin()),\n Pooling((2, 2, 2), strides=strides),\n Affine(nout=2048, weight_init=g5, bias_init=c1, activation=Rectlin()),\n Dropout(keep=0.5),\n Affine(nout=2048, weight_init=g5, bias_init=c1, activation=Rectlin()),\n Dropout(keep=0.5),\n Affine(axes=ax.Y, weight_init=g1, bias_init=c0, activation=Softmax())\n ]\n\n return Sequential(layers)\n\n\ndef train_network(model, train_set, valid_set, batch_size, epochs, log_file):\n '''\n Trains the predefined network. Trains the model and saves the progress in\n the log file that is defined in the arguments\n\n model(object): Defines the model in Neon\n train_set(object): Defines the training set\n valid_set(object): Defines the validation set\n args(object): Training arguments\n batch_size(int): Minibatch size\n epochs(int): Number of training epoch\n log_file(string): File name to store trainig logs for plotting\n\n '''\n\n # Form placeholders for inputs to the network\n # Iterations needed for learning rate schedule\n inputs = train_set.make_placeholders(include_iteration=True)\n\n # Convert labels into one-hot vectors\n one_hot_label = ng.one_hot(inputs['label'], axis=ax.Y)\n\n learning_rate_policy = {'name': 'schedule',\n 'schedule': list(np.arange(2, epochs, 2)),\n 'gamma': 0.6,\n 'base_lr': 0.001}\n\n optimizer = GradientDescentMomentum(\n learning_rate=learning_rate_policy,\n momentum_coef=0.9,\n wdecay=0.005,\n iteration=inputs['iteration'])\n\n # Define graph for training\n train_prob = model(inputs['video'])\n train_loss = ng.cross_entropy_multi(train_prob, one_hot_label)\n batch_cost = ng.sequential([optimizer(train_loss), ng.mean(train_loss, out_axes=())])\n\n with closing(ngt.make_transformer()) as transformer:\n\n # Define graph for calculating validation set error and misclassification rate\n # Use inference mode for validation to avoid dropout in forward pass\n with Layer.inference_mode_on():\n inference_prob = model(inputs['video'])\n errors = ng.not_equal(ng.argmax(inference_prob), inputs['label'])\n eval_loss = ng.cross_entropy_multi(inference_prob, one_hot_label)\n eval_outputs = {'cross_ent_loss': eval_loss, 'misclass': errors}\n\n eval_computation = make_bound_computation(transformer, eval_outputs, inputs)\n\n train_outputs = {'batch_cost': batch_cost}\n train_computation = make_bound_computation(transformer, train_outputs, inputs)\n\n interval_cost = 0.0\n\n # Train in epochs\n logs = {'train': [], 'validation': [], 'misclass': []}\n for epoch in trange(epochs, desc='Epochs'):\n\n # Setup the training bar\n numBatches = train_set.ndata // batch_size\n tpbar = tqdm(unit='batches', ncols=100, total=numBatches, leave=False)\n\n train_set.reset()\n valid_set.reset()\n\n train_log = []\n for step, data in enumerate(train_set):\n data = dict(data)\n data['iteration'] = epoch # learning schedule based on epochs\n output = train_computation(data)\n train_log.append(float(output['batch_cost']))\n\n tpbar.update(1)\n tpbar.set_description(\"Training {:0.4f}\".format(float(output['batch_cost'])))\n interval_cost += float(output['batch_cost'])\n tqdm.write(\"Epoch {epch} complete. 
\"\n \"Avg Train Cost {cost:0.4f}\".format(\n epch=epoch,\n cost=interval_cost / step))\n interval_cost = 0.0\n tpbar.close()\n validation_loss = run_validation(valid_set, eval_computation)\n tqdm.write(\"Avg losses: {}\".format(validation_loss))\n logs['train'].append(train_log)\n logs['validation'].append(validation_loss['cross_ent_loss'])\n logs['misclass'].append(validation_loss['misclass'])\n\n # Save log data and plot at the end of each epoch\n with open(log_file, 'wb') as f:\n pickle.dump(logs, f)\n plot_logs(logs=logs)\n\n\ndef run_validation(dataset, computation):\n '''\n Computes the validation error and missclassification rate\n Helper function that is called from the main traning function\n\n dataset(object): Contains the validation dataset object\n computation(object): Validation function\n metric_names(dict): Names of the metrics calculated by the computation\n inputs(object): Placeholders for inputs\n '''\n\n dataset.reset()\n all_results = None\n for i, data in enumerate(dataset):\n data = dict(data)\n data['iteration'] = i\n results = computation(data)\n if all_results is None:\n all_results = {k: list(v) for k, v in results.items()}\n else:\n for k, v in results.items():\n all_results[k].extend(list(v))\n\n reduced_results = {k: np.mean(v[:dataset.ndata]) for k, v in all_results.items()}\n\n return reduced_results\n\n\ndef get_data(manifest, manifest_root, batch_size, subset_pct, rng_seed):\n '''\n Loads training and validation set using aeon loader\n\n args(object): Contains function arguments\n manifest(list): Manifest files for traning and validaions\n manifest_root(string): Root directory of manifest file\n batch_size(int): Mini batch size\n subset_pct(float): Subset percentage of the data (0-100)\n rng_seed(int): Seed for random number generator\n '''\n\n assert 'train' in manifest[1], \"Missing train manifest\"\n assert 'test' in manifest[0], \"Missing validation manifest\"\n\n train_set = make_train_loader(manifest[1], manifest_root, batch_size,\n subset_pct, rng_seed)\n valid_set = make_validation_loader(manifest[0], manifest_root, batch_size,\n subset_pct)\n\n return train_set, valid_set\n\n\nif __name__ == \"__main__\":\n\n # Load training configuration and parse arguments\n train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')\n config_files = [train_config] if os.path.exists(train_config) else []\n parser = NgraphArgparser(__doc__, default_config_files=config_files)\n\n parser.add_argument('--subset_pct', type=float, default=100,\n help='subset of training dataset to use (percentage)')\n parser.add_argument('--log_file', type=str, default='training_log.pkl',\n help='name for the trainig log file')\n args = parser.parse_args()\n\n np.random.seed = args.rng_seed\n\n # Load data\n train_set, valid_set = get_data(args.manifest, args.manifest_root, args.batch_size,\n args.subset_pct, args.rng_seed)\n\n # Define model and train\n model = create_network()\n train_network(model, train_set, valid_set, args.batch_size, args.epochs, args.log_file)\n" ]
[ [ "numpy.reshape", "numpy.prod", "numpy.clip" ], [ "numpy.arange", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dirkcgrunwald/SAM
[ "0478925c506ad38fd405954cc4415a3e96e77d90" ]
[ "scripts/find_best.py" ]
[ "import numpy as np\nimport argparse\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nfrom sklearn.metrics import (precision_recall_curve, average_precision_score,\n roc_curve, roc_auc_score)\n \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nimport sklearn\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nimport operator\n\ndef load_dataset(inputfile):\n with open(inputfile, \"r\") as infile:\n data = np.loadtxt(infile, delimiter=\",\")\n\n y = data[:, 0] #labels are in the first column\n X = data[:, 1:] #features are in the rest of the columns\n\n numNonZero = np.count_nonzero(y)\n numFound = 0\n i = 0\n while numFound < numNonZero/2:\n if y[i] == 1:\n numFound += 1\n i += 1\n \n i -= 1\n y1 = y[0: i]\n y2 = y[i:]\n X1 = X[0:i]\n X2 = X[i :]\n\n return X1, X2, y1, y2\n\ndef train(trainx, trainy, testx, testy, selected, feature):\n #print(\"trainx.shape\", trainx.shape, \"trainy.shape\", trainy.shape)\n #print(list(selected))\n feature_list = list(selected)\n feature_list.append(feature)\n print(\"feature_list\", feature_list)\n trainx = trainx[:, feature_list]\n testx = testx[:, feature_list]\n\n clf = RandomForestClassifier()\n print(\"trainx.shape\", trainx.shape, \"trainy.shape\", trainy.shape)\n clf.fit(trainx, trainy)\n test_scores = clf.predict_proba(testx)[:,1]\n auc = roc_auc_score(testy, test_scores)\n return auc\n\ndef find_best(selected, available, X1s, X2s, y1s, y2s):\n \n best = 0\n best_feature = -1\n\n for feature in available:\n aucs = []\n for i in range(len(X1s)):\n auc1 = train(X1s[i], y1s[i], X2s[i], y2s[i], selected, feature)\n print(\"auc1\", auc1)\n auc2 = train(X2s[i], y2s[i], X1s[i], y1s[i], selected, feature)\n print(\"auc2\", auc2)\n aucs.append(auc1)\n aucs.append(auc2)\n average = sum(aucs) / len(aucs)\n print (\"average\", average)\n if average > best:\n print(\"updating best\")\n best = average\n best_feature = feature \n\n return best_feature, best \n\ndef main():\n\n parser = argparse.ArgumentParser(\"You specify a directory for where\" +\n \" the CTU dataset is located.\")\n parser.add_argument('--dir', type=str, required=True,\n help=\"The directory where the CTU stuff is.\")\n parser.add_argument('--num_ctu', type=int, default=13,\n help=\"The number of CTU directories to use.\")\n parser.add_argument('--num_features', type=int, default=28,\n help=\"The number of features to explore.\")\n \n FLAGS = parser.parse_args()\n X1s = []\n X2s = []\n y1s = []\n y2s = []\n for i in range(FLAGS.num_ctu):\n filename = FLAGS.dir + \"/\" + str(i + 1) + \"/simple_features_dest_src.csv\"\n print (\"Loading \" + filename )\n X1, X2, y1, y2 = load_dataset(filename)\n \n X1s.append(X1)\n X2s.append(X2)\n y1s.append(y1)\n y2s.append(y2)\n\n selected = set()\n available = set()\n aucs = []\n for i in range(FLAGS.num_features):\n available.add(i)\n\n for i in range(FLAGS.num_features):\n best_feature, auc = find_best(selected, available, X1s, X2s, y1s, y2s)\n print (\"Adding feature\", best_feature)\n print (\"AUC \", auc)\n \n selected.add(best_feature)\n available.remove(best_feature)\n\nmain()\n" ]
[ [ "numpy.count_nonzero", "sklearn.metrics.roc_auc_score", "numpy.loadtxt", "sklearn.ensemble.RandomForestClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
youngsjjn/SFNet
[ "7de986398992aa2c8d3d50474b04c4c48235e075" ]
[ "util/dataset.py" ]
[ "import os\nimport os.path\nimport cv2\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom network.datasets import edge_utils as edge_utils\nimport torch\n\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef is_image_file(filename):\n filename_lower = filename.lower()\n return any(filename_lower.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef make_dataset(split='train', data_root=None, data_list=None):\n assert split in ['train', 'val', 'test']\n if not os.path.isfile(data_list):\n raise (RuntimeError(\"Image list file do not exist: \" + data_list + \"\\n\"))\n image_label_list = []\n list_read = open(data_list).readlines()\n print(\"Totally {} samples in {} set.\".format(len(list_read), split))\n print(\"Starting Checking image&label pair {} list...\".format(split))\n for line in list_read:\n line = line.strip()\n line_split = line.split(' ')\n if split == 'test':\n if len(line_split) != 1:\n raise (RuntimeError(\"Image list file read line error : \" + line + \"\\n\"))\n image_name = os.path.join(data_root, line_split[0])\n label_name = image_name # just set place holder for label_name, not for use\n else:\n if len(line_split) != 2:\n raise (RuntimeError(\"Image list file read line error : \" + line + \"\\n\"))\n image_name = os.path.join(data_root, line_split[0])\n label_name = os.path.join(data_root, line_split[1])\n '''\n following check costs some time\n if is_image_file(image_name) and is_image_file(label_name) and os.path.isfile(image_name) and os.path.isfile(label_name):\n item = (image_name, label_name)\n image_label_list.append(item)\n else:\n raise (RuntimeError(\"Image list file line error : \" + line + \"\\n\"))\n '''\n item = (image_name, label_name)\n image_label_list.append(item)\n print(\"Checking image&label pair {} list done!\".format(split))\n return image_label_list\n\n\nclass SemData(Dataset):\n def __init__(self, split='train', data_root=None, data_list=None, transform=None):\n self.split = split\n self.data_list = make_dataset(split, data_root, data_list)\n self.transform = transform\n self.data_root = data_root\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, index):\n image_path, label_path = self.data_list[index] # data_root remove !!!!! in line 64 68\n image = cv2.imread(image_path, cv2.IMREAD_COLOR) # BGR 3 channel ndarray wiht shape H * W * 3\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert cv2 read image from BGR order to RGB order\n image = np.float32(image)\n\n label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE) # GRAY 1 channel ndarray with shape H * W\n if 'ade' in self.data_root:\n label = label - 1\n elif 'pascal' in self.data_root:\n label = label - 1\n\n if image.shape[0] != label.shape[0] or image.shape[1] != label.shape[1]:\n raise (RuntimeError(\"Image & label shape mismatch: \" + image_path + \" \" + label_path + \"\\n\"))\n if self.transform is not None:\n image, label = self.transform(image, label)\n\n return image, label\n" ]
[ [ "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neuro-ml/deep_pipe
[ "fa559630c20548105884151e973dc40c88531891", "fa559630c20548105884151e973dc40c88531891" ]
[ "dpipe/im/tests/test_preprocessing.py", "dpipe/dataset/wrappers.py" ]
[ "import unittest\nimport numpy as np\n\nfrom dpipe.im.preprocessing import *\n\n\nclass TestPrep(unittest.TestCase):\n def setUp(self):\n self.x = np.random.rand(3, 10, 10) * 2 + 3\n\n def test_normalize_image(self):\n x = normalize(self.x)\n np.testing.assert_almost_equal(0, x.mean())\n np.testing.assert_almost_equal(1, x.std())\n\n x = normalize(self.x, mean=False)\n np.testing.assert_almost_equal(1, x.std())\n\n x = normalize(self.x, std=False)\n np.testing.assert_almost_equal(0, x.mean())\n\n y = np.array([-100, 1, 2, 1000])\n x = normalize(y, percentiles=25)\n np.testing.assert_equal(x, (y - 1.5) * 2)\n np.testing.assert_equal(\n normalize(y, percentiles=25),\n normalize(y, percentiles=[25, 75]),\n )\n\n def test_normalize_multichannel_image(self):\n x = normalize(self.x, axis=0)\n np.testing.assert_almost_equal(0, x.mean(axis=(1, 2)))\n np.testing.assert_almost_equal(1, x.std(axis=(1, 2)))\n", "\"\"\"\nWrappers change the dataset's behaviour.\nSee the :doc:`tutorials/wrappers` tutorial for more details.\n\"\"\"\nimport functools\nfrom itertools import chain\nfrom types import MethodType, FunctionType\nfrom typing import Sequence, Callable, Iterable\nfrom collections import ChainMap, namedtuple\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom dpipe.checks import join\nfrom dpipe.io import save_numpy, PathLike, load_or_create, load_numpy\nfrom dpipe.itertools import zdict, collect\nfrom dpipe.im.preprocessing import normalize\nfrom .base import Dataset\n\n\nclass Proxy:\n \"\"\"Base class for all wrappers.\"\"\"\n\n def __init__(self, shadowed):\n self._shadowed = shadowed\n\n def __getattr__(self, name):\n return getattr(self._shadowed, name)\n\n def __dir__(self):\n return list(set(super().__dir__()) | set(dir(self._shadowed)))\n\n\n@collect\ndef _get_public_methods(instance):\n for attr in dir(instance):\n if not attr.startswith('_') and isinstance(getattr(instance, attr), (MethodType, FunctionType)):\n yield attr\n\n\ndef cache_methods(instance, methods: Iterable[str] = None, maxsize: int = None):\n \"\"\"Cache the ``instance``'s ``methods``. 
If ``methods`` is None, all public methods will be cached.\"\"\"\n if methods is None:\n methods = _get_public_methods(instance)\n\n cache = functools.lru_cache(maxsize)\n new_methods = {method: staticmethod(cache(getattr(instance, method))) for method in methods}\n proxy = type('Cached', (Proxy,), new_methods)\n return proxy(instance)\n\n\ndef cache_methods_to_disk(instance, base_path: PathLike, loader: Callable = load_numpy, saver: Callable = save_numpy,\n **methods: str):\n \"\"\"\n Cache the ``instance``'s ``methods`` to disk.\n\n Parameters\n ----------\n instance\n arbitrary object\n base_path: str\n the path, all other paths of ``methods`` relative to.\n methods: str\n each keyword argument has the form ``method_name=path_to_cache``.\n The methods are assumed to take a single argument of type ``str``.\n loader\n loads a single object given its path.\n saver: Callable(value, path)\n saves a single object to the given path.\n \"\"\"\n base_path = Path(base_path)\n\n def decorator(method, folder):\n method = getattr(instance, method)\n path = base_path / folder\n path.mkdir(parents=True, exist_ok=True)\n\n @functools.wraps(method)\n def wrapper(identifier, *args, **kwargs):\n return load_or_create(\n path / f'{identifier}.npy', method, identifier, *args, **kwargs, save=saver, load=loader)\n\n return staticmethod(wrapper)\n\n new_methods = {method: decorator(method, folder) for method, folder in methods.items()}\n proxy = type('CachedToDisk', (Proxy,), new_methods)\n return proxy(instance)\n\n\ndef apply(instance, **methods: Callable):\n \"\"\"\n Applies a given function to the output of a given method.\n\n Parameters\n ----------\n instance\n arbitrary object\n methods: Callable\n each keyword argument has the form ``method_name=func_to_apply``.\n ``func_to_apply`` is applied to the ``method_name`` method.\n\n Examples\n --------\n >>> # normalize will be applied to the output of load_image\n >>> dataset = apply(base_dataset, load_image=normalize)\n \"\"\"\n\n def decorator(method, func):\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n return func(method(*args, **kwargs))\n\n return staticmethod(wrapper)\n\n new_methods = {method: decorator(getattr(instance, method), func) for method, func in methods.items()}\n proxy = type('Apply', (Proxy,), new_methods)\n return proxy(instance)\n\n\ndef set_attributes(instance, **attributes):\n \"\"\"\n Sets or overwrites attributes with those provided as keyword arguments.\n\n Parameters\n ----------\n instance\n arbitrary object\n attributes\n each keyword argument has the form ``attr_name=attr_value``.\n \"\"\"\n proxy = type('SetAttr', (Proxy,), attributes)\n return proxy(instance)\n\n\ndef change_ids(dataset: Dataset, change_id: Callable, methods: Iterable[str] = None) -> Dataset:\n \"\"\"\n Change the ``dataset``'s ids according to the ``change_id`` function and adapt the provided ``methods``\n to work with the new ids.\n\n Parameters\n ----------\n dataset: Dataset\n the dataset to perform ids changing on.\n change_id: Callable(str) -> str\n the method which allows change ids. Output ids should be unique as well as old ids.\n methods: Iterable[str]\n the list of methods to be adapted. 
Each method takes a single argument - the identifier.\n \"\"\"\n if methods is None:\n methods = _get_public_methods(dataset)\n\n assert 'ids' not in methods\n ids = tuple(map(change_id, dataset.ids))\n if len(set(ids)) != len(ids):\n raise ValueError('The resulting ids are not unique.')\n new_to_old = zdict(ids, dataset.ids)\n\n def decorator(method):\n @functools.wraps(method)\n def wrapper(identifier):\n return method(new_to_old[identifier])\n\n return staticmethod(wrapper)\n\n attributes = {method: decorator(getattr(dataset, method)) for method in methods}\n attributes['ids'] = ids\n proxy = type('ChangedID', (Proxy,), attributes)\n return proxy(dataset)\n\n\ndef merge(*datasets: Dataset, methods: Sequence[str] = None, attributes: Sequence[str] = ()) -> Dataset:\n \"\"\"\n Merge several ``datasets`` into one by preserving the provided ``methods`` and ``attributes``.\n\n Parameters\n ----------\n datasets: Dataset\n sequence of datasets.\n methods: Sequence[str], None, optional\n the list of methods to be preserved. Each method should take an identifier as its first argument.\n If ``None``, all the common methods will be preserved.\n attributes: Sequence[str]\n the list of attributes to be preserved. For each dataset their values should be the same.\n Default is the empty sequence ``()``.\n \"\"\"\n if methods is None:\n methods = set(_get_public_methods(datasets[0]))\n for dataset in datasets:\n methods = methods & set(_get_public_methods(dataset))\n\n clash = set(methods) & set(attributes)\n if clash:\n raise ValueError(f'Method names clash with attribute names: {join(clash)}.')\n ids = tuple(id_ for dataset in datasets for id_ in dataset.ids)\n if len(set(ids)) != len(ids):\n raise ValueError('The ids are not unique.')\n\n preserved_attributes = []\n for attribute in attributes:\n # can't use a set here, because not all attributes can be hashed\n values = []\n for dataset in datasets:\n value = getattr(dataset, attribute)\n if value not in values:\n values.append(value)\n\n if len(values) != 1:\n raise ValueError(f'Datasets have different values of attribute \"{attribute}\".')\n\n preserved_attributes.append(values[0])\n\n def decorator(method_name):\n def wrapper(identifier, *args, **kwargs):\n if identifier not in id_to_dataset:\n raise KeyError(f\"This dataset doesn't contain the id {identifier}\")\n\n return getattr(id_to_dataset[identifier], method_name)(identifier, *args, **kwargs)\n\n return wrapper\n\n id_to_dataset = ChainMap(*({id_: dataset for id_ in dataset.ids} for dataset in datasets))\n Merged = namedtuple('Merged', chain(['ids'], methods, attributes))\n return Merged(*chain([ids], map(decorator, methods), preserved_attributes))\n\n\ndef apply_mask(dataset: Dataset, mask_modality_id: int = -1, mask_value: int = None) -> Dataset:\n \"\"\"\n Applies the ``mask_modality_id`` modality as the binary mask to the other modalities\n and remove the mask from sequence of modalities.\n\n Parameters\n ----------\n dataset: Dataset\n dataset which is used in the current task.\n mask_modality_id: int\n the index of mask in the sequence of modalities.\n Default is ``-1``, which means the last modality will be used as the mask.\n mask_value: int, None, optional\n the value in the mask to filter other modalities with.\n If ``None``, greater than zero filtering will be applied. 
Default is ``None``.\n\n Examples\n --------\n >>> modalities = ['flair', 't1', 'brain_mask'] # we are to apply brain mask to other modalities\n >>> target = 'target'\n >>>\n >>> dataset = apply_mask(\n >>> dataset=Wmh2017(\n >>> data_path=data_path,\n >>> modalities=modalities,\n >>> target=target\n >>> ),\n >>> mask_modality_id=-1,\n >>> mask_value=1\n >>> )\n \"\"\"\n\n class MaskedDataset(Proxy):\n def load_image(self, patient_id):\n images = self._shadowed.load_image(patient_id)\n mask = images[mask_modality_id]\n\n mask_bin = mask > 0 if mask_value is None else mask == mask_value\n if not np.sum(mask_bin) > 0:\n raise ValueError('The obtained mask is empty')\n\n images = [image * mask for image in images[:-1]]\n return np.array(images)\n\n @property\n def n_chans_image(self):\n return self._shadowed.n_chans_image - 1\n\n return MaskedDataset(dataset)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.array", "numpy.random.rand" ], [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ys3x/deep-learning
[ "39a9191e05e1b7005456cacf6b84e1439f8252ec" ]
[ "first-neural-network/my_answers.py" ]
[ "import numpy as np\n\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n\n def train(self, features, targets):\n ''' Train the network on batch of features and targets. \n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n \n final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below\n # Implement the backproagation function below\n delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, \n delta_weights_i_h, delta_weights_h_o)\n self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n\n def forward_pass_train(self, X):\n ''' Implement forward pass here \n \n Arguments\n ---------\n X: features batch\n\n '''\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden)\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)\n final_outputs = final_inputs\n \n return final_outputs, hidden_outputs\n\n def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n ''' Implement backpropagation\n \n Arguments\n ---------\n final_outputs: output from forward pass\n y: target (i.e. label) batch\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n\n '''\n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # (1, )\n\n # TODO: Calculate the hidden layer's contribution to the error\n output_error_term = error # (1, ), gradient is 1 because of identity function. 
If sigmoid => error * final_outputs * (1 - final_outputs)\n hidden_error = np.dot(self.weights_hidden_to_output, output_error_term) # (n_hidden, 1).(1, ) => (n_hidden, )\n hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs) # (n_hidden, )x(n_hidden, ) => (n_hidden, )\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None] # (n_hidden, ) x (n_features, 1) = dWih: (n_features, n_hidden) \n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:, None] # (1, ) * (n_hidden, 1) = dWho:(n_hidden, 1)\n return delta_weights_i_h, delta_weights_h_o \n\n def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n ''' Update weights on gradient descent step\n \n Arguments\n ---------\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n n_records: number of records\n\n '''\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records\n\n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden)\n hidden_outputs = self.activation_function(hidden_inputs)\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)\n final_outputs = final_inputs\n \n return final_outputs\n\n\n#########################################################\n# Set your hyperparameters here\n##########################################################\n\n# increased the number of hidden units as it's obviously too small. Tried 50, 32, 16, 12 (16 and 12 are similar)\n# optimized the learning rate as Andrew Ng tought -> 1, 0.01, 1, 0.5, 0.75, 0.8, 0.7 ==> 0.75\n# changed # of iteration, 1000, 2000, 3000, 4000, 5000, 10000. At 5K, bias saturated. At 10 K, variance increased. ==> 5000\n# again, tuned the lr between 0.75 - 0.8 then 0.75 was slightly better performance.\niterations = 5_000\nlearning_rate = 0.77\nhidden_nodes = 16\noutput_nodes = 1\n" ]
[ [ "numpy.dot", "numpy.random.normal", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sunnysinghnitb/tensorflow
[ "e3aa49701d60a006d09786f5a240530ee5f47e25" ]
[ "tensorflow/python/keras/engine/training_utils.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training-related utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nfrom collections import OrderedDict\nimport copy\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.data.experimental.ops import cardinality\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.ops import readers\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import callbacks as cbks\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils.losses_utils import squeeze_or_expand_dimensions\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Aggregator(object):\n \"\"\"Abstract base class used to aggregate batch-level outputs of a loop.\n\n Attributes:\n use_steps: Whether the loop is using `step` or `batch_size`.\n num_samples_or_steps: Either `batch_size*num_batches` or `steps`.\n results: What to return at the end of the aggregation loop.\n \"\"\"\n\n def __init__(self, use_steps, num_samples_or_steps):\n self.use_steps = use_steps\n self.num_samples_or_steps = num_samples_or_steps\n self.results = []\n\n @abc.abstractmethod\n def create(self, batch_outs):\n \"\"\"Creates the initial results from the first batch outputs.\n\n Arguments:\n batch_outs: A list of batch-level outputs.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n @abc.abstractmethod\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n \"\"\"Aggregates batch-level results into total results.\n\n Arguments:\n batch_outs: A list of batch-level outputs.\n batch_start: The start index of this batch. Always `None` if `use_steps`\n is `True`.\n batch_end: The end index of this batch. 
Always `None` if `use_steps` is\n `True`.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"Prepares the total results to be returned.\"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n\nclass MetricsAggregator(Aggregator):\n \"\"\"Aggregator that calculates loss and metrics info.\"\"\"\n\n def create(self, batch_outs):\n self.results = [0.] * len(batch_outs)\n\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n # Loss.\n if self.use_steps:\n self.results[0] += batch_outs[0]\n else:\n self.results[0] += batch_outs[0] * (batch_end - batch_start)\n # Metrics (always stateful, just grab current values.)\n self.results[1:] = batch_outs[1:]\n\n def finalize(self):\n if not self.results:\n raise ValueError('Empty training data.')\n self.results[0] /= self.num_samples_or_steps\n\n\nclass OutputsAggregator(Aggregator):\n \"\"\"Aggregator that concatenates outputs.\"\"\"\n\n def create(self, batch_outs):\n if self.use_steps:\n # Cannot pre-allocate the returned NumPy arrays bc\n # batch sizes are unknown. Concatenate batches at the end.\n for _ in batch_outs:\n self.results.append([])\n else:\n # Pre-allocate NumPy arrays.\n for batch_out in batch_outs:\n shape = (self.num_samples_or_steps,) + batch_out.shape[1:]\n self.results.append(np.zeros(shape, dtype=batch_out.dtype))\n\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n if self.use_steps:\n for i, batch_out in enumerate(batch_outs):\n self.results[i].append(batch_out)\n else:\n for i, batch_out in enumerate(batch_outs):\n self.results[i][batch_start:batch_end] = batch_out\n\n def finalize(self):\n if self.use_steps:\n self.results = [np.concatenate(result, axis=0) for result in self.results]\n\n\ndef get_progbar(model, count_mode):\n \"\"\"Get Progbar.\"\"\"\n stateful_metric_names = None\n if hasattr(model, 'metrics_names'):\n stateful_metric_names = model.metrics_names[1:] # Exclude `loss`\n return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names)\n\n\ndef slice_arrays(arrays, indices, contiguous=True):\n \"\"\"Slices batches out of provided arrays (workaround for eager tensors).\n\n Unfortunately eager tensors don't have the same slicing behavior as\n Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),\n hence we cannot use `generic_utils.slice_arrays` directly\n and we have to implement this workaround based on `concat`. 
This has a\n performance cost.\n\n Arguments:\n arrays: Single array or list of arrays.\n indices: List of indices in the array that should be included in the output\n batch.\n contiguous: Boolean flag indicating whether the indices are contiguous.\n\n Returns:\n Slice of data (either single array or list of arrays).\n \"\"\"\n converted_to_list = False\n if not isinstance(arrays, list):\n converted_to_list = True\n arrays = [arrays]\n if any(tensor_util.is_tensor(x) for x in arrays):\n if not contiguous:\n entries = [[x[i:i + 1] for i in indices] for x in arrays]\n slices = [array_ops.concat(x, axis=0) for x in entries]\n else:\n slices = [x[indices[0]:indices[-1] + 1] for x in arrays]\n else:\n slices = generic_utils.slice_arrays(arrays, indices)\n\n if converted_to_list:\n slices = slices[0]\n return slices\n\n\ndef check_num_samples(ins,\n batch_size=None,\n steps=None,\n steps_name='steps'):\n \"\"\"Determine the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n Arguments:\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples)\n before declaring `_predict_loop` finished.\n Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n Raises:\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n Returns:\n When steps is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input numpy array. When steps is not `None` and\n `batch_size` is `None`, returns `None`.\n\n Raises:\n ValueError: In case of invalid arguments.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError(\n 'If ' + steps_name + ' is set, the `batch_size` must be None.')\n if check_steps_argument(ins, steps, steps_name):\n return None\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef standardize_single_array(x, expected_shape=None):\n \"\"\"Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1.\"\"\"\n if x is None:\n return None\n\n if (x.shape is not None\n and len(x.shape) == 1\n and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. 
We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n Arguments:\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that\n the batch axis of the arrays matches the expected\n value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n Returns:\n List of standardized input arrays (one array per model input).\n\n Raises:\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n if not names:\n if (data is not None and hasattr(data, '__len__') and len(data) and\n not isinstance(data, dict)):\n raise ValueError('Error when checking model ' + exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] + '\". Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, (list, tuple)):\n if isinstance(data[0], (list, tuple)):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame' else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n if shapes is not None:\n data = [standardize_single_array(x, shape)\n for (x, shape) in zip(data, shapes)]\n else:\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError('Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), '\n 'but instead got the following list of ' +\n str(len(data)) + ' arrays: ' + str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. The list you passed was: ' + str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. 
Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None:\n if tensor_util.is_tensor(data[i]):\n tensorshape = data[i].get_shape()\n if not tensorshape:\n continue\n data_shape = tuple(tensorshape.as_list())\n else:\n data_shape = data[i].shape\n shape = shapes[i]\n if len(data_shape) != len(shape):\n raise ValueError('Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim is not None and dim is not None:\n raise ValueError(\n 'Error when checking ' + exception_prefix + ': expected ' +\n names[i] + ' to have shape ' + str(shape) +\n ' but got array with shape ' + str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n Arguments:\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n Returns:\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n Raises:\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or (isinstance(x_weight, list) and len(x_weight) == 0): # pylint: disable=g-explicit-length-test\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, list) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, list):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) + ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, dict):\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError(\n 'The model has multiple outputs, so `' + weight_type + '` '\n 'should be either a list or a dict. 
'\n 'Provided `' + weight_type + '` type not understood: ' + str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight, output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight, output_names,\n 'sample_weight')\n\n\ndef check_array_lengths(inputs, targets, weights=None):\n \"\"\"Does user input validation for numpy arrays.\n\n Arguments:\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n Raises:\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n\n def set_of_lengths(x):\n # Returns a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {}\n else:\n return set([y.shape[0] for y in x\n if y is not None and not tensor_util.is_tensor(y)])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. This check\n is purely for UX purposes.\n\n Arguments:\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n Raises:\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_losses = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None or tensor_util.is_tensor(y):\n continue\n if loss is losses.categorical_crossentropy:\n if y.shape[-1] == 1:\n raise ValueError('You are passing a target array of shape ' + str(\n y.shape) + ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). 
'\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n if loss in key_losses:\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n raise ValueError('A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss.__name__ + '`. '\n 'This loss expects '\n 'targets to have the same shape '\n 'as the output.')\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n sample_weights=None):\n \"\"\"Maps metric names and functions to model outputs.\n\n Arguments:\n metrics: a list or dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n sample_weights: a list of weights to be applied on the model outputs.\n\n Returns:\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[\n {\n 'acc': (binary_accuracy(), mean_obj_1),\n 'ce': (binary_crossentropy(), mean_obj_2)\n },\n {\n 'acc': (binary_accuracy(), mean_obj_3)\n }\n ]`\n\n Raises:\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n if isinstance(metrics, list):\n # we then apply all metrics to all outputs.\n nested_metrics = [copy.copy(metrics) for _ in output_names]\n elif isinstance(metrics, dict):\n nested_metrics = []\n for name in output_names:\n output_metrics = metrics.get(name, [])\n if not isinstance(output_metrics, list):\n output_metrics = [output_metrics]\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. '\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = OrderedDict()\n for metric in metrics:\n weighted = False if (sample_weights is None) else (\n sample_weights[i] is not None)\n metric_name = get_metric_name(metric, weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n\n # If the metric function is not stateful, we create a stateful version and\n # return both the stateless and the stateful version together. 
For batch\n # APIs like `train_on_batch` we will use the stateless version and for\n # other APIs like `fit` we will use the stateful version.\n is_stateful = isinstance(metric_fn,\n base_layer.Layer) and metric_fn.stateful\n stateful_fn = metric_fn\n if not is_stateful:\n stateful_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_fn.__name__)\n\n metrics_dict[metric_name] = (metric_fn, stateful_fn)\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n Arguments:\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n Returns:\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef weighted_masked_objective(fn):\n \"\"\"Adds support for masking and sample-weighting to an objective function.\n\n It transforms an objective function `fn(y_true, y_pred)`\n into a sample-weighted, cost-masked objective function\n `fn(y_true, y_pred, weights, mask)`.\n\n Arguments:\n fn: The objective function to wrap,\n with signature `fn(y_true, y_pred)`.\n\n Returns:\n A function with signature `fn(y_true, y_pred, weights, mask)`.\n \"\"\"\n if fn is None:\n return None\n\n def weighted(y_true, y_pred, weights, mask=None):\n \"\"\"Wrapper function.\n\n Arguments:\n y_true: `y_true` argument of `fn`.\n y_pred: `y_pred` argument of `fn`.\n weights: Weights tensor.\n mask: Mask tensor.\n\n Returns:\n Scalar tensor.\n \"\"\"\n # score_array has ndim >= 2\n score_array = fn(y_true, y_pred)\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n # Update weights with mask.\n if weights is None:\n weights = mask\n else:\n # Update dimensions of weights to match with mask if possible.\n mask, _, weights = squeeze_or_expand_dimensions(mask, None, weights)\n weights *= mask\n\n # Apply sample weighting.\n if weights is not None:\n\n # Update dimensions of weights to match with values if possible.\n score_array, _, weights = squeeze_or_expand_dimensions(\n score_array, None, weights)\n try:\n # Broadcast weights if possible.\n weights = weights_broadcast_ops.broadcast_weights(weights, score_array)\n except ValueError:\n # Reduce values to same ndim as weight array.\n ndim = K.ndim(score_array)\n weight_ndim = K.ndim(weights)\n score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))\n\n score_array = math_ops.multiply(score_array, weights)\n score_array = math_ops.reduce_sum(score_array)\n weights = math_ops.reduce_sum(weights)\n score_array = math_ops.div_no_nan(score_array, weights)\n return K.mean(score_array)\n\n return weighted\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. 
If both `sample_weight` and `class_weight` are provided,\n the weights are multiplied.\n\n Arguments:\n y: Numpy array of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`.\n `\"temporal\"` indicated that we expect 2D weight data\n that will be applied to the last 2 dimensions of\n the targets (i.e. we are weighting timesteps, not samples).\n\n Returns:\n A numpy array of target weights, one entry per sample to weight.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError(\n 'Found a sample_weight with shape' + str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tensor(sample_weight) and\n y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError(\n 'Found a sample_weight array with shape ' + str(sample_weight.shape) +\n ' for an input with shape ' + str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError('`class_weight` must contain all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.' 
%\n (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None\n\n\ndef has_symbolic_tensors(ls):\n if context.executing_eagerly():\n return False\n return has_tensors(ls)\n\n\ndef has_tensors(ls):\n if isinstance(ls, (list, tuple)):\n return any(tensor_util.is_tensor(v) for v in ls)\n if isinstance(ls, dict):\n return any(tensor_util.is_tensor(v) for _, v in six.iteritems(ls))\n return tensor_util.is_tensor(ls)\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n Arguments:\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n Returns:\n The metric name.\n \"\"\"\n metric_name_prefix = 'weighted_' if weighted else ''\n if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):\n if metric in ('accuracy', 'acc'):\n suffix = 'acc'\n elif metric in ('crossentropy', 'ce'):\n suffix = 'ce'\n else:\n metric_fn = metrics_module.get(metric)\n # Get metric name as string\n if hasattr(metric_fn, 'name'):\n suffix = metric_fn.name\n else:\n suffix = metric_fn.__name__\n metric_name = metric_name_prefix + suffix\n return metric_name\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n Arguments:\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric\n will be calculated for.\n loss_fn: The loss function used.\n\n Returns:\n The metric function.\n \"\"\"\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or loss_fn == losses.binary_crossentropy:\n return metrics_module.binary_accuracy # case: binary accuracy\n elif loss_fn == losses.sparse_categorical_crossentropy:\n # case: categorical accuracy with sparse targets\n return metrics_module.sparse_categorical_accuracy\n return metrics_module.categorical_accuracy # case: categorical accuracy\n elif metric in ['crossentropy', 'ce']:\n if output_shape[-1] == 1 or loss_fn == losses.binary_crossentropy:\n return metrics_module.binary_crossentropy # case: binary cross-entropy\n elif loss_fn == losses.sparse_categorical_crossentropy:\n # case: categorical cross-entropy with sparse targets\n return metrics_module.sparse_categorical_crossentropy\n # case: categorical cross-entropy\n return metrics_module.categorical_crossentropy\n return metrics_module.get(metric)\n\n\ndef call_metric_function(metric_fn, y_true, y_pred, weights=None, mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is None:\n return metric_fn(y_true, y_pred, sample_weight=weights)\n\n mask = math_ops.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n return metric_fn(y_true, y_pred, sample_weight=mask)\n\n # Update dimensions of weights to match with mask.\n mask, _, weights = squeeze_or_expand_dimensions(mask, None, weights)\n weights *= mask\n return metric_fn(y_true, y_pred, sample_weight=weights)\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss function corresponding to the given loss input.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n # TODO(psv): After we have added all V2 losses, update this function.\n if loss in ['mse', 
'MSE', 'mean_squared_error']:\n return losses.MeanSquaredError()\n return losses.get(loss)\n\n\ndef validate_dataset_input(x, y, sample_weight, validation_split=None):\n \"\"\"Validates user input arguments when a dataset iterator is passed.\n\n Arguments:\n x: Input data. A `tf.data` dataset or iterator.\n y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).\n Expected to be `None` when `x` is a dataset iterator.\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`. Expected to be `None` when\n `x` is a dataset iterator\n validation_split: Float between 0 and 1. Fraction of the training data to\n be used as validation data. Expected to be `None` when `x` is a dataset\n iterator.\n\n Raises:\n ValueError: if argument `y` or `sample_weight` or `validation_split` are\n provided by user.\n \"\"\"\n if y is not None:\n raise ValueError('You passed a dataset or dataset iterator (%s) as '\n 'input `x` to your model. In that case, you should '\n 'not specify a target (`y`) argument, since the dataset '\n 'or dataset iterator generates both input data and '\n 'target data. '\n 'Received: %s' % (x, y))\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when input '\n '`x` is a dataset or a dataset iterator. Instead, you'\n 'can provide sample_weight as the third element of your'\n 'dataset, i.e. (inputs, targets, sample_weight). '\n 'Received: x=%s, sample_weight=%s' % (x, sample_weight))\n if validation_split is not None and validation_split != 0.0:\n raise ValueError(\n '`validation_split` argument is not supported when '\n 'input `x` is a dataset or a dataset iterator. '\n 'Received: x=%s, validation_split=%f' % (x, validation_split))\n\n\ndef check_generator_arguments(y=None,\n sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef check_steps_argument(input_data, steps, steps_name):\n \"\"\"Validates `steps` argument based on input data's type.\n\n The cases when `steps` value must be provided are when\n 1. input data passed is an iterator.\n 2. model was built on top of symbolic tensors, input data is not\n required and is `None`.\n 3. input data passed is a symbolic tensor.\n\n Arguments:\n input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or\n tf.data.Dataset iterator or `None`.\n steps: Integer or `None`. 
Total number of steps (batches of samples) to\n execute.\n steps_name: The public API's parameter name for `steps`.\n\n Returns:\n boolean, True if `steps` argument is required, else False.\n\n Raises:\n ValueError: if `steps` argument is required for given input data type\n but not provided.\n \"\"\"\n # TODO(fchollet): allow datasets with steps=None if cardinality is known.\n is_x_iterator = isinstance(input_data, (iterator_ops.Iterator,\n iterator_ops.EagerIterator))\n if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or\n (isinstance(input_data, list) and not input_data)):\n if steps is None:\n input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'\n raise ValueError('When using {input_type} as input to a model, you should'\n ' specify the `{steps_name}` argument.'.format(\n input_type=input_type_str, steps_name=steps_name))\n return True\n return False\n\n\ndef cast_single_tensor(x):\n if tensor_util.is_tensor(x) and x.dtype.is_floating:\n return math_ops.cast(x, dtype=K.floatx())\n return x\n\n\ndef cast_if_floating_dtype(x):\n \"\"\"Casts the given data tensors to the default floating point type.\n\n Casts only if the input is already a floating point type.\n Args:\n x: tensor or list/tuple of tensors.\n\n Returns:\n Converted input.\n\n Raises:\n RuntimeError: if data isn't tensors.\n \"\"\"\n if not has_tensors(x):\n raise RuntimeError(\n 'Please provide tensors for casting, got: {x}'.format(x=x))\n\n return nest.map_structure(cast_single_tensor, x)\n\n\ndef get_output_sample_weight_and_mode(skip_target_weighing_indices,\n sample_weight_mode, output_name,\n output_index):\n \"\"\"Returns the sample weight and weight mode for a single output.\"\"\"\n if output_index in skip_target_weighing_indices:\n return None, None\n\n if sample_weight_mode == 'temporal':\n default_value = [[1.]]\n shape = [None, None]\n mode = 'temporal'\n else:\n default_value = [1.]\n shape = [None]\n mode = None\n if context.executing_eagerly():\n weight = None\n else:\n weight = array_ops.placeholder_with_default(\n constant_op.constant(default_value, dtype=K.floatx()),\n shape=shape,\n name=output_name + '_sample_weights')\n return weight, mode\n\n\ndef prepare_sample_weights(output_names, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Prepares sample weights for the model.\n\n Args:\n output_names: List of model output names.\n sample_weight_mode: sample weight mode user input passed from compile API.\n skip_target_weighing_indices: Indices of output for which sample weights\n should be skipped.\n\n Returns:\n A pair of list of sample weights and sample weight modes\n (one for each output).\n\n Raises:\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n unknown_output = set(sample_weight_mode.keys()) - set(output_names)\n if unknown_output:\n raise ValueError('Unknown entry in '\n 'sample_weight_mode dictionary: \"' + unknown_output +\n '\". 
Only expected the following keys: ' +\n str(output_names))\n for i, name in enumerate(output_names):\n if (i not in skip_target_weighing_indices and\n name not in sample_weight_mode):\n raise ValueError('Output missing from sample_weight_modes dictionary')\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode.get(name), name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(output_names):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + 'sample_weight_modes')\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode[i], name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n else:\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode, name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n return sample_weights, sample_weight_modes\n\n\n# TODO(rohanj): This is a hack to get around not depending on feature_column and\n# create a cyclical dependency. Figure out a cleaner solution\ndef is_feature_layer(layer):\n \"\"\"Returns whether `layer` is a FeatureLayer or not.\"\"\"\n return getattr(layer, '_is_feature_layer', False)\n\n\ndef is_eager_dataset_or_iterator(data):\n return context.executing_eagerly() and isinstance(\n data, (dataset_ops.DatasetV1,\n dataset_ops.DatasetV2,\n iterator_ops.EagerIterator))\n\n\n# pylint: disable=protected-access\ndef assert_not_batched(dataset):\n \"\"\"Asserts that `dataset` is not batched.\n\n The algorithm used by this method is sound but not complete. In other words,\n if the method fails to establish the assertion, it does not mean the dataset\n is batched.\n\n Example usage:\n ```python\n try:\n assert_not_batched(dataset)\n # safe to assume `dataset` it not batched here\n expect ValueError:\n # make no assumptions about `dataset`\n ```\n\n Args:\n dataset: The dataset to analyze.\n\n Raises:\n ValueError: If the method cannot establish the assertion.\n \"\"\"\n if isinstance(dataset, dataset_ops.DatasetV1Adapter):\n return assert_not_batched(dataset._dataset)\n else:\n whitelisted_types = [\n dataset_ops._OptionsDataset,\n dataset_ops.ConcatenateDataset,\n dataset_ops.CacheDataset,\n dataset_ops.FilterDataset,\n dataset_ops.MapDataset,\n dataset_ops.ParallelMapDataset,\n dataset_ops.PrefetchDataset,\n dataset_ops.RangeDataset,\n dataset_ops.RepeatDataset,\n dataset_ops.ShuffleDataset,\n dataset_ops.SkipDataset,\n dataset_ops.SparseTensorSliceDataset,\n dataset_ops.TakeDataset,\n dataset_ops.TensorDataset,\n dataset_ops.TensorSliceDataset,\n dataset_ops.ZipDataset,\n readers.FixedLengthRecordDatasetV2,\n readers.TextLineDatasetV2,\n readers.TFRecordDatasetV2,\n ]\n for ty in whitelisted_types:\n if isinstance(dataset, ty):\n for input_dataset in dataset._inputs():\n assert_not_batched(input_dataset)\n return\n raise ValueError('Could not assert that dataset is not batched.')\n\n\n# pylint: disable=protected-access\ndef assert_not_shuffled(dataset):\n \"\"\"Asserts that `dataset` is not shuffled.\n\n The algorithm used by this method is sound but not complete. 
In other words,\n if the method fails to establish the assertion, it does not mean the dataset\n is shuffled.\n\n Example usage:\n ```python\n try:\n assert_not_shuffled(dataset)\n # safe to assume `dataset` it not shuffled here\n expect ValueError:\n # make no assumptions about `dataset`\n ```\n\n Args:\n dataset: The dataset to analyze.\n\n Raises:\n ValueError: If the method cannot establish the assertion.\n \"\"\"\n if isinstance(dataset, dataset_ops.DatasetV1Adapter):\n return assert_not_shuffled(dataset._dataset)\n else:\n whitelisted_types = [\n dataset_ops._OptionsDataset,\n dataset_ops.BatchDataset,\n dataset_ops.ConcatenateDataset,\n dataset_ops.CacheDataset,\n dataset_ops.FilterDataset,\n dataset_ops.MapDataset,\n dataset_ops.PaddedBatchDataset,\n dataset_ops.ParallelMapDataset,\n dataset_ops.PrefetchDataset,\n dataset_ops.RangeDataset,\n dataset_ops.RepeatDataset,\n dataset_ops.SkipDataset,\n dataset_ops.SparseTensorSliceDataset,\n dataset_ops.TakeDataset,\n dataset_ops.TensorDataset,\n dataset_ops.TensorSliceDataset,\n dataset_ops.WindowDataset,\n dataset_ops.ZipDataset,\n readers.FixedLengthRecordDatasetV2,\n readers.TextLineDatasetV2,\n readers.TFRecordDatasetV2,\n ]\n for ty in whitelisted_types:\n if isinstance(dataset, ty):\n for input_dataset in dataset._inputs():\n assert_not_shuffled(input_dataset)\n return\n raise ValueError('Could not assert that dataset is not shuffled.')\n\n\ndef verify_dataset_shuffled(x):\n \"\"\"Verifies that the dataset is shuffled.\n\n Args:\n x: Dataset passed as an input to the model.\n\n Raises:\n ValueError: if the dataset is not already shuffled.\n \"\"\"\n assert isinstance(x, dataset_ops.DatasetV2)\n try:\n assert_not_shuffled(x)\n except ValueError:\n # Dataset may or may not be shuffled.\n return\n else:\n logging.warning('Expected a shuffled dataset but input dataset `x` is '\n 'not shuffled. Please invoke `shuffle()` on input dataset.')\n\n\ndef is_dataset_or_iterator(data):\n return isinstance(data, (dataset_ops.DatasetV1,\n dataset_ops.DatasetV2,\n iterator_ops.EagerIterator,\n iterator_ops.Iterator))\n\n\ndef extract_tensors_from_dataset(dataset):\n \"\"\"Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\n Arguments:\n dataset: Dataset instance.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n \"\"\"\n iterator = dataset_ops.make_initializable_iterator(dataset)\n init_op = iterator.initializer\n if not context.executing_eagerly():\n K.get_session().run(init_op)\n inputs, targets, sample_weight = unpack_iterator_input(iterator)\n return inputs, targets, sample_weight\n\n\ndef unpack_iterator_input(iterator):\n \"\"\"Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Arguments:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. 
`y` and `weights` entry may be None.\n \"\"\"\n try:\n next_element = iterator.get_next()\n except errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'Make sure that your dataset can generate '\n 'required number of samples.')\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n 'Please provide model inputs as a list or tuple of 2 or 3 '\n 'elements: (input, target) or (input, target, sample_weights) '\n 'Received %s' % next_element)\n if len(next_element) == 2:\n x, y = next_element\n weights = None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights\n\n\ndef infer_steps_for_dataset(dataset, steps, epochs=1, steps_name='steps'):\n \"\"\"Infers steps_per_epoch needed to loop through a dataset.\n\n Arguments:\n dataset: Input data of type tf.data.Dataset.\n steps: Number of steps to draw from the dataset (may be None if unknown).\n epochs: Number of times to iterate over the dataset.\n steps_name: The string name of the steps argument, either `steps`,\n `validation_steps`, or `steps_per_epoch`. Only used for error message\n formatting.\n\n Returns:\n Integer or `None`. Inferred number of steps to loop through the dataset.\n `None` is returned if the size of the dataset is unknown and `steps` was\n not specified.\n\n Raises:\n ValueError: In case of invalid argument values.\n \"\"\"\n assert isinstance(dataset, dataset_ops.DatasetV2)\n size = K.get_value(cardinality.cardinality(dataset))\n if size == cardinality.INFINITE and steps is None:\n raise ValueError('When passing an infinitely repeating dataset, you '\n 'must specify the `%s` argument.' % (steps_name,))\n if size != cardinality.UNKNOWN:\n if steps is not None and steps * epochs > size:\n if epochs > 1:\n raise ValueError('The dataset you passed contains %s batches, but you '\n 'passed `epochs=%s` and `%s=%s`, which is a total of '\n '%s steps. We cannot draw that many steps from this '\n 'dataset. We suggest to set `%s=%s`.' %\n (size, epochs, steps_name, steps, steps * epochs,\n steps_name, size // epochs))\n else:\n raise ValueError('The dataset you passed contains %s batches, but you '\n 'passed `%s=%s`. We cannot draw that many steps from '\n 'this dataset. We suggest to set `%s=%s`.' %\n (size, steps_name, steps, steps_name, size))\n if steps is None:\n if size >= 0:\n return size\n return None\n return steps\n\n\nclass ModelInputs(object):\n \"\"\"Encapsulates model inputs.\n\n Allows for transforming model inputs while keeping the same structure.\n \"\"\"\n\n def __init__(self, inputs):\n self._inputs = inputs\n self._is_dict = isinstance(self._inputs, dict)\n self._is_single_input = not isinstance(self._inputs, (list, tuple, dict))\n\n self._flattened_inputs = []\n self._input_names = []\n\n if self._is_dict:\n for k in sorted(self._inputs.keys()):\n self._flattened_inputs.append(self._inputs[k])\n self._input_names.append(k)\n else:\n self._flattened_inputs = nest.flatten(self._inputs)\n self._input_names = [\n 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs))\n ]\n\n def get_input_names(self):\n \"\"\"Returns keys to name inputs by.\n\n In case inputs provided were a list, tuple or single entry, we make up a\n key 'input_%d'. 
For dictionary case, we return a sorted list of keys.\n \"\"\"\n return self._input_names\n\n def get_symbolic_inputs(self, return_single_as_list=False):\n \"\"\"Returns inputs to be set as self.inputs for a model.\"\"\"\n # TODO(karmel): There is a side-effect here where what you get\n # with as_list and as_dict depends on whether you have called this\n # method first, since it modifies in place.\n for i in range(len(self._flattened_inputs)):\n k = self._input_names[i]\n v = self._flattened_inputs[i]\n if isinstance(v, (list, float, int)):\n v = np.asarray(v)\n if v.ndim == 1:\n v = np.expand_dims(v, 1)\n\n if isinstance(v, (np.ndarray, ops.EagerTensor)):\n # We fix the placeholder shape except the batch size.\n # This is suboptimal, but it is the best we can do with the info\n # we have. The user should call `model._set_inputs(placeholders)`\n # to specify custom placeholders if the need arises.\n shape = (None,) + tuple(v.shape[1:])\n v = K.placeholder(shape=shape, name=k)\n elif isinstance(v, tensor_shape.TensorShape):\n shape = (None,) + tuple(v.as_list()[1:])\n v = K.placeholder(shape=shape, name=k)\n\n self._flattened_inputs[i] = v\n\n if self._is_dict:\n return dict(zip(self._input_names, self._flattened_inputs))\n if self._is_single_input and not return_single_as_list:\n return self._flattened_inputs[0]\n return self._flattened_inputs\n\n def as_dict(self):\n \"\"\"An iterable over a dictionary version of inputs.\"\"\"\n for i in range(len(self._flattened_inputs)):\n yield self._input_names[i], self._flattened_inputs[i]\n\n def as_list(self):\n \"\"\"Returning the inputs as a list.\"\"\"\n return self._flattened_inputs\n\n\n# Allow use of methods not exposed to the user.\n# pylint: disable=protected-access\ndef get_input_shape_and_dtype(layer):\n \"\"\"Retrieves input shape and input dtype of layer if applicable.\n\n Args:\n layer: Layer (or model) instance.\n\n Returns:\n Tuple (input_shape, input_dtype). 
Both could be None if the layer\n does not have a defined input shape.\n\n Raises:\n ValueError: in case an empty Sequential or Graph Network is passed.\n \"\"\"\n\n def _is_graph_model(layer):\n return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or\n layer.__class__.__name__ == 'Sequential')\n\n # In case of nested models: recover the first layer\n # of the deepest model to infer input shape and dtype.\n # Subclassed Models may not have been built so can't be checked.\n while _is_graph_model(layer):\n if not layer.layers:\n raise ValueError('An empty Model cannot be used as a Layer.')\n layer = layer.layers[0]\n\n if hasattr(layer, '_batch_input_shape'):\n return layer._batch_input_shape, layer.dtype\n return None, None\n\n\n# pylint: enable=protected-access\n\n\ndef get_static_batch_size(layer):\n \"\"\"Gets the static batch size of a Layer.\n\n Arguments:\n layer: a `Layer` instance.\n\n Returns:\n The static batch size of a Layer.\n \"\"\"\n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return tensor_shape.as_dimension(batch_input_shape[0]).value\n return None\n\n\ndef generic_output_names(outputs_list):\n return ['output_%d' % (i + 1) for i in range(len(outputs_list))]\n\n\ndef set_run_eagerly_for_dict_structure(model, x):\n \"\"\"Set model.run_eagerly to true if x is dict structure.\n\n Set model.run_eagerly to true if x is dict or\n Iterator/EagerIterator/Dataset of dict.\n\n Args:\n model: A Keras model.\n x: Input data.\n \"\"\"\n if not context.executing_eagerly():\n return\n if isinstance(x, dict):\n model.run_eagerly = True\n if (isinstance(x, (iterator_ops.Iterator, iterator_ops.EagerIterator,\n dataset_ops.DatasetV2))):\n for item in x.output_shapes:\n if isinstance(item, dict):\n model.run_eagerly = True\n return\n\n\ndef convert_eager_tensors_to_numpy(structure):\n \"\"\"Convert every EagerTensor in `structure` to NumPy.\n\n Arguments:\n structure: An arbitrary structure of elements to be converted to NumPy\n arrays.\n\n Returns:\n An identical structure with EagerTensors converted to NumPy arrays.\n \"\"\"\n\n def _convert(element):\n if isinstance(element, ops.EagerTensor):\n return element.numpy()\n return element\n\n return nest.map_structure(_convert, structure)\n" ]
[ [ "numpy.expand_dims", "numpy.asarray", "tensorflow.python.keras.backend.placeholder", "tensorflow.python.keras.metrics.MeanMetricWrapper", "numpy.concatenate", "tensorflow.python.keras.losses.MeanSquaredError", "tensorflow.python.eager.context.executing_eagerly", "numpy.reshape", "tensorflow.python.ops.math_ops.div_no_nan", "tensorflow.python.keras.callbacks.ProgbarLogger", "numpy.argmax", "tensorflow.python.keras.backend.floatx", "tensorflow.python.data.experimental.ops.cardinality.cardinality", "tensorflow.python.ops.weights_broadcast_ops.broadcast_weights", "tensorflow.python.keras.backend.get_session", "tensorflow.python.util.nest.map_structure", "numpy.zeros", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras.utils.generic_utils.slice_arrays", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras.losses.get", "tensorflow.python.data.ops.dataset_ops.make_initializable_iterator", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.framework.tensor_shape.as_dimension", "tensorflow.python.keras.metrics.get", "numpy.append", "tensorflow.python.keras.utils.losses_utils.squeeze_or_expand_dimensions", "tensorflow.python.ops.array_ops.concat", "numpy.random.shuffle", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.keras.backend.ndim", "tensorflow.python.keras.backend.mean", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.util.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EulerWong/director
[ "e30a56ba3a3bac82216adb0f8cc29d1fae8cb74b" ]
[ "src/python/director/segmentationroutines.py" ]
[ "''' Routines and Fitting algorithms\n Fitting: means where ALL other non-object points\n have been removed, determining the transform frame\n of the object\n\n Segment: means seperating clusters from a single cloud\n'''\n\n\nfrom director.filterUtils import *\nimport director.visualization as vis\nfrom director import objectmodel as om\nfrom director.transformUtils import getTransformFromAxes\nfrom director import vtkAll as vtk\n\nimport vtkNumpy\nimport numpy as np\nfrom shallowCopy import shallowCopy\nfrom debugVis import DebugData\n\n\n\nclass SegmentationContext(object):\n '''\n Maintains an abstraction between the fitting scene and a robot\n Assumes point cloud is world aligned, with z up\n Provides access to (1) ground height,\n (2) location of the head frame, (3) view direction\n\n Can be configured:\n (a) Default mode: populated continously by EST_ROBOT_STATE\n (2) and (3) set seperately\n (b) Autonomy: where (2) gives (3)\n (c) Populated programmatically. e.g:\n - for unit testing\n - where ground plane from feet cannot be used\n '''\n\n def __init__(self, groundHeightProvider, viewProvider):\n self.groundHeightProvider = groundHeightProvider\n self.viewProvider = viewProvider\n\n def getGroundHeight(self):\n return self.groundHeightProvider.getGroundHeight()\n\n def getViewFrame(self):\n return self.viewProvider.getViewFrame()\n\n def getViewOrigin(self):\n return self.viewProvider.getViewOrigin()\n\n def getViewDirection(self):\n return self.viewProvider.getViewDirection()\n\n '''\n These static methods are provided for convenience to initialize\n a globalally accessible instance of the SegmentationContext.\n '''\n\n _globalSegmentationContext = None\n\n @staticmethod\n def installGlobalInstance(inst):\n if SegmentationContext._globalSegmentationContext is not None:\n raise Exception('Error, a global segmentation context instance is already installed.')\n\n SegmentationContext._globalSegmentationContext = inst\n\n @staticmethod\n def getGlobalInstance():\n if SegmentationContext._globalSegmentationContext is None:\n raise Exception('Error, the global segmentation context instance has not been initialized.')\n return SegmentationContext._globalSegmentationContext\n\n @staticmethod\n def initWithRobot(model):\n sc = SegmentationContext(RobotModelGroundHeightProvider(model), RobotModelViewProvider(model))\n SegmentationContext.installGlobalInstance(sc)\n\n @staticmethod\n def initWithCamera(camera, userGroundHeight):\n sc = SegmentationContext(UserGroundHeightProvider(userGroundHeight), CameraViewProvider(camera))\n SegmentationContext.installGlobalInstance(sc)\n\n @staticmethod\n def initWithUser(userGroundHeight, userViewFrame, viewAxis=0):\n sc = SegmentationContext(UserGroundHeightProvider(userGroundHeight), UserViewProvider(userViewFrame, viewAxis))\n SegmentationContext.installGlobalInstance(sc)\n\n\nclass RobotModelGroundHeightProvider(object):\n\n def __init__(self, model):\n self.model = model\n\n def getGroundHeight(self):\n from director.footstepsdriver import FootstepsDriver\n return FootstepsDriver.getFeetMidPoint(self.model).GetPosition()[2]\n\n\nclass RobotModelViewProvider(object):\n\n def __init__(self, model):\n self.model = model\n\n def getViewFrame(self):\n return self.model.getLinkFrame(self.model.getHeadLink())\n\n def getViewOrigin(self):\n headFrame = self.model.getLinkFrame(self.model.getHeadLink())\n return np.array(headFrame.GetPosition())\n\n def getViewDirection(self):\n headFrame = self.model.getLinkFrame(self.model.getHeadLink())\n viewDirection = 
[1,0,0]\n headFrame.TransformVector(viewDirection, viewDirection)\n return np.array(viewDirection)\n\nclass UserGroundHeightProvider(object):\n\n def __init__(self, groundHeight):\n self.groundHeight = groundHeight\n\n def getGroundHeight():\n return self.groundHeight\n\nclass UserViewProvider(object):\n\n def __init__(self, viewFrame, viewAxis):\n self.viewFrame = viewFrame\n self.viewAxis = viewAxis\n\n def getViewFrame(self):\n return self.viewFrame\n\n def getViewOrigin(self):\n return np.array( self.viewFrame.GetPosition())\n\n def getViewDirection(self):\n viewDirection = [0.0, 0.0, 0.0]\n viewDirection[self.viewAxis] = 1.0\n self.viewFrame.TransformVector(viewDirection, viewDirection)\n return np.array(viewDirection)\n\nclass CameraViewProvider(object):\n\n def __init__(self, camera):\n self.camera = camera\n\n def getViewFrame(self):\n return self.camera.GetViewTransformObject()\n\n def getViewOrigin(self):\n return np.array(self.camera.GetViewPosition())\n\n def getViewDirection(self):\n return np.array(self.camera.GetViewDirection())\n\n\n\ndef getDebugFolder():\n obj = om.findObjectByName('debug')\n if obj is None:\n obj = om.getOrCreateContainer('debug', om.getOrCreateContainer('segmentation'))\n om.collapse(obj)\n return obj\n\n\ndef applyLineFit(dataObj, distanceThreshold=0.02):\n\n f = vtk.vtkPCLSACSegmentationLine()\n f.SetInput(dataObj)\n f.SetDistanceThreshold(distanceThreshold)\n f.Update()\n origin = np.array(f.GetLineOrigin())\n direction = np.array(f.GetLineDirection())\n\n return origin, direction, shallowCopy(f.GetOutput())\n\n\ndef projectPointToPlane(point, origin, normal):\n projectedPoint = np.zeros(3)\n vtk.vtkPlane.ProjectPoint(point, origin, normal, projectedPoint)\n return projectedPoint\n\n\ndef intersectLineWithPlane(line_point, line_ray, plane_point, plane_normal ):\n '''\n Find the intersection between a line and a plane\n http://www.scratchapixel.com/lessons/3d-basic-lessons/lesson-7-intersecting-simple-shapes/ray-plane-and-ray-disk-intersection/\n '''\n\n line_point = np.asarray(line_point)\n line_ray = np.asarray(line_ray)\n plane_point = np.asarray(plane_point)\n plane_normal = np.asarray(plane_normal)\n\n denom = np.dot( plane_normal , line_ray )\n\n # TODO: implement this check\n #if (denom > 1E-6):\n # # ray is very close to parallel to plane\n # return None\n\n p0l0 = plane_point - line_point\n t = np.dot(p0l0, plane_normal) / denom\n\n intersection_point = line_point + t*line_ray\n return intersection_point\n\n\ndef labelPointDistanceAlongAxis(polyData, axis, origin=None, resultArrayName='distance_along_axis'):\n\n points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')\n if origin is not None:\n points = points - origin\n distanceValues = np.dot(points, axis)\n if origin is None:\n distanceValues -= np.nanmin(distanceValues)\n newData = shallowCopy(polyData)\n vtkNumpy.addNumpyToVtk(newData, distanceValues, resultArrayName)\n return newData\n\n\ndef applyEuclideanClustering(dataObj, clusterTolerance=0.05, minClusterSize=100, maxClusterSize=1e6):\n\n f = vtk.vtkPCLEuclideanClusterExtraction()\n f.SetInput(dataObj)\n f.SetClusterTolerance(clusterTolerance)\n f.SetMinClusterSize(int(minClusterSize))\n f.SetMaxClusterSize(int(maxClusterSize))\n f.Update()\n return shallowCopy(f.GetOutput())\n\n\ndef extractClusters(polyData, clusterInXY=False, **kwargs):\n ''' Segment a single point cloud into smaller clusters\n using Euclidean Clustering\n '''\n\n if not polyData.GetNumberOfPoints():\n return []\n\n if (clusterInXY == True):\n ''' If Points are 
seperated in X&Y, then cluster outside this '''\n polyDataXY = vtk.vtkPolyData()\n polyDataXY.DeepCopy(polyData)\n points=vtkNumpy.getNumpyFromVtk(polyDataXY , 'Points') # shared memory\n points[:,2] = 0.0\n #showPolyData(polyDataXY, 'polyDataXY', visible=False, parent=getDebugFolder())\n polyDataXY = applyEuclideanClustering(polyDataXY, **kwargs)\n clusterLabels = vtkNumpy.getNumpyFromVtk(polyDataXY, 'cluster_labels')\n vtkNumpy.addNumpyToVtk(polyData, clusterLabels, 'cluster_labels')\n\n else:\n polyData = applyEuclideanClustering(polyData, **kwargs)\n clusterLabels = vtkNumpy.getNumpyFromVtk(polyData, 'cluster_labels')\n\n\n clusters = []\n for i in xrange(1, clusterLabels.max() + 1):\n cluster = thresholdPoints(polyData, 'cluster_labels', [i, i])\n clusters.append(cluster)\n return clusters\n\n\ndef applyVoxelGrid(polyData, leafSize=0.01):\n\n v = vtk.vtkPCLVoxelGrid()\n v.SetLeafSize(leafSize, leafSize, leafSize)\n v.SetInput(polyData)\n v.Update()\n return shallowCopy(v.GetOutput())\n\n\ndef labelOutliers(dataObj, searchRadius=0.03, neighborsInSearchRadius=10):\n\n f = vtk.vtkPCLRadiusOutlierRemoval()\n f.SetInput(dataObj)\n f.SetSearchRadius(searchRadius)\n f.SetNeighborsInSearchRadius(int(neighborsInSearchRadius))\n f.Update()\n return shallowCopy(f.GetOutput())\n\n\ndef sparsifyStereoCloud(polyData):\n ''' Take in a typical Stereo Camera Point Cloud\n Filter it down to about the density of a lidar point cloud\n and remove outliers\n '''\n\n # >>> strips color out <<<\n polyData = applyVoxelGrid(polyData, leafSize=0.01)\n\n # remove outliers\n polyData = labelOutliers(polyData)\n vis.showPolyData(polyData, 'is_outlier', colorByName='is_outlier', visible=False, parent=getDebugFolder())\n polyData = thresholdPoints(polyData, 'is_outlier', [0.0, 0.0])\n return polyData\n\ndef fitDrillBarrel ( drillPoints, forwardDirection, plane_origin, plane_normal):\n ''' Given a point cloud which ONLY contains points from a barrell drill, standing upright\n and the equations of a table its resting on, and the general direction of the robot\n Fit a barrell drill\n '''\n\n if not drillPoints.GetNumberOfPoints():\n return\n\n vis.updatePolyData(drillPoints, 'drill cluster', parent=getDebugFolder(), visible=False)\n drillBarrelPoints = thresholdPoints(drillPoints, 'dist_to_plane', [0.177, 0.30])\n\n if not drillBarrelPoints.GetNumberOfPoints():\n return\n\n\n # fit line to drill barrel points\n linePoint, lineDirection, _ = applyLineFit(drillBarrelPoints, distanceThreshold=0.5)\n\n if np.dot(lineDirection, forwardDirection) < 0:\n lineDirection = -lineDirection\n\n vis.updatePolyData(drillBarrelPoints, 'drill barrel points', parent=getDebugFolder(), visible=False)\n\n\n pts = vtkNumpy.getNumpyFromVtk(drillBarrelPoints, 'Points')\n\n dists = np.dot(pts-linePoint, lineDirection)\n\n p1 = linePoint + lineDirection*np.min(dists)\n p2 = linePoint + lineDirection*np.max(dists)\n\n p1 = projectPointToPlane(p1, plane_origin, plane_normal)\n p2 = projectPointToPlane(p2, plane_origin, plane_normal)\n\n\n d = DebugData()\n d.addSphere(p1, radius=0.01)\n d.addSphere(p2, radius=0.01)\n d.addLine(p1, p2)\n vis.updatePolyData(d.getPolyData(), 'drill debug points', color=[0,1,0], parent=getDebugFolder(), visible=False)\n\n\n drillToBasePoint = np.array([-0.07, 0.0 , -0.12])\n\n zaxis = plane_normal\n xaxis = lineDirection\n xaxis /= np.linalg.norm(xaxis)\n yaxis = np.cross(zaxis, xaxis)\n yaxis /= np.linalg.norm(yaxis)\n xaxis = np.cross(yaxis, zaxis)\n xaxis /= np.linalg.norm(xaxis)\n\n t = 
getTransformFromAxes(xaxis, yaxis, zaxis)\n t.PreMultiply()\n t.Translate(-drillToBasePoint)\n t.PostMultiply()\n t.Translate(p1)\n\n return t\n" ]
[ [ "numpy.dot", "numpy.min", "numpy.asarray", "numpy.nanmin", "numpy.linalg.norm", "numpy.max", "numpy.cross", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
intheworld/incubator-tvm
[ "c07aa37aeb602e1ade7e26061d0fd3e908dd3791", "c07aa37aeb602e1ade7e26061d0fd3e908dd3791" ]
[ "tests/python/driver/tvmc/conftest.py", "tests/python/unittest/test_tir_intrin.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport pytest\nimport tarfile\n\nimport numpy as np\n\nfrom PIL import Image\n\nfrom tvm.driver import tvmc\n\nfrom tvm.contrib.download import download_testdata\n\n# Support functions\n\n\ndef download_and_untar(model_url, model_sub_path, temp_dir):\n model_tar_name = os.path.basename(model_url)\n model_path = download_testdata(model_url, model_tar_name, module=[\"tvmc\"])\n\n if model_path.endswith(\"tgz\") or model_path.endswith(\"gz\"):\n tar = tarfile.open(model_path)\n tar.extractall(path=temp_dir)\n tar.close()\n\n return os.path.join(temp_dir, model_sub_path)\n\n\ndef get_sample_compiled_module(target_dir, package_filename):\n \"\"\"Support function that returns a TFLite compiled module\"\"\"\n base_url = \"https://storage.googleapis.com/download.tensorflow.org/models\"\n model_url = \"mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\"\n model_file = download_and_untar(\n \"{}/{}\".format(base_url, model_url),\n \"mobilenet_v1_1.0_224_quant.tflite\",\n temp_dir=target_dir,\n )\n\n tvmc_model = tvmc.frontends.load_model(model_file)\n return tvmc.compiler.compile_model(\n tvmc_model, target=\"llvm\", package_path=os.path.join(target_dir, package_filename)\n )\n\n\n# PyTest fixtures\n\n\[email protected](scope=\"session\")\ndef tflite_mobilenet_v1_1_quant(tmpdir_factory):\n base_url = \"https://storage.googleapis.com/download.tensorflow.org/models\"\n model_url = \"mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\"\n model_file = download_and_untar(\n \"{}/{}\".format(base_url, model_url),\n \"mobilenet_v1_1.0_224_quant.tflite\",\n temp_dir=tmpdir_factory.mktemp(\"data\"),\n )\n\n return model_file\n\n\[email protected](scope=\"session\")\ndef pb_mobilenet_v1_1_quant(tmpdir_factory):\n base_url = \"https://storage.googleapis.com/download.tensorflow.org/models\"\n model_url = \"mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\"\n model_file = download_and_untar(\n \"{}/{}\".format(base_url, model_url),\n \"mobilenet_v1_1.0_224_frozen.pb\",\n temp_dir=tmpdir_factory.mktemp(\"data\"),\n )\n\n return model_file\n\n\[email protected](scope=\"session\")\ndef keras_resnet50(tmpdir_factory):\n try:\n from tensorflow.keras.applications.resnet50 import ResNet50\n except ImportError:\n # not all environments provide TensorFlow, so skip this fixture\n # if that is that case.\n return \"\"\n\n model_file_name = \"{}/{}\".format(tmpdir_factory.mktemp(\"data\"), \"resnet50.h5\")\n model = ResNet50(include_top=True, weights=\"imagenet\", input_shape=(224, 224, 3), classes=1000)\n model.save(model_file_name)\n\n return model_file_name\n\n\[email protected](scope=\"session\")\ndef keras_simple(tmpdir_factory):\n try:\n from tensorflow import keras\n except ImportError:\n # not all environments 
provide TensorFlow, so skip this fixture\n # if that is that case.\n return \"\"\n\n model_file_name = \"{}/{}\".format(tmpdir_factory.mktemp(\"data\"), \"simple_conv.h5\")\n model = keras.Sequential(\n [\n keras.layers.InputLayer(input_shape=[32, 32, 3], batch_size=1),\n keras.layers.Conv2D(8, kernel_size=(3, 3)),\n keras.layers.Flatten(),\n keras.layers.Dense(64),\n ]\n )\n model.save(model_file_name)\n\n return model_file_name\n\n\[email protected](scope=\"session\")\ndef pytorch_resnet18(tmpdir_factory):\n try:\n import torch\n import torchvision.models as models\n except ImportError:\n # Not all environments provide Pytorch, so skip if that's the case.\n return \"\"\n model = models.resnet18()\n model_file_name = \"{}/{}\".format(tmpdir_factory.mktemp(\"data\"), \"resnet18.pth\")\n # Trace model into torchscript.\n traced_cpu = torch.jit.trace(model, torch.randn(1, 3, 224, 224))\n torch.jit.save(traced_cpu, model_file_name)\n\n return model_file_name\n\n\[email protected](scope=\"session\")\ndef onnx_resnet50():\n base_url = \"https://github.com/onnx/models/raw/master/vision/classification/resnet/model\"\n file_to_download = \"resnet50-v2-7.onnx\"\n model_file = download_testdata(\n \"{}/{}\".format(base_url, file_to_download), file_to_download, module=[\"tvmc\"]\n )\n\n return model_file\n\n\[email protected](scope=\"session\")\ndef onnx_mnist():\n base_url = \"https://github.com/onnx/models/raw/master/vision/classification/mnist/model\"\n file_to_download = \"mnist-1.onnx\"\n model_file = download_testdata(\n \"{}/{}\".format(base_url, file_to_download), file_to_download, module=[\"tvmc\"]\n )\n\n return model_file\n\n\[email protected](scope=\"session\")\ndef tflite_compiled_model(tmpdir_factory):\n\n # Not all CI environments will have TFLite installed\n # so we need to safely skip this fixture that will\n # crash the tests that rely on it.\n # As this is a pytest.fixture, we cannot take advantage\n # of pytest.importorskip. Using the block below instead.\n try:\n import tflite\n except ImportError:\n print(\"Cannot import tflite, which is required by tflite_compiled_module_as_tarfile.\")\n return \"\"\n\n target_dir = tmpdir_factory.mktemp(\"data\")\n return get_sample_compiled_module(target_dir, \"mock.tar\")\n\n\[email protected](scope=\"session\")\ndef imagenet_cat(tmpdir_factory):\n tmpdir_name = tmpdir_factory.mktemp(\"data\")\n cat_file_name = \"imagenet_cat.npz\"\n\n cat_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\n image_path = download_testdata(cat_url, \"inputs\", module=[\"tvmc\"])\n resized_image = Image.open(image_path).resize((224, 224))\n image_data = np.asarray(resized_image).astype(\"float32\")\n image_data = np.expand_dims(image_data, axis=0)\n\n cat_file_full_path = os.path.join(tmpdir_name, cat_file_name)\n np.savez(cat_file_full_path, input=image_data)\n\n return cat_file_full_path\n\n\[email protected](scope=\"session\")\ndef tflite_mobilenet_v1_0_25_128(tmpdir_factory):\n base_url = \"https://storage.googleapis.com/download.tensorflow.org/models\"\n model_url = \"mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz\"\n model_file = download_and_untar(\n \"{}/{}\".format(base_url, model_url),\n \"mobilenet_v1_0.25_128.tflite\",\n temp_dir=tmpdir_factory.mktemp(\"data\"),\n )\n\n return model_file\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nimport tvm.testing\nfrom tvm import te, tir\nfrom tvm import topi\nfrom tvm.contrib import utils, clang\nfrom tvm.script import ty\nimport numpy as np\nimport ctypes\nimport math\n\n\ndef test_nearbyint():\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n A_rounded = te.compute((m,), lambda *i: tvm.tir.nearbyint(A(*i)), name=\"A\")\n s = te.create_schedule(A_rounded.op)\n f = tvm.build(s, [A, A_rounded], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(high=100, size=n).astype(A.dtype), dev)\n a_rounded = tvm.nd.array(np.random.uniform(size=n).astype(A_rounded.dtype), dev)\n f(a, a_rounded)\n # Note that numpys rint rounds to nearest integer with\n # ties to halfway is broken by rounding to even.\n # So that 1.5 and 2.5 will round 2.\n # This is the default rounding mode with libc as well.\n # However one can set a different rounding mode and in that\n # case numpy result might differ.\n tvm.testing.assert_allclose(a_rounded.asnumpy(), np.rint(a.asnumpy()))\n\n\ndef test_round_intrinsics_on_int():\n i = tvm.te.var(\"i\", \"int32\")\n for op in [tvm.tir.round, tvm.tir.trunc, tvm.tir.ceil, tvm.tir.floor, tvm.tir.nearbyint]:\n assert op(tvm.tir.const(10, \"int32\")).value == 10\n assert op(tvm.tir.const(True, \"bool\")).value == True\n assert op(i).same_as(i)\n\n assert tvm.tir.isnan(tvm.tir.const(10, \"int32\")).value == False\n\n\ndef test_unary_intrin():\n test_funcs = [\n (tvm.tir.exp10, lambda x: np.power(10, x)),\n (tvm.tir.log2, lambda x: np.log2(x)),\n (tvm.tir.log10, lambda x: np.log10(x)),\n (tvm.tir.sinh, lambda x: np.sinh(x)),\n (tvm.tir.cosh, lambda x: np.cosh(x)),\n (tvm.tir.log1p, lambda x: np.log1p(x)),\n (tvm.tir.asin, lambda x: np.arcsin(x)),\n (tvm.tir.acos, lambda x: np.arccos(x)),\n (tvm.tir.atan, lambda x: np.arctan(x)),\n (tvm.tir.asinh, lambda x: np.arcsinh(x)),\n (tvm.tir.acosh, lambda x: np.arccosh(x)),\n (tvm.tir.atanh, lambda x: np.arctanh(x)),\n ]\n\n def run_test(tvm_intrin, np_func):\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.compute((m,), lambda *i: tvm_intrin(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n f = tvm.build(s, [A, B], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0.1, 0.5, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-5, rtol=1e-5)\n\n for func in test_funcs:\n run_test(*func)\n\n\ndef test_binary_intrin():\n test_funcs = [\n (tvm.tir.atan2, lambda x1, x2: np.arctan2(x1, x2)),\n (tvm.tir.nextafter, lambda x1, x2: np.nextafter(x1, x2)),\n (tvm.tir.copysign, lambda x1, x2: np.copysign(x1, x2)),\n (tvm.tir.hypot, lambda x1, x2: np.hypot(x1, x2)),\n ]\n\n def run_test(tvm_intrin, np_func):\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.placeholder((m,), name=\"B\")\n C = te.compute((m,), lambda *i: 
tvm_intrin(A(*i), B(*i)), name=\"C\")\n s = te.create_schedule(C.op)\n f = tvm.build(s, [A, B, C], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b, c)\n tvm.testing.assert_allclose(\n c.asnumpy(), np_func(a.asnumpy(), b.asnumpy()), atol=1e-5, rtol=1e-5\n )\n\n for func in test_funcs:\n run_test(*func)\n\n\ndef test_ldexp():\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.placeholder((m,), name=\"B\", dtype=\"int32\")\n C = te.compute((m,), lambda *i: tvm.tir.ldexp(A(*i), B(*i)), name=\"C\")\n s = te.create_schedule(C.op)\n f = tvm.build(s, [A, B, C], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.randint(0, 5, size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b, c)\n tvm.testing.assert_allclose(\n c.asnumpy(), np.ldexp(a.asnumpy(), b.asnumpy()), atol=1e-5, rtol=1e-5\n )\n\n\ndef test_clz():\n def clz_np(x, dtype):\n ceil_log2 = np.ceil(np.log2(x)).astype(dtype)\n bits = int(dtype[-2:])\n clz = bits - ceil_log2\n clz[np.bitwise_and(x, x - 1) == 0] -= 1\n return clz\n\n for target in [\"llvm\", \"vulkan\"]:\n if not tvm.testing.device_enabled(\"vulkan\"):\n continue\n\n for dtype in [\"int32\", \"int64\"]:\n m = te.var(\"m\")\n A = te.placeholder((m,), name=\"A\", dtype=dtype)\n B = te.compute((m,), lambda *i: tvm.tir.clz(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n\n if target == \"vulkan\":\n bx, tx = s[B].split(B.op.axis[0], factor=64)\n\n s[B].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[B].bind(tx, te.thread_axis(\"threadIdx.x\"))\n\n f = tvm.build(s, [A, B], target)\n dev = tvm.device(target, 0)\n n = 10\n\n highs = [10, 100, 1000, 10000, 100000, 1000000]\n\n if dtype == \"int64\":\n highs.append((1 << 63) - 1)\n\n for high in highs:\n a_np = np.random.randint(1, high=high, size=(n,)).astype(dtype)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(np.zeros((n,)).astype(\"int32\"), dev)\n f(a, b)\n ref = clz_np(a_np, dtype)\n np.testing.assert_equal(b.asnumpy(), ref)\n\n\[email protected]\nclass Module:\n def test_tir_fma(A: ty.handle, B: ty.handle, C: ty.handle, d: ty.handle) -> None:\n # function attr dict\n tir.func_attr({\"global_symbol\": \"test_fma\", \"tir.noalias\": True})\n n = tir.var(\"int32\")\n stride = tir.var(\"int32\")\n stride_1 = tir.var(\"int32\")\n stride_2 = tir.var(\"int32\")\n stride_3 = tir.var(\"int32\")\n A_1 = tir.match_buffer(\n A,\n [n],\n strides=[stride],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n B_1 = tir.match_buffer(\n B,\n [n],\n strides=[stride_1],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n C_1 = tir.match_buffer(\n C,\n [n],\n strides=[stride_2],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n d_1 = tir.match_buffer(\n d,\n [n],\n strides=[stride_3],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n # body\n for i in tir.serial(0, n):\n d_1.data[(i * stride_3)] = (\n tir.load(\"float32\", A_1.data, (i * stride))\n * tir.load(\"float32\", B_1.data, (i * stride_1))\n ) + tir.load(\"float32\", C_1.data, (i * stride_2))\n\n\ndef test_fma():\n opt = tvm.transform.Sequential(\n [\n tvm.tir.transform.Apply(lambda f: f.with_attr(\"target\", 
tvm.target.Target(\"llvm\"))),\n tvm.tir.transform.LowerIntrin(),\n ]\n )\n mod = opt(Module())\n assert mod[\"test_tir_fma\"].body.body.value.op.name == \"tir.call_llvm_pure_intrin\"\n\n\nif __name__ == \"__main__\":\n test_nearbyint()\n test_unary_intrin()\n test_round_intrinsics_on_int()\n test_binary_intrin()\n test_ldexp()\n test_clz()\n test_fma()\n" ]
[ [ "torch.jit.save", "tensorflow.keras.applications.resnet50.ResNet50", "numpy.expand_dims", "numpy.savez", "numpy.asarray", "torch.randn", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.InputLayer", "tensorflow.keras.layers.Flatten" ], [ "numpy.arctanh", "numpy.arctan", "numpy.arctan2", "numpy.hypot", "numpy.random.randint", "numpy.nextafter", "numpy.arcsin", "numpy.copysign", "numpy.log1p", "numpy.zeros", "numpy.cosh", "numpy.power", "numpy.arccosh", "numpy.arccos", "numpy.log10", "numpy.arcsinh", "numpy.log2", "numpy.sinh", "numpy.bitwise_and", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Brian-ning/HMNE
[ "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e" ]
[ "Source/tools/Layer_Distance_Calculation/78/draw_78.py", "Source/AMNE.py" ]
[ "#coding: utf-8\n\nimport pickle\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\nlayers = ['0_communication.txt','1_financial.txt','2_operationalaggregated.txt','3_trustaggregated.txt']\ngraphs = []\n\n# nodes_infor = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 40, 43, 44, 45, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 62, 63, 64, 65, 68, 69, 73, 74, 75, 82, 86, 87, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 139, 140, 141, 142, 146, 147, 148, 149, 150, 151, 152, 158, 159, 160, 163], [13, 14, 26, 27, 38, 39, 41, 42, 46, 47, 48, 57, 58, 66, 67, 70, 71, 72, 76, 77, 78, 79, 80, 81, 83, 84, 85, 94, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 143, 144, 145, 153, 154, 155, 156, 157, 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190]]\n# nodes_infor1 = [str(i) for i in nodes_infor[0]]\n# nodes_infor2 = [str(i) for i in nodes_infor[1]]\n# nodes_infor = []\n# nodes_infor.append(nodes_infor1)\n# nodes_infor.append(nodes_infor2)\n\nfor l in layers:\n with open(l,'r+') as f:\n graph = nx.Graph(name=l)\n for line in f.readlines():\n src,dst = line.strip().split()\n graph.add_node(src)\n graph.add_node(dst)\n graph.add_edge(src,dst)\n graphs.append(graph)\n\nmerged_graph = nx.Graph(name='merged')\nfor g in graphs:\n merged_graph.add_nodes_from(g.nodes())\n merged_graph.add_edges_from(g.edges())\n\npos = nx.spring_layout(merged_graph)\n\ngraphs.append(merged_graph)\n\nfor g in graphs:\n plt.figure(g.name)\n # nx.draw_networkx_nodes(g,pos,node_size=150,nodelist=list(set(nodes_infor[0])&set(g.nodes())),node_color='r',node_shape='o',alpha=0.8)\n nx.draw_networkx_nodes(g,pos,node_size=150,node_color='r',node_shape='o',alpha=0.8)\n nx.draw_networkx_edges(g,pos)\n nx.draw_networkx_labels(g,pos,font_size=8)\n plt.axis('off')\n plt.savefig(g.name+'.pdf')\nplt.show()\n\n\n\n\n", "import os\nimport torch\nimport torch.nn as nn\nimport Reader\nimport Graph2Coo\nimport GATModel\nimport numpy as np\nimport pickle as pk\nimport Evaluation as eval\nimport cross_layer_walk as clw\n\ntransform = {\n '1':lambda a, b, c: a + (b + c),\n '2':lambda a, b, c: torch.mm(torch.mm(b, c.t()), a),\n '3':lambda a, b, c: torch.mul(a, b*c)\n}\n\nclass train_model:\n def __init__(self, path, sampling, dimension):\n self.path = path\n self.sampling = sampling\n self.dimension = dimension\n self.data = None\n\n def train_AMNE(self):\n # 加载数据集\n dataset, edges_list, edges_label, nodes_mat = self.dataset_load() # 加载数据集,测试边集和相应的标签\n number_graphs = len(dataset) # 图的个数\n model = GATModel.Net(dataset, number_graphs) # 初始化模型\n # 设置目标函数和优化方法\n criterion = nn.BCELoss() #要是解决多分类任务的目标函数\n optimizer = torch.optim.Adam(model.parameters(), weight_decay=0.001, lr=0.0001) # 设置优化器\n\n pre_acc = 0 # 用来保存最优的模型情况\n for epoch in range(1, 3001):\n optimizer.zero_grad()\n fuse_feat, used_feat, comp_re, obf1, obf0 = model() # 前向计算,返回的这些值应该不需要计算梯度,除了used_feat\n\n obf2 = criterion(fuse_feat, torch.tensor(dataset[0].x, dtype=torch.float32)) # 初始的属性与融合之后的属性\n obf3 = criterion(comp_re, nodes_mat) # 互补性信息的计算\n object_function = obf0 + obf1 + obf2 + obf3 # 总的目标函数\n\n # 测试集上验证学习到的嵌入的性能\n Acc = []\n Adj = []\n # 循环10次,求平均值\n for i in range(10):\n # 利用5交叉验证,计算得到准确性和另外一种指标\n accuracy, adjust = eval.link_prediction(used_feat, 
edges_list, edges_label, dimensions = 128, GCN=True)\n Acc.append(accuracy)\n Adj.append(adjust)\n # 求平均值\n average_acc = sum(Acc)/10\n average_adj = sum(Adj)/10\n\n print(\"----Epoch: %d -----Loss = %.4f : %.4f/ %.4f/ %.4f/ %.4f ----Accuracy = %.4f ----ADJ Score = %.4f\"%(epoch, object_function, obf0, obf1, obf2, obf3, average_acc, average_adj)) # 最优模型的输出和保存\n if pre_acc < average_acc:\n max_accuracy = average_acc\n max_adjust = average_adj\n pre_acc = average_acc\n Max_acc = Acc\n Max_adj = Adj\n torch.save(model.state_dict(),'./model/model.pt')\n # 模型的反向传播和参数修改\n object_function.backward() # 反向传播,更新梯度\n optimizer.step()\n print(\" ----Max Accuracy : %.4f ---- MAx ADJ Score: %.4f ----\"%(max_accuracy, max_adjust))\n print([(Max_acc[i], Max_adj[i]) for i in range(len(Max_acc))])\n\n def dataset_load(self):\n path = \"baselines.pkl\"\n if os.path.exists(path):\n print(\"The pkl file has existed!\")\n with open(path, 'rb') as f:\n mul_nets, _, pos_edge_list, neg_edge_list, nodes_attr = pk.load(f)\n else:\n file_path = \"./Sampling_graph/Datasets_With_Attributes/Graph/\"+ os.path.basename(self.path) + \".graph\"\n mul_nets, pos_edge_list, neg_edge_list, nodes_attr = Reader.data_load(file_path)\n nodes_prob = clw.RWGraph(mul_nets) # 加载节点局部信息互补性的采样度量\n nodes_mat = np.zeros((mul_nets[0].number_of_nodes(), len(mul_nets)*len(mul_nets)))\n for n in sorted(list(mul_nets[0].nodes())):\n nodes_mat[int(n),:] = nodes_prob.layer_transition_prob[n].flatten()\n\n graph_list = Graph2Coo.graphs2coo(mul_nets, nodes_attr)\n dataset = Graph2Coo.CreatMyDataset(graph_list, '../Benchmark/')\n for i in range(len(mul_nets)):\n dataset[i].edge_index = torch.tensor(dataset[i].edge_index, dtype=torch.long)\n edges_list, labels = get_selected_edges(pos_edge_list, neg_edge_list)\n return dataset, edges_list, labels, torch.from_numpy(nodes_mat).to(torch.float32)\n\n def test_modal(self):\n path = './model/model.pt'\n dataset, edges_list, edges_label, nodes_attr = self.dataset_load()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if os.path.exists(path):\n model = GATModel.Net(dataset, len(dataset))\n model.load_state_dict(torch.load(path))\n _, used_feat, _, _, _ = model.forward()\n for i in range(100):\n accuracy, adjust = eval.link_prediction(used_feat, edges_list, edges_label, dimensions = 128, GCN=True)\n print(\" ---- Accuracy : %.4f ---- ADJ Score: %.4f ----\"%(accuracy, adjust))\n else:\n print('The model has saved in this file path!')\n\ndef get_selected_edges(pos_edge_list, neg_edge_list):\n edges = pos_edge_list + neg_edge_list\n labels = np.zeros(len(edges))\n labels[:len(pos_edge_list)] = 1\n return edges, labels\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ], [ "torch.load", "torch.from_numpy", "torch.tensor", "torch.nn.BCELoss", "torch.mul", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
darwinbeing/deepdriving-tensorflow
[ "036a83871f3515b2c041bc3cd5e845f6d8f7b3b7", "036a83871f3515b2c041bc3cd5e845f6d8f7b3b7", "036a83871f3515b2c041bc3cd5e845f6d8f7b3b7" ]
[ "python/modules/deep_learning/layer/conv/CLogFeatureMap.py", "python/modules/deep_learning/layer/dense/CDense.py", "python/modules/deep_learning/trainer/CTrainer.py" ]
[ "# The MIT license:\n#\n# Copyright 2017 Andre Netzeband\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Note: The DeepDriving project on this repository is derived from the DeepDriving project devloped by the princeton\n# university (http://deepdriving.cs.princeton.edu/). The above license only applies to the parts of the code, which\n# were not a derivative of the original DeepDriving project. For the derived parts, the original license and\n# copyright is still valid. Keep this in mind, when using code from this project.\n\nimport misc.arguments as args\nimport tensorflow as tf\n\nfrom .. import Setup\nfrom .. import struct\nfrom ... import helpers\n\nclass CLogFeatureMap(struct.CLayer):\n def __init__(self, Name = \"LogFeatures\"):\n self._Name = Name\n\n\n def copy(self):\n New = CLogFeatureMap(self._Name)\n return New\n\n\n def __call__(self, Name = args.NotSet):\n New = self.copy()\n\n if args.isSet(Name):\n self._Name = Name\n\n return New\n\n\n def apply(self, Input):\n Setup.log(\"* Log Featute Map in summary\")\n\n with tf.variable_scope(self._Name):\n helpers.saveFeatureMap(Input, self._Name)\n\n return tf.identity(Input, \"Features\")\n\n\n", "# The MIT license:\n#\n# Copyright 2017 Andre Netzeband\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Note: The DeepDriving project on this repository is derived from the DeepDriving project devloped by the princeton\n# university (http://deepdriving.cs.princeton.edu/). 
The above license only applies to the parts of the code, which\n# were not a derivative of the original DeepDriving project. For the derived parts, the original license and\n# copyright is still valid. Keep this in mind, when using code from this project.\n\nimport numpy as np\nimport tensorflow as tf\nimport misc.arguments as args\nimport debug\n\nfrom .. import struct\nfrom .. import initializer\nfrom .. import Setup\nfrom ... import helpers\n\nclass CDense(struct.CNamedLayer):\n\n def __init__(self, Nodes, Name = \"Dense\"):\n super().__init__(Name)\n self._Nodes = Nodes\n self._WeightLR = 1.0\n self._BiasLR = 1.0\n self._WeightDecay = 1.0\n self._BiasDecay = 0.0\n self._UseBias = True\n self._WeightInit = initializer.XavierInitializer()\n self._BiasInit = initializer.ConstantInitializer(0.0)\n\n\n def copy(self):\n New = CDense(self._Nodes)\n New = self._copyArgs(New)\n return New\n\n\n def _copyArgs(self, New):\n New = super()._copyArgs(New)\n New._Nodes = self._Nodes\n New._WeightLR = self._WeightLR\n New._BiasLR = self._BiasLR\n New._WeightDecay = self._WeightDecay\n New._BiasDecay = self._BiasDecay\n New._WeightInit = self._WeightInit\n New._BiasInit = self._BiasInit\n New._UseBias = self._UseBias\n return New\n\n\n def __call__(self, Nodes = args.NotSet, Name = args.NotSet):\n New = super().__call__(Name)\n\n if args.isSet(Nodes):\n New._Nodes = Nodes\n\n return New\n\n\n def setWeightLR(self, LR):\n self._WeightLR = LR\n return self\n\n def setBiasLR(self, LR):\n self._BiasLR = LR\n return self\n\n def setWeightDecay(self, Decay):\n self._WeightDecay = Decay\n return self\n\n def setBiasDecay(self, Decay):\n self._BiasDecay = Decay\n return self\n\n def setWeightInit(self, Init):\n self._WeightInit = Init\n return self\n\n def setBiasInit(self, Init):\n self._BiasInit = Init\n return self\n\n def setUseBias(self, UseBias):\n self._UseBias = UseBias\n return self\n\n def setNodes(self, Nodes):\n self._Nodes = Nodes\n return self\n\n\n def _apply(self, Input):\n Temp = self.copy()\n\n debug.Assert(Temp._Nodes != None, \"You have to specify the number of nodes for this layer.\")\n\n InputShape = Input.shape\n if len(InputShape) > 2:\n InputLength = int(np.prod(InputShape[1:]))\n Setup.log(\"* Reshape layer input {} to vector with {} elements.\".format(InputShape, InputLength))\n Input = tf.reshape(Input, shape=[-1, InputLength])\n\n else:\n InputLength = int(InputShape[1])\n\n if Temp._UseBias:\n Setup.log(\"* with {} Output-Nodes\".format(Temp._Nodes))\n else:\n Setup.log(\"* with {} Output-Nodes without Bias\".format(Temp._Nodes))\n\n X = Input\n\n if Temp._WeightLR != 1.0:\n Setup.log(\"* Weight-LR: {}\".format(Temp._WeightLR))\n\n if Temp._WeightDecay != 1.0:\n Setup.log(\"* Weight-Decay: {}\".format(Temp._WeightDecay))\n\n Setup.log(\"* Weight-Initializer: {}\".format(Temp._WeightInit))\n\n W = helpers.createVariable(Shape=[InputLength, Temp._Nodes],\n Name=\"Weights\",\n WeightDecayFactor=Temp._WeightDecay,\n Initializer=Temp._WeightInit.getInit(),\n LearningRate=Temp._WeightLR)\n\n if Temp._UseBias:\n if Temp._BiasLR != 1.0:\n Setup.log(\"* Bias-LR: {}\".format(Temp._BiasLR))\n\n if Temp._BiasDecay != 1.0:\n Setup.log(\"* Bias-Decay: {}\".format(Temp._BiasDecay))\n\n Setup.log(\"* Bias-Initializer: {}\".format(Temp._BiasInit))\n\n B = helpers.createBias(Shape=[Temp._Nodes],\n Name=\"Bias\",\n WeightDecayFactor=Temp._BiasDecay,\n Initializer=Temp._BiasInit.getInit(),\n LearningRate=Temp._BiasLR)\n\n S = tf.matmul(X, W)\n\n if Temp._UseBias:\n S = tf.add(S, B)\n\n\n if 
Setup.StoreHistogram:\n tf.summary.histogram(\"Weights\", W)\n if Temp._UseBias:\n tf.summary.histogram(\"Bias\", B)\n tf.summary.histogram(\"Signal\", S)\n\n Setup.log(\"* Output-Shape: {}\".format(S.shape))\n\n return S", "import debug\r\nimport tensorflow as tf\r\nimport time\r\nimport os\r\n\r\nfrom .. import data\r\nfrom .. import error\r\nfrom .. import helpers\r\nfrom .. import internal\r\nfrom .. import network\r\nfrom .. import checkpoint\r\nfrom .. import layer\r\n\r\n\r\nclass CTrainer(internal.CBaseRunner):\r\n def __init__(self, Network, Reader, ErrorMeasurement, Settings = None):\r\n super().__init__(Settings)\r\n debug.Assert(isinstance(Network, network.CNetwork), \"You must specify a Network object.\")\r\n debug.Assert(isinstance(Reader, data.CReader), \"You must specify a Reader object.\")\r\n debug.Assert(isinstance(ErrorMeasurement, error.CMeasurement), \"You must specify an ErrorMeasurement object.\")\r\n\r\n self._Network = Network\r\n self._Reader = Reader\r\n self._ErrorMeasurement = ErrorMeasurement\r\n self._Printer = None\r\n self._SummaryMerger = None\r\n\r\n self._IsReady = False\r\n self._OptimizerStep = None\r\n\r\n self._prepare(self._Settings)\r\n\r\n\r\n def _prepare(self, Settings):\r\n if not self._IsReady:\r\n self._OptimizerStep = self._createOptimizer(self._ErrorMeasurement, Settings)\r\n\r\n Variables, Tensors = helpers.getTrainableVariables()\r\n print(\"Current Model has {} parameters in {} trainable tensors.\".format(Variables, Tensors))\r\n\r\n self.reset(self.getCheckpointDir())\r\n self._Summary = tf.summary.merge_all()\r\n self._IsReady = True\r\n\r\n def train(self, NumberOfEpochs = None):\r\n Session = self._Session\r\n\r\n # Init Writer is necessary\r\n TrainWriter = None\r\n ValWriter = None\r\n if self._SummaryDir != None:\r\n print(\"Store tensorboard summary at directory {}\".format(self._SummaryDir))\r\n TrainWriter = tf.summary.FileWriter(os.path.join(self._SummaryDir, \"train\"))\r\n TrainWriter.add_graph(Session.graph)\r\n ValWriter = tf.summary.FileWriter(os.path.join(self._SummaryDir, \"val\"))\r\n ValWriter.add_graph(Session.graph)\r\n else:\r\n print(\"Do not store any summary\")\r\n\r\n # Store settings\r\n if not os.path.exists(self.getCheckpointDir()):\r\n os.makedirs(self.getCheckpointDir())\r\n Filename = os.path.join(self.getCheckpointDir(), \"train.cfg\")\r\n print(\"Store training settings in file {}\".format(Filename))\r\n with open(Filename, \"w\") as File:\r\n File.write(str(self._Settings))\r\n\r\n # Start queues\r\n QueueCoordinage = tf.train.Coordinator()\r\n tf.train.start_queue_runners(sess=Session, coord=QueueCoordinage)\r\n\r\n # Calculate number of epochs to run\r\n MaxEpochs = self.getMaxEpochs() - self._EpochCount\r\n if NumberOfEpochs != None:\r\n MaxEpochs = min([NumberOfEpochs, MaxEpochs])\r\n\r\n # Setup Printer\r\n if self._Printer != None:\r\n self._Printer.setupTraining(self.getMaxEpochs())\r\n\r\n # Loop Preparation\r\n BatchSize = self._Reader.getBatchSize()\r\n Epoch = self._EpochCount\r\n IterationsPerEpoch = helpers.getIterationsPerEpoch(self.getEpochSize(), BatchSize)\r\n Iteration = Epoch * IterationsPerEpoch\r\n print(\"Run training for {} epochs beginning with epoch {} and {} iterations per epoch.\".format(MaxEpochs, self._EpochCount, IterationsPerEpoch))\r\n\r\n # Initial Eval Step\r\n StartTime = time.time()\r\n SummaryResult, OtherResults = self._internalEvalStep(Session, Iteration, 0, Epoch)\r\n\r\n Writer = TrainWriter\r\n if Epoch > 0:\r\n # Do not write to summary, since is has already 
been written by the training before\r\n Writer = None\r\n\r\n self._postEpochAction(Writer, SummaryResult, OtherResults, StartTime, Iteration, Epoch, BatchSize)\r\n SummaryResult = self._internalValidationStep(Session, Iteration, 0, Epoch)\r\n\r\n Writer = ValWriter\r\n if Epoch > 0:\r\n # Do not write to summary, since is has already been written by the training before\r\n Writer = None\r\n\r\n self._postValidationAction(Writer, SummaryResult, Iteration, Epoch, BatchSize)\r\n\r\n # Training Loop\r\n StartTime = time.time()\r\n for EpochNumber in range(MaxEpochs):\r\n Epoch = EpochNumber + self._EpochCount + 1\r\n SampleCount = 0\r\n for Batch in range(IterationsPerEpoch):\r\n Iteration += 1\r\n SampleCount += BatchSize\r\n self._printTrainingBar(20, Iteration, Epoch, Batch, IterationsPerEpoch, True)\r\n self._internalTrainStep(Session, Iteration, Batch, Epoch)\r\n\r\n SummaryResult, OtherResults = self._internalEvalStep(Session, Iteration, 0, Epoch)\r\n StartTime = self._postEpochAction(TrainWriter, SummaryResult, OtherResults, StartTime, Iteration, Epoch, SampleCount)\r\n SummaryResult = self._internalValidationStep(Session, Iteration, 0, Epoch)\r\n self._postValidationAction(ValWriter, SummaryResult, Iteration, Epoch, BatchSize)\r\n\r\n self._saveCheckpoint(Epoch, EpochNumber == MaxEpochs)\r\n\r\n self._EpochCount = Epoch\r\n\r\n # Stop queues\r\n QueueCoordinage.request_stop()\r\n QueueCoordinage.join()\r\n\r\n # Close writer\r\n if TrainWriter != None:\r\n TrainWriter.close()\r\n\r\n # Close writer\r\n if ValWriter != None:\r\n ValWriter.close()\r\n\r\n\r\n def _internalEvalStep(self, Session, Iteration, Batch, Epoch):\r\n RunTargets = [self._Summary]\r\n\r\n RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))\r\n\r\n SummaryResult = RawResults[0]\r\n if len(RawResults) > 1:\r\n OtherResults = RawResults[1:]\r\n else:\r\n OtherResults = []\r\n\r\n return SummaryResult, OtherResults\r\n\r\n\r\n def _internalValidationStep(self, Session, Iteration, Batch, Epoch):\r\n RunTargets = [self._Summary]\r\n\r\n IsTraining = self._Reader.IsTraining\r\n self._Reader.IsTraining = False\r\n\r\n #print(\"Validate {} Iterations...\".format(self.getValidationIterations(self._Settings)))\r\n IterationsPerStep = self.getValidationIterations(self._Settings)\r\n for i in range(IterationsPerStep):\r\n self._printTrainingBar(20, Iteration, Epoch, i, IterationsPerStep, False)\r\n RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))\r\n SummaryResult = RawResults[0]\r\n\r\n if self._SummaryMerger != None:\r\n self._SummaryMerger.add(SummaryResult)\r\n\r\n self._Reader.IsTraining = IsTraining\r\n\r\n if self._SummaryMerger != None:\r\n SummaryResult = self._SummaryMerger.merge()\r\n\r\n return SummaryResult\r\n\r\n\r\n def _internalTrainStep(self, Session, Iteration, Batch, Epoch):\r\n RunTargets = [self._OptimizerStep]\r\n RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))\r\n\r\n return RawResults\r\n\r\n\r\n def _postValidationAction(self, Writer, Summary, Iteration, Epoch, SampleCount):\r\n if (Summary != None) and (Writer != None):\r\n Writer.add_summary(Summary, Epoch)\r\n\r\n if self._Printer != None:\r\n self._Printer.printValidationUpdate(Summary, Iteration, Epoch, SampleCount)\r\n\r\n\r\n def _postEpochAction(self, Writer, Summary, OtherResults, StartTime, Iteration, Epoch, SampleCount):\r\n Duration = (time.time() - StartTime)\r\n StartTime = 
time.time()\r\n\r\n if (Summary != None) and (Writer != None):\r\n Writer.add_summary(Summary, Epoch)\r\n\r\n if self._Printer != None:\r\n self._Printer.printEpochUpdate(Summary, Iteration, Epoch, Duration, SampleCount)\r\n\r\n return StartTime\r\n\r\n\r\n def _saveCheckpoint(self, Epoch, IsForceSave = False):\r\n EpochsUntilCheckpoint = self.getEpochsUntilCheckpoint()\r\n IsSave = False\r\n if EpochsUntilCheckpoint != None:\r\n if Epoch % EpochsUntilCheckpoint == 0:\r\n IsSave = True\r\n\r\n if IsSave or IsForceSave:\r\n self.saveModel(self.getCheckpointDir(), Epoch)\r\n\r\n\r\n def restore(self, Epoch=None):\r\n if Epoch is None:\r\n CheckpointFile = checkpoint.getLatestCheckpointFile(self.getCheckpointDir())\r\n\r\n else:\r\n CheckpointFile = checkpoint.getCheckpointFilename(self.getCheckpointDir(), Epoch)\r\n\r\n debug.Assert(CheckpointFile != None, \"Cannot find checkpoint file {}.\".format(CheckpointFile))\r\n super().restore(CheckpointFile)\r\n\r\n\r\n def getMaxEpochs(self):\r\n return self._getMaxEpochs(self._Settings)\r\n\r\n def getEpochSize(self):\r\n return self._getEpochSize(self._Settings)\r\n\r\n def getCheckpointDir(self):\r\n return self._getCheckpointDir(self._Settings)\r\n\r\n def getEpochsUntilCheckpoint(self):\r\n return self._getEpochsUntilCheckpoint(self._Settings)\r\n\r\n\r\n def _trainIteration(self, Session, RunTargets, Reader, Iteration, Batch, Epoch):\r\n raise Exception(\"You have to overwride this method and run a training iteration inside.\")\r\n # Return the results here\r\n return None, None\r\n\r\n def _createOptimizer(self, ErrorMeasurement, Settings):\r\n raise Exception(\"You have to overwride this method and create an optimizer step to return.\")\r\n return None\r\n\r\n def _getMaxEpochs(self, Settings):\r\n # You have to overwride this method to return the maximum number of epochs.\r\n return Settings['Trainer']['NumberOfEpochs']\r\n\r\n def _getEpochSize(self, Settings):\r\n # You have to overwride this method to return the epoch size.\r\n return Settings['Trainer']['EpochSize']\r\n\r\n def _getSummaryDir(self, Settings):\r\n # You can overrite this function to specify a summary directory\r\n if 'Trainer' in Settings:\r\n if 'SummaryPath' in Settings['Trainer']:\r\n return Settings['Trainer']['SummaryPath']\r\n\r\n return None\r\n\r\n def _getCheckpointDir(self, Settings):\r\n # You can overrite this function to specify a checkpoint directory\r\n if 'Trainer' in Settings:\r\n if 'CheckpointPath' in Settings['Trainer']:\r\n return os.path.join(Settings['Trainer']['CheckpointPath'], \"State_{}\".format(self._Network.State))\r\n\r\n return None\r\n\r\n def _getEpochsUntilCheckpoint(self, Settings):\r\n # You can overrite this function to specify the number of epochs until a checkpoint is stored\r\n if 'Trainer' in Settings:\r\n if 'CheckpointEpochs' in Settings['Trainer']:\r\n return Settings['Trainer']['CheckpointEpochs']\r\n\r\n return None\r\n\r\n\r\n def getValidationIterations(self, Settings):\r\n # You can overrite this function to specify the number of epochs until a checkpoint is stored\r\n if 'Validation' in Settings:\r\n if 'Samples' in Settings['Validation']:\r\n return int(Settings['Validation']['Samples']/self._Reader.getBatchSize())\r\n\r\n return 1\r\n\r\n\r\n def _printTrainingBar(self, BarSize, Iteration, Epoch, Batch, IterationsPerEpoch, IsTraining=True):\r\n Percent = Batch/IterationsPerEpoch\r\n Bar = '.' 
* int((BarSize*Percent))\r\n BarString = str(\"{:<\"+str(BarSize)+\"}\").format(Bar)\r\n\r\n if IsTraining:\r\n Prefix = str(\"Training Epoch {}\").format(Epoch)\r\n else:\r\n Prefix = str(\"Validation Epoch {}\").format(Epoch)\r\n\r\n print(\"\\r{:>8}: ({}) [{}] - {} / {}\".format(Iteration, Prefix, BarString, Batch, IterationsPerEpoch), end='', flush=True)\r\n print(\"\\r{:>8}: ({}) [{}] - {} / {}\".format(Iteration, Prefix, BarString, Batch, IterationsPerEpoch), end='', flush=True)\r\n if Batch >= (IterationsPerEpoch-1):\r\n print(\"\\r\", end='', flush=True)\r\n\r\n\r\n def _applyNoise(self, Gradients, GradientNoise):\r\n if GradientNoise is not None and GradientNoise > 0.0:\r\n NoiseLevel = GradientNoise / (tf.sqrt(tf.cast((CurrentOptimizationStep + 1), tf.float32)))\r\n NoisyGradients = []\r\n print(\"Apply noise to gradients (nu = {})...\".format(GradientNoise))\r\n # Taken from: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/optimizers.py\r\n for Gradient, Variable in Gradients:\r\n if Gradient is not None:\r\n if isinstance(Gradient, tf.IndexedSlices):\r\n GradientShape = Gradient.dense_shape\r\n else:\r\n GradientShape = Gradient.get_shape()\r\n\r\n Noise = tf.truncated_normal(GradientShape) * NoiseLevel\r\n Gradient += Noise\r\n\r\n NoisyGradients.append((Gradient, Variable))\r\n\r\n else:\r\n NoiseLevel = 0\r\n NoisyGradients = Gradients\r\n\r\n tf.summary.scalar(\"NoiseLevel\", NoiseLevel)\r\n return NoisyGradients\r\n\r\n\r\n def _applyIndiviualLearningRates(self, Gradients):\r\n print(\"Apply individual learning rate scales...\")\r\n ScaledGradients = []\r\n for Gradient, Variable in Gradients:\r\n Scale = layer.LearningRates.get(Variable.name)\r\n if Scale != None:\r\n Gradient *= Scale\r\n print(\" * \\\"{}\\\" has scale {}\".format(Variable.name, Scale))\r\n\r\n ScaledGradients.append((Gradient, Variable))\r\n\r\n return ScaledGradients\r\n\r\n\r\n def _addSumGradientSummary(self, Gradients):\r\n Sum = 0.0\r\n for Gradient, Variable in Gradients:\r\n Sum += tf.norm(Gradient)\r\n\r\n tf.summary.scalar(\"GradientNorm\", Sum)\r\n\r\n\r\n def _addSingleGradientSummary(self, Gradients):\r\n for Gradient, Variable in Gradients:\r\n tf.summary.scalar(Variable.name, tf.norm(Gradient))\r\n\r\n\r\n def _addGradientNoiseSummary(self, Gradients, NoisyGradients):\r\n for i, (Gradients, Variable) in enumerate(Gradients):\r\n tf.summary.scalar(Variable.name, tf.norm(NoisyGradients[i][0]) - tf.norm(Gradients))" ]
[ [ "tensorflow.variable_scope", "tensorflow.identity" ], [ "tensorflow.matmul", "tensorflow.reshape", "tensorflow.add", "numpy.prod", "tensorflow.summary.histogram" ], [ "tensorflow.norm", "tensorflow.truncated_normal", "tensorflow.train.start_queue_runners", "tensorflow.train.Coordinator", "tensorflow.cast", "tensorflow.summary.merge_all", "tensorflow.summary.scalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
pribalta/MolBART
[ "c2afec482883df370f4ce99e2ebdd98bce6bcc55", "c2afec482883df370f4ce99e2ebdd98bce6bcc55", "c2afec482883df370f4ce99e2ebdd98bce6bcc55" ]
[ "test/pre_train_model_test.py", "molbart/data/datasets.py", "molbart/predict.py" ]
[ "import pytest\nimport torch\nimport random\n\nfrom molbart.decoder import DecodeSampler\nfrom molbart.tokeniser import MolEncTokeniser\nfrom molbart.models.pre_train import BARTModel\n\nregex = \"\\[[^\\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\\(|\\)|\\.|=|#|-|\\+|\\\\\\\\|\\/|:|~|@|\\?|>|\\*|\\$|\\%[0-9]{2}|[0-9]\"\n\n# Use dummy SMILES strings\nreact_data = [\n \"CCO.C\",\n \"CCCl\",\n \"C(=O)CBr\"\n]\n\n# Use dummy SMILES strings\nprod_data = [\n \"cc\",\n \"CCl\",\n \"CBr\"\n]\n\nmodel_args = {\n \"d_model\": 5,\n \"num_layers\": 2,\n \"num_heads\": 1,\n \"d_feedforward\": 32,\n \"lr\": 0.0001,\n \"weight_decay\": 0.0,\n \"activation\": \"gelu\",\n \"num_steps\": 1000,\n \"max_seq_len\": 40\n}\n\nrandom.seed(a=1)\ntorch.manual_seed(1)\n\n\ndef build_tokeniser():\n tokeniser = MolEncTokeniser.from_smiles(react_data + prod_data, regex, mask_scheme=\"replace\")\n return tokeniser\n\n\ndef test_pos_emb_shape():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n pos_embs = model._positional_embs()\n\n assert pos_embs.shape[0] == model_args[\"max_seq_len\"]\n assert pos_embs.shape[1] == model.d_model\n\n\ndef test_construct_input_shape():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n token_output = tokeniser.tokenise(react_data, sents2=prod_data, pad=True)\n tokens = token_output[\"original_tokens\"]\n sent_masks = token_output[\"sentence_masks\"]\n\n token_ids = torch.tensor(tokeniser.convert_tokens_to_ids(tokens)).transpose(0, 1)\n sent_masks = torch.tensor(sent_masks).transpose(0, 1)\n\n emb = model._construct_input(token_ids, sent_masks)\n\n assert emb.shape[0] == max([len(ts) for ts in tokens])\n assert emb.shape[1] == 3\n assert emb.shape[2] == model_args[\"d_model\"]\n\n\ndef test_bart_forward_shape():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n react_token_output = tokeniser.tokenise(react_data, mask=True, pad=True)\n react_tokens = react_token_output[\"masked_tokens\"]\n react_pad_mask = react_token_output[\"masked_pad_masks\"]\n react_ids = torch.tensor(tokeniser.convert_tokens_to_ids(react_tokens)).T\n react_mask = torch.tensor(react_pad_mask).T\n\n prod_token_output = tokeniser.tokenise(prod_data, pad=True)\n prod_tokens = prod_token_output[\"original_tokens\"]\n prod_pad_mask = prod_token_output[\"original_pad_masks\"]\n prod_ids = torch.tensor(tokeniser.convert_tokens_to_ids(prod_tokens)).T\n prod_mask = torch.tensor(prod_pad_mask).T\n\n batch_input = {\n \"encoder_input\": react_ids,\n \"encoder_pad_mask\": react_mask,\n \"decoder_input\": prod_ids,\n \"decoder_pad_mask\": prod_mask\n }\n\n output = model(batch_input)\n model_output = output[\"model_output\"]\n token_output = output[\"token_output\"]\n\n exp_seq_len = 4 # From expected tokenised length of prod data\n exp_batch_size = len(prod_data)\n exp_dim = model_args[\"d_model\"]\n exp_vocab_size = len(tokeniser)\n\n assert tuple(model_output.shape) == (exp_seq_len, exp_batch_size, exp_dim)\n assert tuple(token_output.shape) == (exp_seq_len, exp_batch_size, exp_vocab_size)\n\n\ndef 
test_bart_encode_shape():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n react_token_output = tokeniser.tokenise(react_data, mask=True, pad=True)\n react_tokens = react_token_output[\"masked_tokens\"]\n react_pad_mask = react_token_output[\"masked_pad_masks\"]\n react_ids = torch.tensor(tokeniser.convert_tokens_to_ids(react_tokens)).T\n react_mask = torch.tensor(react_pad_mask).T\n\n batch_input = {\n \"encoder_input\": react_ids,\n \"encoder_pad_mask\": react_mask\n }\n\n output = model.encode(batch_input)\n\n exp_seq_len = 9 # From expected tokenised length of react data\n exp_batch_size = len(react_data)\n exp_dim = model_args[\"d_model\"]\n\n assert tuple(output.shape) == (exp_seq_len, exp_batch_size, exp_dim)\n\n\ndef test_bart_decode_shape():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n react_token_output = tokeniser.tokenise(react_data, mask=True, pad=True)\n react_tokens = react_token_output[\"masked_tokens\"]\n react_pad_mask = react_token_output[\"masked_pad_masks\"]\n react_ids = torch.tensor(tokeniser.convert_tokens_to_ids(react_tokens)).T\n react_mask = torch.tensor(react_pad_mask).T\n\n encode_input = {\n \"encoder_input\": react_ids,\n \"encoder_pad_mask\": react_mask\n }\n memory = model.encode(encode_input)\n\n prod_token_output = tokeniser.tokenise(prod_data, pad=True)\n prod_tokens = prod_token_output[\"original_tokens\"]\n prod_pad_mask = prod_token_output[\"original_pad_masks\"]\n prod_ids = torch.tensor(tokeniser.convert_tokens_to_ids(prod_tokens)).T\n prod_mask = torch.tensor(prod_pad_mask).T\n\n batch_input = {\n \"decoder_input\": prod_ids,\n \"decoder_pad_mask\": prod_mask,\n \"memory_input\": memory,\n \"memory_pad_mask\": react_mask\n }\n output = model.decode(batch_input)\n\n exp_seq_len = 4 # From expected tokenised length of prod data\n exp_batch_size = len(react_data)\n exp_vocab_size = len(tokeniser)\n\n assert tuple(output.shape) == (exp_seq_len, exp_batch_size, exp_vocab_size)\n\n\ndef test_calc_token_acc():\n tokeniser = build_tokeniser()\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n sampler = DecodeSampler(tokeniser, model_args[\"max_seq_len\"])\n model = BARTModel(sampler, pad_token_idx, len(tokeniser), **model_args)\n\n react_token_output = tokeniser.tokenise(react_data[1:], pad=True)\n react_tokens = react_token_output[\"original_tokens\"]\n react_pad_mask = react_token_output[\"original_pad_masks\"]\n target_ids = torch.tensor(tokeniser.convert_tokens_to_ids(react_tokens)).T[1:, :]\n target_mask = torch.tensor(react_pad_mask).T[1:, :]\n\n # 9 is expected seq len of react data when padded\n token_output = torch.rand([8, len(react_data[1:]), len(tokeniser)])\n\n \"\"\"\n Expected outputs \n CCCl\n C(=O)CBr\n\n Vocab:\n 0 <PAD>\n 3 &\n 6 C\n 7 O\n 8 .\n 9 Cl\n 10 (\n 11 =\n 12 )\n 13 Br\n \"\"\"\n\n # Batch element 0\n token_output[0, 0, 6] += 1\n token_output[1, 0, 6] -= 1\n token_output[2, 0, 9] += 1\n token_output[3, 0, 3] += 1\n token_output[4, 0, 0] += 1\n token_output[5, 0, 0] -= 1\n\n # Batch element 1\n token_output[0, 1, 6] += 1\n token_output[1, 1, 10] += 1\n token_output[2, 1, 11] += 1\n token_output[3, 1, 7] += 1\n token_output[4, 1, 12] -= 1\n token_output[5, 1, 6] 
+= 1\n token_output[6, 1, 13] -= 1\n token_output[7, 1, 3] += 1\n\n batch_input = {\n \"target\": target_ids,\n \"target_pad_mask\": target_mask\n }\n model_output = {\n \"token_output\": token_output\n }\n token_acc = model._calc_token_acc(batch_input, model_output)\n\n exp_token_acc = (3 + 6) / (4 + 8)\n\n assert exp_token_acc == token_acc\n", "import random\nimport functools\nimport torch\nimport pandas as pd\nimport pytorch_lightning as pl\nfrom pathlib import Path\nfrom rdkit import Chem\nfrom typing import Optional\nfrom torch.utils.data import Dataset\nfrom concurrent.futures import ProcessPoolExecutor\n\n\nclass _AbsDataset(Dataset):\n def __len__(self):\n raise NotImplementedError()\n\n def __getitem__(self, item):\n raise NotImplementedError()\n\n def split_idxs(self, val_idxs, test_idxs):\n raise NotImplementedError()\n\n def split(self, val_perc=0.2, test_perc=0.2):\n \"\"\" Split the dataset randomly into three datasets\n\n Splits the dataset into train, validation and test datasets.\n Validation and test dataset have round(len * <val/test>_perc) elements in each\n \"\"\"\n\n split_perc = val_perc + test_perc\n if split_perc > 1:\n msg = f\"Percentage of dataset to split must not be greater than 1, got {split_perc}\"\n raise ValueError(msg)\n\n dataset_len = len(self)\n val_len = round(dataset_len * val_perc)\n test_len = round(dataset_len * test_perc)\n\n val_idxs = random.sample(range(dataset_len), val_len)\n test_idxs = random.sample(range(dataset_len), test_len)\n\n train_dataset, val_dataset, test_dataset = self.split_idxs(val_idxs, test_idxs)\n\n return train_dataset, val_dataset, test_dataset\n\n\nclass ReactionDataset(_AbsDataset):\n def __init__(self, reactants, products, seq_lengths=None):\n super(ReactionDataset, self).__init__()\n\n if len(reactants) != len(products):\n raise ValueError(f\"There must be an equal number of reactants and products\")\n\n self.reactants = reactants\n self.products = products\n self.seq_lengths = seq_lengths\n\n def __len__(self):\n return len(self.reactants)\n\n def __getitem__(self, item):\n reactant_mol = self.reactants[item]\n product_mol = self.products[item]\n return reactant_mol, product_mol\n\n def split_idxs(self, val_idxs, test_idxs):\n \"\"\" Splits dataset into train, val and test\n\n Note: Assumes all remaining indices outside of val_idxs and test_idxs are for training data\n The datasets are returned as ReactionDataset objects, if these should be a subclass \n the from_reaction_pairs function should be overidden\n\n Args:\n val_idxs (List[int]): Indices for validation data\n test_idxs (List[int]): Indices for test data\n\n Returns:\n (ReactionDataset, ReactionDataset, ReactionDataset): Train, val and test datasets\n \"\"\"\n\n val_data = [self[idx] for idx in val_idxs]\n val_lengths = [self.seq_lengths[idx] for idx in val_idxs] if self.seq_lengths is not None else None\n val_dataset = self.from_reaction_pairs(val_data, lengths=val_lengths)\n\n test_data = [self[idx] for idx in test_idxs]\n test_lengths = [self.seq_lengths[idx] for idx in test_idxs] if self.seq_lengths is not None else None\n test_dataset = self.from_reaction_pairs(test_data, lengths=test_lengths)\n\n train_idxs = set(range(len(self))) - set(val_idxs).union(set(test_idxs))\n train_data = [self[idx] for idx in sorted(train_idxs)]\n train_lengths = [self.seq_lengths[idx] for idx in train_idxs] if self.seq_lengths is not None else None\n train_dataset = self.from_reaction_pairs(train_data, lengths=train_lengths)\n\n return train_dataset, val_dataset, 
test_dataset\n\n @staticmethod\n def from_reaction_pairs(reaction_pairs, lengths=None):\n reacts, prods = tuple(zip(*reaction_pairs))\n dataset = ReactionDataset(reacts, prods, seq_lengths=lengths)\n return dataset\n\n\nclass Uspto50(ReactionDataset):\n def __init__(self, data_path):\n path = Path(data_path)\n df = pd.read_pickle(path)\n reactants = df[\"reactant_ROMol\"].tolist()\n products = df[\"products_ROMol\"].tolist()\n \n super().__init__(reactants, products)\n\n self.train_idxs, self.val_idxs, self.test_idxs = self._save_idxs(df)\n\n def _save_idxs(self, df):\n val_idxs = df.index[df[\"set\"] == \"valid\"].tolist()\n test_idxs = df.index[df[\"set\"] == \"test\"].tolist()\n\n idxs_intersect = set(val_idxs).intersection(set(test_idxs))\n if len(idxs_intersect) > 0:\n raise ValueError(f\"Val idxs and test idxs overlap\")\n\n idxs = set(range(len(df.index)))\n train_idxs = idxs - set(val_idxs).union(set(test_idxs))\n\n return train_idxs, val_idxs, test_idxs\n\n\nclass UsptoMit(ReactionDataset):\n def __init__(self, data_path):\n path = Path(data_path)\n df = pd.read_pickle(path)\n reactants = df[\"reactants_mol\"].tolist()\n products = df[\"products_mol\"].tolist()\n reactant_lengths = df[\"reactant_lengths\"].tolist()\n product_lengths = df[\"product_lengths\"].tolist()\n\n super().__init__(reactants, products, seq_lengths=product_lengths)\n\n self.train_idxs, self.val_idxs, self.test_idxs = self._save_idxs(df)\n\n def _save_idxs(self, df):\n val_idxs = df.index[df[\"set\"] == \"valid\"].tolist()\n test_idxs = df.index[df[\"set\"] == \"test\"].tolist()\n\n idxs_intersect = set(val_idxs).intersection(set(test_idxs))\n if len(idxs_intersect) > 0:\n raise ValueError(f\"Val idxs and test idxs overlap\")\n\n idxs = set(range(len(df.index)))\n train_idxs = idxs - set(val_idxs).union(set(test_idxs))\n\n return train_idxs, val_idxs, test_idxs\n\n\nclass MolOptDataset(ReactionDataset):\n def __init__(self, data_path):\n path = Path(data_path)\n df = pd.read_csv(path)\n data_in = df[\"Input\"].tolist()\n data_out = df[\"Output\"].tolist()\n\n super().__init__(data_in, data_out)\n\n self.train_idxs, self.val_idxs, self.test_idxs = self._save_idxs(df)\n\n def _save_idxs(self, df):\n val_idxs = df.index[df[\"Set\"] == \"validation\"].tolist()\n test_idxs = df.index[df[\"Set\"] == \"test\"].tolist()\n\n idxs_intersect = set(val_idxs).intersection(set(test_idxs))\n if len(idxs_intersect) > 0:\n raise ValueError(f\"Val idxs and test idxs overlap\")\n\n idxs = set(range(len(df.index)))\n train_idxs = idxs - set(val_idxs).union(set(test_idxs))\n\n return train_idxs, val_idxs, test_idxs\n\n\nclass MoleculeDataset(_AbsDataset):\n def __init__(\n self,\n molecules,\n seq_lengths=None,\n transform=None,\n train_idxs=None,\n val_idxs=None,\n test_idxs=None\n ):\n super(MoleculeDataset, self).__init__()\n\n self.molecules = molecules\n self.seq_lengths = seq_lengths\n self.transform = transform\n self.train_idxs = train_idxs\n self.val_idxs = val_idxs\n self.test_idxs = test_idxs\n\n def __len__(self):\n return len(self.molecules)\n\n def __getitem__(self, item):\n molecule = self.molecules[item]\n if self.transform is not None:\n molecule = self.transform(molecule)\n\n return molecule\n\n def split_idxs(self, val_idxs, test_idxs):\n val_mols = [self.molecules[idx] for idx in val_idxs]\n val_lengths = [self.seq_lengths[idx] for idx in val_idxs] if self.seq_lengths is not None else None\n val_dataset = MoleculeDataset(val_mols, val_lengths, self.transform)\n\n test_mols = [self.molecules[idx] for idx in 
test_idxs]\n test_lengths = [self.seq_lengths[idx] for idx in test_idxs] if self.seq_lengths is not None else None\n test_dataset = MoleculeDataset(test_mols, test_lengths, self.transform)\n\n train_idxs = set(range(len(self))) - set(val_idxs).union(set(test_idxs))\n train_mols = [self.molecules[idx] for idx in sorted(train_idxs)]\n train_lengths = [self.seq_lengths[idx] for idx in train_idxs] if self.seq_lengths is not None else None\n train_dataset = MoleculeDataset(train_mols, train_lengths, self.transform)\n\n return train_dataset, val_dataset, test_dataset\n\n\nclass Chembl(MoleculeDataset):\n def __init__(self, data_path):\n path = Path(data_path)\n df = pd.read_pickle(path)\n\n molecules = df[\"molecules\"].tolist()\n lengths = df[\"lengths\"].tolist()\n train_idxs, val_idxs, test_idxs = self._save_idxs(df)\n\n super().__init__(\n molecules,\n seq_lengths=lengths,\n train_idxs=train_idxs,\n val_idxs=val_idxs,\n test_idxs=test_idxs\n )\n\n def _save_idxs(self, df):\n val_idxs = df.index[df[\"set\"] == \"val\"].tolist()\n test_idxs = df.index[df[\"set\"] == \"test\"].tolist()\n\n idxs_intersect = set(val_idxs).intersection(set(test_idxs))\n if len(idxs_intersect) > 0:\n raise ValueError(f\"Val idxs and test idxs overlap\")\n\n idxs = set(range(len(df.index)))\n train_idxs = idxs - set(val_idxs).union(set(test_idxs))\n\n return train_idxs, val_idxs, test_idxs\n\n\nclass ZincSlice(MoleculeDataset):\n def __init__(self, df):\n smiles = df[\"smiles\"].tolist()\n train_idxs, val_idxs, test_idxs = self._save_idxs(df)\n\n super().__init__(\n smiles,\n train_idxs=train_idxs,\n val_idxs=val_idxs,\n test_idxs=test_idxs,\n transform=lambda smi: Chem.MolFromSmiles(smi)\n )\n\n def _save_idxs(self, df):\n val_idxs = df.index[df[\"set\"] == \"val\"].tolist()\n test_idxs = df.index[df[\"set\"] == \"test\"].tolist()\n\n idxs_intersect = set(val_idxs).intersection(set(test_idxs))\n if len(idxs_intersect) > 0:\n raise ValueError(f\"Val idxs and test idxs overlap\")\n\n idxs = set(range(len(df.index)))\n train_idxs = idxs - set(val_idxs).union(set(test_idxs))\n\n return train_idxs, val_idxs, test_idxs\n\n\nclass Zinc(ZincSlice):\n def __init__(self, data_path):\n path = Path(data_path)\n\n # If path is a directory then read every subfile\n if path.is_dir():\n df = self._read_dir_df(path)\n else:\n df = pd.read_csv(path)\n\n super().__init__(df)\n\n def _read_dir_df(self, path):\n # num_cpus = 4\n # executor = ProcessPoolExecutor(num_cpus)\n # files = [f for f in path.iterdir()]\n # futures = [executor.submit(pd.read_csv, f) for f in files]\n # dfs = [future.result() for future in futures]\n\n dfs = [pd.read_csv(f) for f in path.iterdir()]\n\n zinc_df = pd.concat(dfs, ignore_index=True, copy=False)\n return zinc_df\n\n\nclass ConcatMoleculeDataset(MoleculeDataset):\n \"\"\" Dataset class for storing (concatenated) molecules \n\n Automatically constructs a dataset which contains rdkit molecules\n Roughly a third of these molecule objects are single molecules,\n another third contain two molecules and the final third contain three molecules.\n\n The molecules to be concatenated are randomly selected, \n so the ordering from the original data is not preserved.\n \"\"\"\n\n def __init__(\n self, \n dataset: MoleculeDataset,\n join_token: Optional[str] = \".\",\n double_mol_prob: Optional[float] = 0.333,\n triple_mol_prob: Optional[float] = 0.333\n ):\n self.join_token = join_token\n self.double_mol_prob = double_mol_prob\n self.triple_mol_prob = triple_mol_prob\n\n self.original_dataset = dataset\n\n 
concat_idxs = self._construct_concat_idxs(dataset)\n\n super(ConcatMoleculeDataset, self).__init__(\n concat_idxs, \n transform=self._process_molecule_idxs,\n train_idxs=dataset.train_idxs,\n val_idxs=dataset.val_idxs,\n test_idxs=dataset.test_idxs\n )\n\n def _construct_concat_idxs(self, dataset):\n idxs = list(range(len(dataset)))\n random.shuffle(idxs)\n\n curr = 0\n molecule_idxs = []\n\n added_prob = self.double_mol_prob + self.triple_mol_prob\n \n while curr <= len(idxs) - 1:\n rand = random.random()\n\n # Use two molecules\n if rand < self.double_mol_prob and curr <= len(idxs) - 2:\n curr_idxs = [idxs[curr + i] for i in range(2)]\n molecule_idxs.append(curr_idxs)\n curr += 2\n\n # Or, Use three molecules together\n elif rand < added_prob and curr <= len(idxs) - 3:\n curr_idxs = [idxs[curr + i] for i in range(3)]\n molecule_idxs.append(curr_idxs)\n curr += 3\n\n # Or, take a single molecule\n else:\n curr_idx = idxs[curr]\n molecule_idxs.append([curr_idx])\n curr += 1\n\n return molecule_idxs\n\n def _process_molecule_idxs(self, idxs):\n if len(idxs) == 1:\n molecule = self.original_dataset[idxs[0]]\n else:\n molecule = self._concat_mols_from_idxs(idxs, self.original_dataset)\n\n return molecule\n\n def _concat_mols_from_idxs(self, idxs, dataset):\n mols = [dataset[idx] for idx in idxs]\n concat_mol = functools.reduce(lambda m1, m2: Chem.CombineMols(m1, m2), mols)\n return concat_mol\n", "import torch\nimport pickle\nimport argparse\nfrom rdkit import Chem\nfrom pathlib import Path\n\nimport molbart.util as util\nfrom molbart.decoder import DecodeSampler\nfrom molbart.models.pre_train import BARTModel\nfrom molbart.data.datasets import MoleculeDataset\nfrom molbart.data.datamodules import MoleculeDataModule\n\n\nDEFAULT_BATCH_SIZE = 32\nDEFAULT_NUM_BEAMS = 5\n\n\nclass SmilesError(Exception):\n def __init__(self, idx, smi):\n message = f\"RDKit could not parse smiles {smi} at index {idx}\"\n super().__init__(message)\n\n\ndef build_dataset(args):\n text = Path(args.reactants_path).read_text()\n smiles = text.split(\"\\n\")\n smiles = [smi for smi in smiles if smi != \"\" and smi is not None]\n molecules = [Chem.MolFromSmiles(smi) for smi in smiles]\n\n # Check for parsing errors\n for idx, mol in enumerate(molecules):\n if mol is None:\n raise SmilesError(idx, smiles[idx])\n\n test_idxs = list(range(len(molecules)))\n dataset = MoleculeDataset(molecules, train_idxs=[], val_idxs=[], test_idxs=test_idxs)\n return dataset\n\n\ndef build_datamodule(args, dataset, tokeniser, max_seq_len):\n dm = MoleculeDataModule(\n dataset,\n tokeniser,\n args.batch_size,\n max_seq_len,\n val_idxs=dataset.val_idxs,\n test_idxs=dataset.test_idxs,\n augment=False\n )\n return dm\n\n\ndef predict(model, test_loader):\n device = \"cuda:0\" if util.use_gpu else \"cpu\"\n model = model.to(device)\n model.eval()\n\n smiles = []\n log_lhs = []\n\n for b_idx, batch in enumerate(test_loader):\n device_batch = {\n key: val.to(device) if type(val) == torch.Tensor else val for key, val in batch.items()\n }\n with torch.no_grad():\n smiles_batch, log_lhs_batch = model.sample_molecules(device_batch, sampling_alg=\"beam\")\n\n smiles.extend(smiles_batch)\n log_lhs.extend(log_lhs_batch)\n\n return smiles, log_lhs\n\n\ndef write_predictions(args, smiles, log_lhs):\n output_str = \"\"\n for smiles_beams, log_lhs_beams in zip(smiles, log_lhs):\n for smi, log_lhs in zip(smiles_beams, log_lhs_beams):\n output_str += f\"{smi},{str(log_lhs)}\\n\"\n\n output_str += \"\\n\"\n\n p = Path(args.products_path)\n 
p.write_text(output_str)\n\n\ndef main(args):\n print(\"Building tokeniser...\")\n tokeniser = util.load_tokeniser(args.vocab_path, args.chem_token_start_idx)\n print(\"Finished tokeniser.\")\n\n print(\"Reading dataset...\")\n dataset = build_dataset(args)\n print(\"Finished dataset.\")\n\n sampler = DecodeSampler(tokeniser, util.DEFAULT_MAX_SEQ_LEN)\n pad_token_idx = tokeniser.vocab[tokeniser.pad_token]\n\n print(\"Loading model...\")\n model = util.load_bart(args, sampler)\n model.num_beams = args.num_beams\n sampler.max_seq_len = model.max_seq_len\n print(\"Finished model.\")\n\n print(\"Building data loader...\")\n dm = build_datamodule(args, dataset, tokeniser, model.max_seq_len)\n dm.setup()\n test_loader = dm.test_dataloader()\n print(\"Finished loader.\")\n\n print(\"Evaluating model...\")\n smiles, log_lhs = predict(model, test_loader)\n write_predictions(args, smiles, log_lhs)\n print(\"Finished evaluation.\")\n\n print(\"Printing unknown tokens...\")\n tokeniser.print_unknown_tokens()\n print(\"Complete.\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Program level args\n parser.add_argument(\"--reactants_path\", type=str)\n parser.add_argument(\"--model_path\", type=str)\n parser.add_argument(\"--products_path\", type=str)\n parser.add_argument(\"--vocab_path\", type=str, default=util.DEFAULT_VOCAB_PATH)\n parser.add_argument(\"--chem_token_start_idx\", type=int, default=util.DEFAULT_CHEM_TOKEN_START)\n\n # Model args\n parser.add_argument(\"--batch_size\", type=int, default=DEFAULT_BATCH_SIZE)\n parser.add_argument(\"--num_beams\", type=int, default=DEFAULT_NUM_BEAMS)\n\n args = parser.parse_args()\n main(args)\n" ]
[ [ "torch.manual_seed", "torch.tensor" ], [ "pandas.concat", "pandas.read_pickle", "pandas.read_csv" ], [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alessandro-gentilini/geostatsmodels
[ "4f50fbd0b53a504050ac6df8dc1db84f23836649" ]
[ "geostatsmodels/variograms.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\n\nimport geostatsmodels.utilities as utilities\n\n\ndef lagindices(pwdist, lag, tol):\n '''\n Input: (pwdist) square NumPy array of pairwise distances\n (lag) the distance, h, between points\n (tol) the tolerance we are comfortable with around (lag)\n Output: (ind) list of tuples; the first element is the row of\n (data) for one point, the second element is the row\n of a point (lag)+/-(tol) away from the first point,\n e.g., (3,5) corresponds fo data[3,:], and data[5,:]\n '''\n # grab the coordinates in a given range: lag +/- tolerance\n i, j = np.where((pwdist >= lag - tol) & (pwdist < lag + tol))\n # take out the repeated elements,\n # since p is a *symmetric* distance matrix\n indices=np.c_[i, j][np.where(j > i)]\n return indices\n\n\ndef anilagindices(data, pwdist, lag, tol, angle, atol):\n '''\n Input: (data) NumPy array where the frist two columns\n are the spatial coordinates, x and y, and\n the third column is the variable of interest\n (pwdist) square NumPy array of pairwise distances\n (lag) the distance, h, between points\n (tol) the tolerance we are comfortable with around (lag)\n (angle) float, [0,360), North = 0 --> 360 clockwise\n (atol) number of degrees about (angle) to consider\n '''\n index = lagindices(pwdist, lag, tol)\n brngs = utilities.bearings(data, index)\n bridx = list(zip(brngs, index))\n index = [idx.tolist() for br, idx in bridx if utilities.inangle(br, angle, atol)]\n # add 180 to the angle and take the modulus\n angle = (angle + 180) % 360\n index += [idx.tolist() for br, idx in bridx if utilities.inangle(br, angle, atol)]\n return np.array(index)\n\n\ndef semivariance(data, indices):\n '''\n Input: (data) NumPy array where the first two columns\n are the spatial coordinates, x and y, and\n the third column is the variable of interest\n (indices) indices of paired data points in (data)\n Output: (z) semivariance value at lag (h) +/- (tol)\n '''\n # take the squared difference between\n # the values of the variable of interest\n # the semivariance is half the mean squared difference\n i=indices[:, 0]\n j=indices[:, 1]\n z=(data[i, 2] - data[j, 2])**2.0\n return np.mean(z) / 2.0\n\n\ndef semivariogram(data, lags, tol):\n '''\n Input: (data) NumPy array where the first two columns\n are the spatial coordinates, x and y\n (lag) the distance, h, between points\n (tol) the tolerance we are comfortable with around (lag)\n Output: (sv) <2xN> NumPy array of lags and semivariogram values\n '''\n return variogram(data, lags, tol, 'semivariogram')\n\n\ndef covariance(data, indices):\n '''\n Input: (data) NumPy array where the first two columns\n are the spatial coordinates, x and y\n (lag) the distance, h, between points\n (tol) the tolerance we are comfortable with around (lag)\n Output: (z) covariance value at lag (h) +/- (tol)\n '''\n # grab the indices of the points\n # that are lag +/- tolerance apart\n i=indices[:, 0]\n j=indices[:, 1]\n return np.cov(data[i, 2], data[j, 2])[0][1]\n\n\ndef covariogram(data, lags, tol):\n '''\n Input: (data) NumPy array where the first two columns\n are the spatial coordinates, x and y\n (lag) the distance, h, between points\n (tol) the tolerance we are comfortable with around (lag)\n Output: (cv) <2xN> NumPy array of lags and covariogram values\n '''\n return variogram(data, lags, tol, 'covariogram')\n\n\ndef variogram(data, lags, tol, method):\n '''\n Input: (data) NumPy array where the first two columns\n are the spatial coordinates, x and y\n (lag) the distance, h, between points\n 
(tol) the tolerance we are comfortable with around (lag)\n (method) either 'semivariogram', or 'covariogram'\n Output: (cv) <2xN> NumPy array of lags and variogram values\n '''\n # calculate the pairwise distances\n pwdist = utilities.pairwise(data)\n # create a list of lists of indices of points having the ~same lag\n index = [lagindices(pwdist, lag, tol) for lag in lags]\n # remove empty \"lag\" sets, prevents zero division error in [co|semi]variance()\n index = list(filter(lambda x: len(x) > 0, index))\n # calculate the variogram at different lags given some tolerance\n if method in ['semivariogram', 'semi', 'sv', 's']:\n v = [semivariance(data, indices) for indices in index]\n elif method in ['covariogram', 'cov', 'co', 'cv', 'c']:\n v = [covariance(data, indices) for indices in index]\n # bundle the semivariogram values with their lags\n return np.c_[lags, v].T\n" ]
[ [ "numpy.cov", "numpy.array", "numpy.where", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shineware/KOMORANPy
[ "b8c1904b42a0bdfcd26c4c85cb37cd8cb48ffb6a" ]
[ "KOMORANPy/training/model/transition.py" ]
[ "import numpy as np\nimport pickle\nimport gzip\n\n\nclass Transition:\n def __init__(self, size):\n self.score_matrix = np.full((size, size), -np.inf)\n\n def put(self, prev_pos_id, cur_pos_id, score):\n self.score_matrix[prev_pos_id][cur_pos_id] = score\n\n def get(self, prev_pos_id, cur_pos_id):\n score = self.score_matrix[prev_pos_id][cur_pos_id]\n # has_transition_score = np.where(score == -np.inf, False, True)\n # print(f\"{prev_pos_id}->{cur_pos_id}:{score}\")\n # print(has_transition_score)\n # if has_transition_score:\n # return score\n # else:\n # return None\n if score == -np.inf:\n return None\n return score\n\n def save(self, filename):\n with gzip.open(filename, 'wb') as f:\n pickle.dump(self.score_matrix, f)\n\n def load(self, filename):\n with gzip.open(filename, 'rb') as f:\n self.score_matrix = pickle.load(f)\n" ]
[ [ "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Joshuaalbert/IonoTomo
[ "9f50fbac698d43a824dd098d76dce93504c7b879", "9f50fbac698d43a824dd098d76dce93504c7b879" ]
[ "src/ionotomo/utils/gaussian_process_expected_improvement.py", "src/ionotomo/scripts/2d_vs_3d/temporal_powerspectrum.py" ]
[ "import numpy as np\nimport pylab as plt\nfrom scipy.special import erf\nfrom scipy.integrate import simps\nfrom scipy.linalg import cho_solve\n#from ChoSolver import choSolve, choBackSubstitution\n \ndef styblinsky(x):\n return (x[0]**4 - 16*x[0]**2 + 5*x[0] + x[1]**4 - 16*x[1]**2 + 5*x[1])/2.\n\ndef rosenbrock(x):\n a = 1\n b = 100\n return (a-x[0])**2 + b*(x[1] - x[0]**2)**2\n\ndef complexInjunction(x):\n Nm = len(x)\n a = np.arange(Nm)\n A = np.outer(np.cos(np.arange(Nm)),np.sin(1j*np.arange(Nm))-Nm)\n y = np.exp(1j*A.dot(x))\n return -np.abs((np.min(y)/np.max(y)).real) \n \ndef mean(x):\n #return styblinsky(x)\n return np.log10(1+rosenbrock(x))# + rosenbrock((x-1))\n return np.sqrt((x[0]-0.5)**2 + (x[1])**2)\n\n\ndef M52(XX,theta):\n theta0 = theta[0]\n nu = theta[1]\n lengthScales = theta[2:]\n N = XX.shape[0]\n r2 = np.zeros([N,N],dtype=np.double)\n K = np.zeros([N,N],dtype=np.double)\n i = 0\n while i < len(lengthScales):\n r2 += (XX[:,i,:,i]/lengthScales[i])**2\n i += 1\n K += r2*(5./3.)\n np.sqrt(5*r2,out=r2)\n K += 1+r2\n np.exp(-r2,out=r2)\n K *= r2\n K *= theta0\n return K\n\ndef expK(XX,theta):\n theta0 = theta[0]\n nu = theta[1]\n lengthScales = theta[2:]\n N = XX.shape[0]\n K = np.zeros([N,N],dtype=np.double)\n i = 0\n while i < len(lengthScales):\n K -= (XX[:,i,:,i]/lengthScales[i])**2\n i += 1\n K /= 2.\n np.exp(K,out=K)\n K *= theta0\n #K += nu**2*np.eye(N)\n return K\n\ndef expK_derivative(XX,theta):\n theta0 = theta[0]\n nu = theta[1]\n lengthScales = theta[2:]\n N = XX.shape[0]\n Kdiff = np.zeros([N,N,len(theta)],dtype=np.double)\n K = np.zeros([N,N],dtype=np.double)\n #0 -> exp(-r^2)\n #1 -> 2*eye(N)*nu\n #2: ->-2r*eye(-r^2)*-2*(x1[i]-x2[i])^2/(lengthScale[i])^3\n i = 0\n while i < len(lengthScales):\n Kdiff[:,:,0] -= (XX[:,i,:,i]/lengthScales[i])**2\n Kdiff[:,:,2+i] += 4*XX[:,i,:,i]**2/lengthScales[i]**3\n i += 1\n #*r\n #np.rollaxis(K[:,:,2:],2,0) *= np.sqrt(-Kdiff[:,:,0])\n K /= 2.\n np.exp(K,out=K)\n K *= theta0\n K += nu**2*np.eye(N)\n return K\n\nclass Prior(object):\n def __init__(self, **kwargs):\n for key in kwargs.keys():\n setattr(self,key,kwargs[key])\n def domain(self):\n '''Get domain of prior'''\n return None\n def sample(self,N=1):\n '''get a sample from the distribution'''\n return None\n def pdf(self,x):\n '''get the pdf at x'''\n return None\n\nclass UniformPrior(Prior):\n def __init__(self,xmin,xmax):\n d = {\"xmin\":float(min(xmin,xmax)),\"xmax\":float(max(xmin,xmax)),\"width\":float(max(xmin,xmax) - min(xmin,xmax))}\n super(UniformPrior,self).__init__(**d)\n def sample(self,N=1):\n return np.random.uniform(low=self.xmin,high=self.xmax,size=N)\n def pdf(self,x):\n out = np.ones_like(x)\n out /= self.width\n out[x>self.xmax] *= 0.\n out[x<self.xmin] *= 0.\n return out\n \nclass NormalPrior(Prior):\n def __init__(self,mean,std):\n d = {\"mean\":float(mean),\"std\":float(std)}\n super(NormalPrior,self).__init__(**d)\n def sample(self,N=1):\n return self.mean + self.std*np.random.normal(size=N)\n def pdf(self,x):\n return np.exp(-(x - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std\n\nclass LogNormalPrior(Prior):\n def __init__(self,mean,std):\n d = {\"mean\":float(mean),\"std\":float(std)}\n super(LogNormalPrior,self).__init__(**d)\n def sample(self,N=1):\n return np.random.lognormal(mean=self.mean, sigma=self.std, size=N)\n def pdf(self,x):\n return np.exp(-(np.log(x) - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std/x\n\nclass ClassPrior(Prior):\n def __init__(self,numClasses,weights=None):\n if weights is None:\n weights = 
np.ones(numClasses,dtype=np.double)/numClasses\n d = {\"numClasses\":float(numClasses),\"weights\":float(weights)}\n super(ClassPrior,self).__init__(**d)\n def sample(self,N=1):\n samples = np.zeros(N,dtype=np.int64)\n i = 0\n while i < N:\n c = -1\n while c == -1: \n c_ = np.random.randint(self.numClasses)\n if np.random.uniform() < self.weights[c_]:\n c = c_\n samples[i] = c\n i += 1 \n return samples\n def pdf(self,x):\n return self.weights[np.int64(x)]\n \nclass DiscretePrior(Prior):\n def __init__(self,values,prior=None):\n if prior is None:\n prior = UniformPrior(np.min(values),np.max(values))\n d = {\"values\":values,\"prior\":prior}\n super(DiscretePrior,self).__init__(**d)\n def sample(self,N=1):\n samples = np.zeros(N,dtype=np.int64)\n i = 0\n while i < N:\n c = -1\n while c == -1: \n c_ = np.random.randint(len(self.values))\n if np.random.uniform() < self.prior.pdf(self.values[c_]):\n c = c_\n samples[i] = self.values[c]\n i += 1 \n return samples\n def pdf(self,x):\n return self.prior.pdf(x)\n \nif __name__ == '__main__':\n def sampleX(xPriors,N):\n X = np.zeros([N,len(xPriors)],dtype=np.double)\n for i in range(len(xPriors)):\n X[:,i] = xPriors[i].sample(N)\n return X\n \n def computeAquisition(Xstar,X,y,thetaPriors,iteration=1):\n Xstar = np.atleast_2d(Xstar)\n shape = []\n indices = []\n for thetaPrior in thetaPriors:\n ar = thetaPrior.values\n shape.append(len(ar))\n indices.append(np.arange(len(ar)))\n n = len(thetaPriors)\n postTheta = np.zeros(shape,dtype=np.double)\n COMP = np.zeros(shape,dtype=np.double)\n DF = np.zeros(shape,dtype=np.double)\n LML = np.zeros(shape,dtype=np.double)\n Xboth = np.vstack([X,Xstar])\n XXboth = np.subtract.outer(Xboth,Xboth)\n arg = np.argsort(y)\n xbest = X[arg[0],:]\n fbest = y[arg[0]]\n aq_full = np.zeros([Xstar.shape[0]]+shape,dtype=np.double)\n for idx in product(*indices):\n theta = np.zeros(len(indices),dtype=np.double)\n for i in range(len(idx)):\n theta[i] = thetaPriors[i].values[idx[i]]\n nu = theta[1]\n #Kboth = expK(XXboth,theta)\n Kboth = M52(XXboth,theta)\n K00 = Kboth[0:X.shape[0],0:X.shape[0]] \n K00 += nu**2*np.eye(X.shape[0])\n K01 = Kboth[0:X.shape[0],X.shape[0]:]\n K10 = K01.T\n K11 = Kboth[X.shape[0]:,X.shape[0]:]\n L = np.linalg.cholesky(K00)\n alpha = cho_solve((L,False),y)#choSolve(L,y,False)\n #mu[j] = sum_i alpha[i]K01[i,j]\n mu = K10.dot(alpha)\n #cov = K11 - K10.(K00+sigma)(^-1).K01\n V = choBackSubstitution(L,K01,True,False)\n std = np.sqrt(np.diag(K11 - V.T.dot(V)))\n gamma = (fbest - mu)/std\n #POI\n cum = (1 + erf(gamma/np.sqrt(2)))/2.\n #return\n #EI\n aq = std*(gamma*cum + np.exp(-gamma**2/2)/np.sqrt(2*np.pi))\n #aq = (1./(iteration+1))*std - mu\n datafit = -y.dot(alpha)/2.\n complexity = np.sum(np.log(np.diag(L)))\n marLik = np.exp(datafit - complexity - np.log(2*np.pi)*n/2.)\n COMP[idx] = complexity\n DF[idx] = datafit\n LML[idx] = np.log(marLik)\n prior = 1.\n for t,tp in zip(theta,thetaPriors):\n prior *= tp.pdf(t) \n postTheta[idx] = marLik * prior \n aq_full[ [slice(0,Xstar.shape[0])]+list(idx)] = aq*postTheta[idx]\n prob = np.copy(postTheta)\n for axis in range(len(thetaPriors)):\n aq_full = simps(aq_full,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis)\n prob = simps(prob,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis-1)\n aq_full /= prob\n postTheta /= prob\n return aq_full,postTheta\n\n def maximizeAquisition(xPriors,X,y,thetaPriors=None,iteration=0):\n '''Using gradient (or steepest if desired) maximize the Expected Improvment aquisition\n while 
integration over aquisition hyper parameters.\n '''\n if thetaPriors is None:\n #Set up thetaPriors\n res = 10\n #theta0 ~ max(y) - min(y), uniform, log spacing 4 mag\n m2 = np.max(y) - np.min(y)\n m1 = m2/1e4\n theta0Prior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),\n prior=UniformPrior(m1,m2))\n # nu ~ obs noise. similarly but scaled down by 10%\n m2 = (np.max(y) - np.min(y))/10.\n m1 = (m2/1e4)/10.\n nuPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),\n prior=UniformPrior(m1,m2))\n thetaPriors = [theta0Prior,nuPrior]\n for i in range(len(xPriors)):\n #handles uniform x priors right now\n m2 = (xPriors[i].xmax - xPriors[i].xmin)*10.\n m1 = (xPriors[i].xmax - xPriors[i].xmin)/10.\n lsPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),\n prior=UniformPrior(m1,m2))\n thetaPriors.append(lsPrior)\n for thetaPrior in thetaPriors:\n assert isinstance(thetaPrior,DiscretePrior), \"one theta prior is not discrete\"\n from itertools import product\n #First sample points to initialize maximization\n #create aquisition at x\n Xstar = sampleX(xPriors,max(2,len(thetaPriors))**max(2,len(xPriors)))\n Xstar = sampleX(xPriors,10**max(2,len(xPriors)))\n arg = np.argsort(y)\n xbest = X[arg[0],:]\n fbest = y[arg[0]]\n aq_all = []\n Xstar_all = []\n N = len(y)\n aq_init,postTheta = computeAquisition(Xstar,X,y,thetaPriors,iteration)\n aq_all.append(aq_init)\n Xstar_all.append(Xstar)\n arg = np.argsort(aq_init)\n Xsimp = Xstar[arg[-len(xPriors)-1:],:]\n aq_simp = aq_init[arg[-len(xPriors)-1:]]\n #min to max\n alpha,gamma,rho,sigma = 1.,2.,0.5,0.5\n iter = 0\n NonCovergent = True\n while NonCovergent:\n if iter >= 5:\n break\n iter += 1\n #order for min (flip aq sign)\n arg = np.argsort(-aq_simp)\n aq_simp = aq_simp[arg]\n Xsimp = Xsimp[arg,:]\n #print(Xsimp,aq_simp)\n #centorid except last\n x0 = np.mean(Xsimp[:-1,:],axis=0)\n #reflection\n xr = x0 + alpha*(x0 - Xsimp[-1,:])\n aq_r,postTheta = computeAquisition(xr,X,y,thetaPriors,iteration)\n #print(xr,aq_r)\n aq_all.append(aq_r)\n Xstar_all.append(xr)\n if -aq_simp[0] <= -aq_r and -aq_r < -aq_simp[-2]:\n Xsimp[-1,:] = xr\n aq_simp[-1] = aq_r\n continue\n #expansion\n if -aq_r < -aq_simp[0]:\n xe = x0 + gamma*(xr - x0)\n aq_e,postTheta = computeAquisition(xe,X,y,thetaPriors,iteration)\n aq_all.append(aq_e)\n Xstar_all.append(xe)\n if -aq_e < -aq_r:\n Xsimp[-1,:] = xe\n aq_simp[-1] = aq_e\n continue\n else:\n Xsimp[-1,:] = xr\n aq_simp[-1] = aq_r\n continue\n #contractions\n xc = x0 + rho*(Xsimp[-1,:] - x0)\n aq_c,postTheta = computeAquisition(xc,X,y,thetaPriors,iteration)\n aq_all.append(aq_c)\n Xstar_all.append(xc)\n if -aq_c < -aq_simp[-1]:\n Xsimp[-1,:] = xc\n aq_simp[-1] = aq_c\n continue\n #shrink\n for i in range(Xsimp.shape[0]):\n Xsimp[i,:] = Xsimp[0,:] + sigma*(Xsimp[i,:] - Xsimp[0,:]) \n xbest_nm = Xsimp[0,:]\n #print(xbest_nm)\n aq_all = np.hstack(aq_all)\n Xstar = np.vstack(Xstar_all)\n arg = np.argsort(aq_all)\n xbest = Xstar[arg[-1],:]\n if True: \n vmin = np.min(aq_all)\n vmax = np.max(aq_all)\n plt.figure()\n sc=plt.scatter(Xstar[:,0],Xstar[:,1],c=aq_all,\n vmin=vmin,vmax=vmax,alpha=0.6)\n plt.scatter(xbest[0],xbest[1],c='red',alpha=0.6)\n plt.scatter(xbest_nm[0],xbest_nm[1],c='red',marker='*',alpha=0.6)\n plt.colorbar(sc)\n plt.show() \n fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n ax1.plot(thetaPriors[0].values,\n simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),\n thetaPriors[2].values,axis=2),\n thetaPriors[1].values,axis=1))\n ax1.set_xlabel(\"theta0\")\n 
ax2.plot(thetaPriors[1].values,\n simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),\n thetaPriors[2].values,axis=2),\n thetaPriors[0].values,axis=0))\n ax2.set_xlabel(\"nu\")\n ax3.plot(thetaPriors[2].values,\n simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),\n thetaPriors[1].values,axis=1),\n thetaPriors[0].values,axis=0))\n ax3.set_xlabel(\"ls0\")\n ax4.plot(thetaPriors[3].values,\n simps(simps(simps(postTheta,thetaPriors[2].values,axis=2),\n thetaPriors[1].values,axis=1),\n thetaPriors[0].values,axis=0))\n ax4.set_xlabel(\"ls1\")\n plt.show() \n return xbest\n \n #Set up data\n np.random.seed(12344)\n nu = 0.01\n xPriors = [UniformPrior(-1,1.5),\n UniformPrior(-1,1.5)]\n thetaPriors = [DiscretePrior(10**np.linspace(np.log10(0.1),np.log10(5),10),prior=UniformPrior(0,5)),\n DiscretePrior(10**np.linspace(np.log10(0.001),np.log10(0.5),10),prior=LogNormalPrior(np.log(0.1),np.log(0.5/0.01))),\n DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5))),\n DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5)))]\n \n X,Y = np.meshgrid(np.linspace(xPriors[0].xmin,xPriors[0].xmax,100),\n np.linspace(xPriors[1].xmin,xPriors[1].xmax,100),\n indexing='ij')\n A = []\n for x,y in zip(X.flatten(),Y.flatten()):\n A.append(mean(np.array([x,y])))\n Niter = 10\n minidx = np.zeros([4,Niter],dtype=np.double)\n for r in range(4):\n score = []\n #plt.figure()\n c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)\n plt.clabel(c1,inline=1,fontsize=10)\n plt.title(\"True\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n arg = np.argsort(A)\n plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)\n #sample corners and center\n xCorners = []\n for xPrior in xPriors:\n xCorners.append([xPrior.xmin,xPrior.xmax])\n from itertools import product\n Xdata = []\n y = []\n for x in product(*xCorners):\n Xdata.append(np.array(x))\n y.append(mean(Xdata[-1]) + nu*np.random.normal())\n Xdata.append(np.mean(np.array(xCorners),axis=1))\n y.append(mean(Xdata[-1]) + nu*np.random.normal())\n Xdata = np.array(Xdata)\n y = np.array(y) \n sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)\n arg = np.argsort(y)\n plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)\n plt.colorbar(sc)\n plt.show() \n #do iterations to find min\n arg = np.argsort(y) \n fbest = y[arg[0]]\n xprev = Xdata[arg[0]]\n i = 0\n while i < Niter:\n #do gradient decent to find max of full aquisition\n xnext = maximizeAquisition(xPriors,Xdata,y,thetaPriors=None,iteration=i)\n xprev = xnext\n #print(y)\n f = mean(xnext) + nu*np.random.normal()\n Xdata = np.vstack([Xdata,xnext])\n y = np.hstack([y,f])\n fbest = np.min(y)\n score.append(f)\n print(xnext,f,fbest)\n i += 1\n c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)\n plt.clabel(c1,inline=1,fontsize=10)\n plt.title(\"True\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n arg = np.argsort(A)\n plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)\n sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)\n arg = np.argsort(y)\n plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)\n plt.colorbar(sc)\n plt.show() \n plt.plot(score)\n plt.ylabel('score (lower better)')\n plt.xlabel(\"iteration\")\n plt.show()\n minidx[r,:] = score\n plt.plot(np.mean(minidx,axis=0))\n plt.plot(np.mean(minidx,axis=0)+np.std(minidx,axis=0),ls='--')\n 
plt.plot(np.mean(minidx,axis=0)-np.std(minidx,axis=0),ls='--')\n plt.show()\n \n \n \n", "\n# coding: utf-8\n\n# In[1]:\n\n\nfrom ionotomo import *\nfrom ionotomo.utils.gaussian_process import *\nfrom rathings.phase_unwrap import *\nimport pylab as plt\nimport numpy as np\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (10,10)\n\n\n\n\n# In[2]:\n\n\n# data intake ~ 28GB\n\ndatapack = DataPack(filename=\"../rvw_data_analysis/rvw_datapack.hdf5\")\nprint(\"Loaded : {}\".format(datapack))\nantennas,antenna_labels = datapack.get_antennas(ant_idx=-1)\ntimes,timestamps = datapack.get_times(time_idx=-1)\ndirections, patch_names = datapack.get_directions(dir_idx=-1)\nphase = datapack.get_phase(ant_idx=-1,time_idx=-1,dir_idx=-1,freq_idx=[0])\n \n\n\n# In[7]:\n\n\n## functions\n\ndef prepare_phase(phase):\n phase = phase_unwrapp1d(phase,axis=0)\n phase -= np.mean(phase)\n return phase\n\ndef opt_kernel(times, phase, K, sigma_y=0):\n \"\"\"Bayes Optimization\"\"\"\n assert len(times) < np.sqrt(1e6), \"Don't want to do too many ops\"\n X = np.array([times.gps]).T\n y = prepare_phase(phase)\n K.hyperparams = level2_solve(X,y,sigma_y,K,n_random_start=0)\n return K\n\ndef multi_opt_kernel(times, phase, K, sigma_y=0):\n \"\"\"Bayes Optimization over multiple directions\n times : array (num_times,)\n time array\n phase : array (num_times, num_directions)\n phases in several directions\n \"\"\"\n assert len(times) < np.sqrt(1e6), \"Don't want to do too many ops\"\n num_directions = phase.shape[1]\n \n X = [np.array([times.gps]).T]*num_directions\n y = prepare_phase(phase).T\n K.hyperparams = level2_multidataset_solve(X,y,[sigma_y]*num_directions,K,n_random_start=10)\n return K\n\ndef plot_prediction(times_predict, times, phase, K, sigma_y = 0,phase_true=None,ant_label=None):\n \"\"\"Level1 predictive\"\"\"\n assert len(times) < np.sqrt(1e6), \"Don't want to do too many ops\"\n X = np.array([times.gps]).T\n \n Xstar = X\n #y = prepare_phase(phase)\n y = phase\n ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)\n \n plt.plot(X[:,0],y,label='data')\n plt.plot(Xstar[:,0],ystar,c='red',ls='--')\n plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='--')\n plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='--')\n if ant_label is not None:\n plt.title(ant_label)\n plt.xlabel('time (s)')\n plt.ylabel('phase (rad)')\n \n if phase_true is not None:\n #y_true = prepare_phase(phase_true)\n Xstar = np.array([times_predict.gps]).T\n ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)\n y_true = phase_true\n plt.plot(Xstar[:,0],y_true,label=\"true\")\n plt.plot(Xstar[:,0],ystar,c='red',ls='-',label='pred')\n plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='-',label=r'$+\\sigma$')\n plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='-',label=r'$-\\sigma$')\n plt.legend(frameon=False)\n plt.tight_layout()\n plt.show()\n\n\n# In[15]:\n\n\nant_id = 52\n\nsigma_y = 2*np.pi/180.\n\nfor ant_id in range(62):\n print(\"Using : {}\".format(antenna_labels[ant_id]))\n phases = prepare_phase(phase[ant_id,:,:,0])\n # plt.imshow(phases,aspect='auto')\n # plt.colorbar()\n # plt.show()\n\n K1 = Diagonal(1)\n K1.set_hyperparams_bounds([1e-5,10*np.pi/180.],name='sigma')\n K2 = SquaredExponential(1,l=20)\n K2.set_hyperparams_bounds([8,50],name='l')\n K2.set_hyperparams_bounds([1e-5,5],name='sigma')\n K3 = SquaredExponential(1,l=220)\n K3.set_hyperparams_bounds([50,1000],name='l')\n K3.set_hyperparams_bounds([1e-5,5],name='sigma')\n K = K1 + K2 + K3\n\n K = 
multi_opt_kernel(times[:200],phases[:200],K,sigma_y=sigma_y)\n print(K)\n #plot_prediction(times[200:300],times[:200:2],phases[:200:2], K,sigma_y=0.03,phase_true=phases[200:300],ant_label=antenna_labels[ant_id])\n\n\n\n\n\n\n# In[8]:\n\n\n#for ant_id in range(62):\n\nant_id=60\nprint(\"Using : {}\".format(antenna_labels[ant_id]))\nprint(phase.shape)\nphases = prepare_phase(phase[ant_id,:,0,0])\n\nK1 = Diagonal(1)\n# K2 = SquaredExponential(1)\n# K2.set_hyperparams_bounds([50,1000],name='l')\nK3 = RationalQuadratic(1)\nK3.set_hyperparams_bounds([50,500],name='l')\n# K4 = DotProduct(1,c=times[0].gps)\nK = K1 + K3\n\nK = opt_kernel(times[:200:2],phases[:200:2],K,sigma_y=0.03)\nprint(K)\nplot_prediction(times[200:300],times[:200:2],phases[:200:2], K,sigma_y=0.03,phase_true=phases[200:300],ant_label=antenna_labels[ant_id])\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.random.lognormal", "numpy.diag", "numpy.sqrt", "numpy.linspace", "numpy.max", "numpy.mean", "numpy.exp", "numpy.random.randint", "numpy.hstack", "numpy.ones_like", "numpy.arange", "numpy.eye", "scipy.linalg.cho_solve", "numpy.copy", "numpy.std", "numpy.zeros", "numpy.log", "numpy.min", "numpy.atleast_2d", "numpy.int64", "numpy.log10", "numpy.linalg.cholesky", "scipy.integrate.simps", "numpy.argsort", "numpy.array", "numpy.random.seed", "numpy.ones", "numpy.subtract.outer", "numpy.random.normal", "numpy.random.uniform", "numpy.vstack" ], [ "numpy.diag", "numpy.array", "numpy.mean", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rose-brain/deepvariant
[ "59687bab3a93ba0674cc21edf71caf336b01f138" ]
[ "deepvariant/core/python/vcf_writer_wrap_test.py" ]
[ "# Copyright 2017 Google Inc.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Tests for VcfWriter CLIF python wrappers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom deepvariant.core.genomics import variants_pb2\nfrom deepvariant.core import genomics_io\nfrom deepvariant.core import io_utils\nfrom deepvariant.core import test_utils\nfrom deepvariant.core.protos import core_pb2\nfrom deepvariant.core.python import vcf_writer\n\n_DOUBLE_CLOSE_ERROR = 'Cannot close an already closed VcfWriter'\n_WRITE_TO_CLOSED_ERROR = 'Cannot write to closed VCF stream'\n_WRONG_NUMBER_OF_SAMPLES = 'Variant call count must match number of samples'\n_DISCORDANT_SAMPLE_NAMES_ERROR = (\n 'Out-of-order call set names, or unrecognized call set name, with respect '\n 'to samples declared in VCF header.')\n_UNKNOWN_CONTIG_ERROR = \"Record's reference name is not available in VCF header\"\n_FILTER_NOT_FOUND_ERROR = 'Filter must be found in header'\n\n\nclass WrapVcfWriterTest(parameterized.TestCase):\n\n def setUp(self):\n self.out_fname = test_utils.test_tmpfile('output.vcf')\n self.options = core_pb2.VcfWriterOptions(\n contigs=[\n core_pb2.ContigInfo(name='Chr1', n_bases=50, pos_in_fasta=0),\n core_pb2.ContigInfo(name='Chr2', n_bases=25, pos_in_fasta=1),\n ],\n sample_names=['Fido', 'Spot'],\n filters=[])\n self.writer = vcf_writer.VcfWriter.to_file(self.out_fname, self.options)\n self.variant = test_utils.make_variant(\n chrom='Chr1', start=10, alleles=['A', 'C'])\n self.variant.calls.add(genotype=[0, 0], call_set_name='Fido')\n self.variant.calls.add(genotype=[0, 1], call_set_name='Spot')\n\n def test_writing_canned_variants(self):\n \"\"\"Tests writing all the variants that are 'canned' in our tfrecord file.\"\"\"\n\n # This file is in the TF record format\n tfrecord_file = test_utils.genomics_core_testdata(\n 'test_samples.vcf.golden.tfrecord')\n\n writer_options = 
core_pb2.VcfWriterOptions(\n contigs=[\n core_pb2.ContigInfo(name='chr1', n_bases=248956422),\n core_pb2.ContigInfo(name='chr2', n_bases=242193529),\n core_pb2.ContigInfo(name='chr3', n_bases=198295559),\n core_pb2.ContigInfo(name='chrX', n_bases=156040895)\n ],\n sample_names=['NA12878_18_99'],\n filters=[\n core_pb2.VcfFilterInfo(id='LowQual'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL95.00to96.00'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL96.00to97.00'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL97.00to99.00'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL99.00to99.50'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL99.50to99.90'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL99.90to99.95'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL99.95to100.00+'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheINDEL99.95to100.00'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.50to99.60'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.60to99.80'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.80to99.90'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.90to99.95'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.95to100.00+'),\n core_pb2.VcfFilterInfo(id='VQSRTrancheSNP99.95to100.00'),\n ])\n\n variant_records = list(\n io_utils.read_tfrecords(tfrecord_file, proto=variants_pb2.Variant))\n out_fname = test_utils.test_tmpfile('output.vcf')\n with vcf_writer.VcfWriter.to_file(out_fname, writer_options) as writer:\n for record in variant_records[:5]:\n writer.write(record)\n\n # Check: are the variants written as expected?\n # pylint: disable=line-too-long\n expected_vcf_content = [\n '##fileformat=VCFv4.2\\n',\n '##FILTER=<ID=PASS,Description=\"All filters passed\">\\n',\n '##FILTER=<ID=LowQual,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL95.00to96.00,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL96.00to97.00,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL97.00to99.00,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL99.00to99.50,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL99.50to99.90,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL99.90to99.95,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL99.95to100.00+,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheINDEL99.95to100.00,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.50to99.60,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.60to99.80,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.80to99.90,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.90to99.95,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.95to100.00+,Description=\"\">\\n',\n '##FILTER=<ID=VQSRTrancheSNP99.95to100.00,Description=\"\">\\n',\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\\n',\n '##FORMAT=<ID=GQ,Number=1,Type=Integer,Description=\"Genotype Quality\">\\n',\n '##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read depth of all '\n 'passing filters reads.\">\\n',\n '##FORMAT=<ID=AD,Number=R,Type=Integer,Description=\"Read depth of all '\n 'passing filters reads for each allele.\">\\n',\n '##FORMAT=<ID=VAF,Number=A,Type=Float,Description=\\\"Variant allele '\n 'fractions.\">\\n',\n '##FORMAT=<ID=GL,Number=G,Type=Float,Description=\"Genotype '\n 'likelihoods, log10 encoded\">\\n',\n '##FORMAT=<ID=PL,Number=G,Type=Integer,Description=\"Genotype '\n 'likelihoods, Phred encoded\">\\n',\n '##INFO=<ID=END,Number=1,Type=Integer,Description=\"Stop position of '\n 'the interval\">\\n', '##contig=<ID=chr1,length=248956422>\\n',\n '##contig=<ID=chr2,length=242193529>\\n',\n 
'##contig=<ID=chr3,length=198295559>\\n',\n '##contig=<ID=chrX,length=156040895>\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tNA12878_18_99\\n',\n 'chr1\\t13613\\t.\\tT\\tA\\t39.88\\tVQSRTrancheSNP99.90to99.95\\t.\\tGT:GQ:DP:AD:PL\\t0/1:16:4:1,3:68,0,16\\n',\n 'chr1\\t13813\\t.\\tT\\tG\\t90.28\\tPASS\\t.\\tGT:GQ:DP:AD:PL\\t1/1:9:3:0,3:118,9,0\\n',\n 'chr1\\t13838\\trs28428499\\tC\\tT\\t62.74\\tPASS\\t.\\tGT:GQ:DP:AD:PL\\t1/1:6:2:0,2:90,6,0\\n',\n 'chr1\\t14397\\trs756427959\\tCTGT\\tC\\t37.73\\tPASS\\t.\\tGT:GQ:DP:AD:PL\\t0/1:75:5:3,2:75,0,152\\n',\n 'chr1\\t14522\\t.\\tG\\tA\\t49.77\\tVQSRTrancheSNP99.60to99.80\\t.\\tGT:GQ:DP:AD:PL\\t0/1:78:10:6,4:78,0,118\\n'\n ]\n # pylint: enable=line-too-long\n\n with tf.gfile.GFile(out_fname, 'r') as f:\n self.assertEqual(f.readlines(), expected_vcf_content)\n\n def test_write_variant_is_ok(self):\n self.assertIsNone(self.writer.write(self.variant))\n\n def test_write_raises_with_unknown_contig(self):\n with self.assertRaisesRegexp(ValueError, _UNKNOWN_CONTIG_ERROR):\n self.variant.reference_name = 'BadChrom'\n self.writer.write(self.variant)\n\n def test_write_raises_with_unknown_filter(self):\n with self.assertRaisesRegexp(ValueError, _FILTER_NOT_FOUND_ERROR):\n self.variant.filter[:] = ['BadFilter']\n self.writer.write(self.variant)\n\n @parameterized.parameters(\n ([], _WRONG_NUMBER_OF_SAMPLES),\n (['Spot'], _WRONG_NUMBER_OF_SAMPLES),\n (['Fido'], _WRONG_NUMBER_OF_SAMPLES),\n (['Unknown', 'Fido'], _DISCORDANT_SAMPLE_NAMES_ERROR),\n (['Spot', 'Unknown'], _DISCORDANT_SAMPLE_NAMES_ERROR),\n (['Spot', 'Fido'], _DISCORDANT_SAMPLE_NAMES_ERROR), # Out of order.\n (['Fido', 'Spot', 'Extra'], _WRONG_NUMBER_OF_SAMPLES),\n )\n def test_write_raises_with_unknown_sample(self, sample_names, message):\n with self.assertRaisesRegexp(ValueError, message):\n del self.variant.calls[:]\n for sample_name in sample_names:\n self.variant.calls.add(genotype=[0, 0], call_set_name=sample_name)\n self.writer.write(self.variant)\n\n def test_context_manager(self):\n with self.writer:\n # Writing within the context manager succeeds.\n self.assertIsNone(self.writer.write(self.variant))\n\n # self.writer should be closed, so writing again will fail.\n with self.assertRaisesRegexp(ValueError, _WRITE_TO_CLOSED_ERROR):\n self.writer.write(self.variant)\n\n def test_double_context_manager(self):\n with self.writer:\n # Writing within the context manager succeeds.\n self.assertIsNone(self.writer.write(self.variant))\n\n with self.assertRaisesRegexp(ValueError, _DOUBLE_CLOSE_ERROR):\n # Entering the closed writer should be fine.\n with self.writer:\n pass # We want to raise an error on exit, so nothing to do in context.\n\n\nclass WrapVcfWriterRoundTripTests(parameterized.TestCase):\n\n @parameterized.parameters(('test_samples.vcf',), ('test_samples.vcf.gz',),\n ('test_sites.vcf',))\n def test_round_trip_vcf(self, test_datum_name):\n # Round-trip variants through writing and reading:\n # 1. Read variants v1 from VcfReader;\n # 2. Write v1 to vcf using our VcfWriter;\n # 3. Read back in using VcfReader -- v2;\n # 4. 
compare v1 and v2.\n in_file = test_utils.genomics_core_testdata(test_datum_name)\n out_file = test_utils.test_tmpfile('output_' + test_datum_name)\n\n v1_reader = genomics_io.make_vcf_reader(in_file, use_index=False)\n v1_records = list(v1_reader.iterate())\n self.assertTrue(v1_records, 'Reader failed to find records')\n\n writer_options = core_pb2.VcfWriterOptions(\n contigs=v1_reader.contigs,\n sample_names=v1_reader.samples,\n filters=v1_reader.filters)\n\n with vcf_writer.VcfWriter.to_file(out_file, writer_options) as writer:\n for record in v1_records:\n writer.write(record)\n\n v2_reader = genomics_io.make_vcf_reader(out_file, use_index=False)\n v2_records = list(v2_reader.iterate())\n\n self.assertEqual(v1_records, v2_records,\n 'Round-tripped variants not as expected')\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.gfile.GFile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
tsutterley/model-harmonics
[ "17f6842d5fa1f2abf42caea51cfb09b6a4b2ee30", "17f6842d5fa1f2abf42caea51cfb09b6a4b2ee30" ]
[ "ECCO/ecco_read_realtime.py", "ECCO/ecco_read_llc_tiles.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\necco_read_realtime.py\nWritten by Tyler Sutterley (10/2021)\n\nReads 12-hour ECCO ocean bottom pressure data from JPL\nCalculates monthly anomalies on an equirectangular grid\n https://ecco.jpl.nasa.gov/drive/files/NearRealTime/Readme\n https://ecco.jpl.nasa.gov/drive/files/NearRealTime/KalmanFilter/\n https://ecco.jpl.nasa.gov/drive/files/NearRealTime/Smoother/\n\nProcesses the data as described in the GRACE Tellus site\n https://grace.jpl.nasa.gov/data/get-data/ocean-bottom-pressure/\nThe global area average of each ocean bottom pressure map is removed\n\nINPUTS:\n ECCO Near Real-Time models\n kf080i: Kalman filter analysis\n dr080i: RTS smoother analysis\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: working data directory\n -Y X, --year X: years to run\n -m X, --mean X: Year range for mean\n -F X, --format X: input and output data format\n ascii\n netcdf\n HDF5\n -M X, --mode X: Permission mode of directories and files\n -V, --verbose: Output information for each output file\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n h5py: Pythonic interface to the HDF5 binary data format.\n https://www.h5py.org/\n\nPROGRAM DEPENDENCIES:\n spatial.py: spatial data class for reading, writing and processing data\n ncdf_read.py: reads input spatial data from netCDF4 files\n hdf5_read.py: reads input spatial data from HDF5 files\n ncdf_write.py: writes output spatial data to netCDF4\n hdf5_write.py: writes output spatial data to HDF5\n time.py: utilities for calculating time operations\n\nREFERENCES:\n R. J. 
Greatbatch, \"A note on the representation of steric sea level in\n models that conserve volume rather than mass\", Journal of Geophysical\n Research: Oceans, 99(C6): 12767-12771, 1994.\n https://doi.org/10.1029/94JC00847\n\nUPDATE HISTORY:\n Updated 10/2021: using python logging for handling verbose output\n Updated 03/2021: automatically update years to run based on current time\n Updated 02/2021: replaced numpy bool to prevent deprecation warning\n Updated 12/2020: use argparse to set command line parameters\n using spatial module for read/write operations\n using utilities from time module\n Updated 10/2019: changing Y/N flags to True/False\n Updated 06/2019: recommending kf080i for the Kalman filtered solution\n Updated 06/2018: output file with average absolute ocean bottom pressure\n Updated 03/2018: output data in pascals\n Updated 01/2018: using getopt to set parameters\n Updated 06/2016: can use dr080g model, using __future__ print option\n Updated 02/2016: updates for new kf080g, testing different mean ranges\n Updated 06/2015: code update using regular expressions and no glob\n added main definition and DATAFORM for output ascii and HDF5 formats\n Written 02/2014\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport re\nimport logging\nimport datetime\nimport argparse\nimport numpy as np\nimport gravity_toolkit.time\nimport gravity_toolkit.spatial\n\n#-- PURPOSE: read ECCO ocean bottom pressure data and create monthly anomalies\n#-- on an equirectangular grid\ndef ecco_read_realtime(ddir, MODEL, YEARS, RANGE=None, DATAFORM=None,\n VERBOSE=False, MODE=0o775):\n\n #-- create logger for verbosity level\n loglevel = logging.INFO if VERBOSE else logging.CRITICAL\n logging.basicConfig(level=loglevel)\n\n #-- set up regular expression for finding directories to run from YEAR\n regex_year = '|'.join([r'{0:d}'.format(Y) for Y in YEARS])\n rx = re.compile(r'{0}_({1})'.format(MODEL,regex_year), re.VERBOSE)\n #-- Finding subdirectories\n input_dir = sorted([sd for sd in os.listdir(ddir) if \\\n (os.path.isdir(os.path.join(ddir,sd)) & bool(rx.match(sd)))])\n\n #-- bad value\n fill_value = -1e+10\n #-- output data file format\n suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')\n\n #-- grid parameters\n a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid\n flat = 1.0/298.26#-- flattening of the ellipsoid\n #-- semiminor axis of the ellipsoid\n b_axis = (1.0 -flat)*a_axis#-- [m]\n #-- output grid spacing\n LAT_MAX = 78.5\n dlon,dlat = (1.0,1.0)\n dphi = dlon*np.pi/180.0\n\n #-- Reading Mean Ocean Bottom Pressure File\n args = (MODEL,RANGE[0],RANGE[1],suffix[DATAFORM])\n mean_file = 'ECCO_{0}_OBP_MEAN_{1:4d}-{2:4d}.{3}'.format(*args)\n if (DATAFORM == 'ascii'):\n #-- ascii (.txt)\n obp_mean = gravity_toolkit.spatial(spacing=[1.0,1.0],\n nlat=158, nlon=360, extent=[0.5,359.5,-LAT_MAX,LAT_MAX],\n fill_value=fill_value).from_ascii(os.path.join(ddir,mean_file),\n date=False)\n elif (DATAFORM == 'netCDF4'):\n #-- netcdf (.nc)\n obp_mean = gravity_toolkit.spatial().from_netCDF4(\n os.path.join(ddir,mean_file),date=False)\n elif (DATAFORM == 'HDF5'):\n #-- HDF5 (.H5)\n obp_mean = gravity_toolkit.spatial().from_HDF5(\n os.path.join(ddir,mean_file),date=False)\n\n #-- output subdirectory for monthly datasets\n outdir = 'ECCO_{0}_AveRmvd_OBP'.format(MODEL)\n #-- Creating subdirectory if it doesn't exist\n if (not os.access(os.path.join(ddir,outdir), os.F_OK)):\n os.mkdir(os.path.join(ddir,outdir),MODE)\n\n #-- output average ocean bottom pressure to file\n output_average_file = 
'ECCO_{0}_Global_Average_OBP.txt'.format(MODEL)\n fid = open(os.path.join(ddir,outdir,output_average_file),'w')\n\n #-- for each yearly subdirectory\n for i in input_dir:\n subdir = sorted([sd for sd in os.listdir(os.path.join(ddir,i)) if\n (os.path.isdir(os.path.join(ddir,i,sd)) &\n bool(re.match(r'n10day_\\d+_\\d+',sd)))])\n #-- for each subdirectory\n for j in subdir:\n #-- find the input file within the subdirectory\n fi = [fi for fi in os.listdir(os.path.join(ddir,i,j)) if\n bool(re.match(r'OBP_(.*?).cdf',fi))]\n #-- skip subdirectory if file not found\n try:\n input_file = os.path.join(ddir,i,j,fi[0])\n os.access(input_file, os.F_OK)\n except:\n continue\n\n #-- Open ECCO CDF datafile for reading\n #-- change order of axes to be lat/lon/time\n obp = gravity_toolkit.spatial(fill_value=fill_value).from_netCDF4(\n input_file,verbose=VERBOSE,varname='OBP').transpose(axes=(1,2,0))\n #-- Getting the data from each netCDF variable\n nlat,nlon,nt = obp.shape\n #-- Dating scheme is hours from UNIX time (1970-01-01)\n #-- calculate Julian day by converting to MJD and adding offset\n time_string = obp.attributes['time']['units']\n epoch1,to_secs = gravity_toolkit.time.parse_date_string(time_string)\n JD = gravity_toolkit.time.convert_delta_time(obp.time*to_secs,\n epoch1=epoch1, epoch2=(1858,11,17,0,0,0),\n scale=1.0/86400.0) + 2400000.5\n #-- convert from Julian days to calendar dates\n YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian(JD,\n FORMAT='tuple')\n\n #-- dlat is the difference in latitude spacing\n dlat0 = np.abs(obp.lat[0:nlat-1]-obp.lat[1:nlat])\n #-- used a midpoint integration method to make the hemispheres\n #-- symmetrical for area\n dlat1 = np.zeros((nlat))\n dlat2 = np.zeros((nlat))\n dlat1[0] = 1\n dlat2[0:-1] = np.copy(dlat0)\n dlat1[1:] = np.copy(dlat0)\n dlat2[-1] = 1\n dth = np.mean(np.array([dlat1,dlat2]),axis=0)*np.pi/180.0\n\n #-- will calculate and remove the area average of the model\n #-- (Greatbatch correction) https://doi.org/10.1029/94JC00847\n #-- Latitude spacing varies in the model\n obp_anomaly = gravity_toolkit.spatial(fill_value=fill_value)\n obp_anomaly.lon = np.arange(dlon/2.0,360+dlon/2.0,dlon)\n obp_anomaly.lat = np.arange(-LAT_MAX,LAT_MAX+dlat,dlat)\n obp_anomaly.data = np.zeros((158,360,nt),dtype=obp.data.dtype)\n obp_anomaly.mask = np.zeros((158,360,nt),dtype=bool)\n #-- convert from calendar dates to year-decimal\n obp_anomaly.time = gravity_toolkit.time.convert_calendar_decimal(\n YY,MM,day=DD,hour=hh,minute=mm,second=ss)\n for t in range(0, nt):\n #-- the global area average of each OBP map is removed\n total_area = 0.0\n total_newton = 0.0\n for k in range(0, nlat):\n #-- Grid point areas (ellipsoidal)\n theta = (90.0 - obp.lat[k])*np.pi/180.0\n phi = obp.lon*np.pi/180.0\n area = np.sin(theta) * np.sqrt((a_axis**2)*(b_axis**2)*\n ((np.sin(theta)**2) * (np.cos(phi)**2) +\n (np.sin(theta)**2)*(np.sin(phi)**2)) +\n (a_axis**4)*(np.cos(theta)**2))*dphi*dth[k]\n #-- calculate the grid point weight in newtons\n newtons = obp.data[k,:,t]*area\n #-- finding ocean points at each lat\n ocean_points, = np.nonzero(~obp.mask[k,:,t])\n #-- total area\n total_area += np.sum(area[ocean_points])\n #-- total weight\n total_newton += np.sum(newtons[ocean_points])\n #-- remove global area average of each OBP map\n ratio = (total_newton/total_area)\n obp_mean_removed = np.ma.zeros((nlat,nlon))\n obp_mean_removed.data[:,:] = obp.data[:,:,t] - ratio\n obp_mean_removed.mask = np.copy(obp.mask[:,:,t])\n #-- output monthly absolute bottom pressure to file\n args = 
(obp_anomaly.time[t], ratio, total_area)\n fid.write('{0:10.4f} {1:21.14e} {2:21.14e}\\n'.format(*args))\n\n #-- interpolate to equirectangular grid\n obp_interp = np.ma.zeros((158,360))\n obp_interp.mask = np.ones((158,360),dtype=bool)\n theta = (90.0 - obp.lat)*np.pi/180.0\n th = (90.0 - obp_anomaly.lat)*np.pi/180.0\n #-- for each output latitude\n for j in range(158):\n #-- check if there is an exact value\n if np.any(np.isclose(obp.lat,obp_anomaly.lat[j])):\n k, = np.nonzero(np.isclose(obp.lat,obp_anomaly.lat[j]))\n obp_interp.data[j,:] = obp_mean_removed.data[k,:]\n obp_interp.mask[j,:] = obp_mean_removed.mask[k,:]\n else:\n #-- interpolate using inverse distance weights\n #-- calculating the indices for the original grid\n k, = np.nonzero((theta[0:-1] >= th[j]) &\n (theta[1:] < th[j]))\n #-- calculate distance weights\n d1, = np.arccos(np.cos(th[j])*np.cos(theta[k]) +\n np.sin(th[j])*np.sin(theta[k]))\n d2, = np.arccos(np.cos(th[j])*np.cos(theta[k+1]) +\n np.sin(th[j])*np.sin(theta[k+1]))\n W = d1 + d2\n #-- calculate interpolated value using inverse weights\n obp_interp.data[j,:] = (obp_mean_removed.data[k,:]*d2 +\n obp_mean_removed.data[k+1,:]*d1)/W\n obp_interp.mask[j,:] = np.squeeze(\n obp_mean_removed.mask[k,:] |\n obp_mean_removed.mask[k+1,:])\n\n #-- Calculating Departures from the mean field\n obp_anomaly.data[:,:,t] = obp_interp - obp_mean.data\n obp_anomaly.mask[:,:,t] = (obp_interp.mask | obp_mean.mask)\n obp_anomaly.update_mask()\n\n #-- Calculating the monthly averages\n #-- data files cover the first 10 days of the next year\n ind_start_year, = np.nonzero(YY == YY[0])\n uniq_months = np.unique(MM[ind_start_year])\n for t,mm in enumerate(uniq_months):\n #-- Calculating the monthly anomaly\n indices, = np.nonzero(MM == mm)\n obp_monthly_anomaly = obp_anomaly.mean(indices=indices)\n obp_monthly_anomaly.update_mask()\n #-- output to file\n args = (MODEL,YY[0],mm,suffix[DATAFORM])\n FILE='ECCO_{0}_AveRmvd_OBP_{1:4.0f}_{2:02.0f}.{3}'.format(*args)\n output_data(obp_monthly_anomaly,MODEL,DATAFORM=DATAFORM,\n VERBOSE=VERBOSE,FILENAME=os.path.join(ddir,outdir,FILE))\n #-- change the permissions mode of the output file to MODE\n os.chmod(os.path.join(ddir,outdir,FILE),MODE)\n\n #-- close output file and change the permissions to MODE\n fid.close()\n os.chmod(os.path.join(ddir,outdir,output_average_file),MODE)\n\n#-- PURPOSE: wrapper function for outputting data to file\ndef output_data(data,MODEL,FILENAME=None,DATAFORM=None,VERBOSE=False):\n TITLE = 'Ocean_Bottom_Pressure_from_ECCO-JPL_{0}_Model'.format(MODEL)\n if (DATAFORM == 'ascii'):\n #-- ascii (.txt)\n data.to_ascii(FILENAME,verbose=VERBOSE)\n elif (DATAFORM == 'netCDF4'):\n #-- netcdf (.nc)\n data.to_netCDF4(FILENAME, verbose=VERBOSE, UNITS='Pa',\n LONGNAME='Bottom_Pressure', TITLE=TITLE)\n elif (DATAFORM == 'HDF5'):\n #-- HDF5 (.H5)\n data.to_HDF5(FILENAME, verbose=VERBOSE, UNITS='Pa',\n LONGNAME='Bottom_Pressure', TITLE=TITLE)\n\n#-- Main program that calls ecco_read_realtime()\ndef main():\n #-- Read the system arguments listed after the program\n parser = argparse.ArgumentParser(\n description=\"\"\"Reads 12-hour ECCO ocean bottom pressure\n data from JPL and calculates monthly anomalies\n on an equirectangular grid\n \"\"\"\n )\n #-- command line parameters\n parser.add_argument('model',\n metavar='MODEL', type=str, nargs='+',\n default=['kf080i','dr080i'], choices=['kf080i','dr080i'],\n help='ECCO Model')\n #-- working data directory\n parser.add_argument('--directory','-D',\n type=lambda p: 
os.path.abspath(os.path.expanduser(p)),\n default=os.getcwd(),\n help='Working data directory')\n #-- years to run\n now = datetime.datetime.now()\n parser.add_argument('--year','-Y',\n type=int, nargs='+', default=range(2000,now.year+1),\n help='Years of model outputs to run')\n #-- start and end years to run for mean\n parser.add_argument('--mean','-m',\n metavar=('START','END'), type=int, nargs=2,\n default=[2003,2007],\n help='Start and end year range for mean')\n #-- input and output data format (ascii, netCDF4, HDF5)\n parser.add_argument('--format','-F',\n type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5'],\n help='Input and output data format')\n #-- print information about each output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Verbose output of run')\n #-- permissions mode of the local directories and files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of directories and files')\n args,_ = parser.parse_known_args()\n\n #-- for each ECCO Near Real-Time model\n for MODEL in args.model:\n #-- run program\n ecco_read_realtime(args.directory, MODEL, args.year, RANGE=args.mean,\n DATAFORM=args.format, VERBOSE=args.verbose, MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n main()\n", "#!/usr/bin/env python\nu\"\"\"\necco_read_llc_tiles.py\nWritten by Tyler Sutterley (10/2021)\n\nCalculates monthly ocean bottom pressure anomalies from ECCO LLC tiles\nhttps://ecco.jpl.nasa.gov/drive/files/Version4/Release4/nctiles_monthly\nhttps://ecco.jpl.nasa.gov/drive/files/Version5/Alpha/nctiles_monthly\n\nProcesses the data as described in the GRACE Tellus site\n https://grace.jpl.nasa.gov/data/get-data/ocean-bottom-pressure/\nThe global area average of each ocean bottom pressure map is removed\n\nNOTES:\n Bottom Pressure Potential Anomaly (p/rhonil, m^2/s^2)\n To convert to m, divide by g (g=9.81 m/s^2)\n PHIBOT is the anomaly relative to Depth * rhonil * g\n The absolute bottom pressure in Pa is:\n Depth * rhonil * g + PHIBOT * rhonil\n rhonil = 1029 kg/m^3\n\nINPUTS:\n ECCO LLC tile models\n V4r4: Version 4, Revision 4\n V5alpha: Version 5, Alpha release\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: working data directory\n -Y X, --year X: years to run\n -m X, --mean X: Year range for mean\n -M X, --mode X: Permission mode of directories and files\n -V, --verbose: Output information for each output file\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n\nPROGRAM DEPENDENCIES:\n time.py: utilities for calculating time operations\n\nREFERENCES:\n R. J. 
Greatbatch, \"A note on the representation of steric sea level in\n models that conserve volume rather than mass\", Journal of Geophysical\n Research: Oceans, 99(C6): 12767-12771, 1994.\n https://doi.org/10.1029/94JC00847\n\nUPDATE HISTORY:\n Updated 10/2021: using python logging for handling verbose output\n Updated 03/2021: automatically update years to run based on current time\n Written 02/2021\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport re\nimport logging\nimport netCDF4\nimport datetime\nimport argparse\nimport numpy as np\nimport gravity_toolkit.time\n\n#-- PURPOSE: read ECCO tiled ocean bottom pressure data and calculate mean\ndef ecco_read_llc_tiles(ddir, MODEL, YEARS, RANGE=None, VERBOSE=False,\n MODE=0o775):\n\n #-- create logger for verbosity level\n loglevel = logging.INFO if VERBOSE else logging.CRITICAL\n logging.basicConfig(level=loglevel)\n\n #-- input and output subdirectories\n d1=os.path.join(ddir,'ECCO-{0}'.format(MODEL),'nctiles_monthly')\n d2=os.path.join(ddir,'ECCO_{0}_AveRmvd_OBP'.format(MODEL),'nctiles_monthly')\n #-- recursively create subdirectory if it doesn't exist\n os.makedirs(d2,MODE) if (not os.access(d2, os.F_OK)) else None\n\n #-- input variable names for each model\n if (MODEL == 'V4r4'):\n LONNAME = 'XC'\n LATNAME = 'YC'\n ZNAME = 'Depth'\n VARNAME = 'PHIBOT'\n TIMENAME = 'time'\n AREANAME = 'rA'\n MASKNAME = 'maskC'\n Nt,Nj,Ni = (13,90,90)\n elif (MODEL == 'V5alpha'):\n LONNAME = 'XC'\n LATNAME = 'YC'\n ZNAME = 'Depth'\n VARNAME = 'PHIBOT'\n TIMENAME = 'time'\n AREANAME = 'rA'\n MASKNAME = 'maskC'\n Nt,Nj,Ni = (13,270,270)\n\n #-- read ECCO tile grid file\n invariant = ncdf_invariant(os.path.join(d1,'ECCO-GRID.nc'),\n lon=LONNAME, lat=LATNAME, depth=ZNAME, area=AREANAME, mask=MASKNAME)\n #-- bad value\n fill_value = -1e+10\n #-- model gamma and rhonil\n gamma = 9.81\n rhonil = 1029\n\n #-- read mean data from ecco_mean_llc_tiles.py\n args = (MODEL, RANGE[0], RANGE[1])\n mean_file = 'ECCO_{0}_OBP_MEAN_{1:4d}-{2:4d}.nc'.format(*args)\n obp_mean = ncdf_mean(os.path.join(d1,mean_file),VARNAME=VARNAME)\n\n #-- output average ocean bottom pressure to file\n output_average_file = 'ECCO_{0}_Global_Average_OBP.txt'.format(MODEL)\n fid = open(os.path.join(d1,output_average_file),'w')\n\n #-- compile regular expression operator for finding files for years\n year_regex = '|'.join('{0:d}'.format(y) for y in YEARS)\n rx1 = re.compile(r'PHIBOT([\\.\\_])({0})(_(\\d+))?.nc$'.format(year_regex))\n #-- find input files\n input_files = [fi for fi in os.listdir(d1) if rx1.match(fi)]\n\n #-- Defining output attributes\n attributes = {}\n TITLE = 'Ocean_Bottom_Pressure_Anomalies_from_ECCO_{0}_Model'\n attributes['title'] = TITLE.format(MODEL)\n #-- dimension attributes\n attributes['i'] = {}\n attributes['i']['long_name'] = 'x-dimension of the t grid'\n attributes['i']['axis'] = 'X'\n attributes['j'] = {}\n attributes['j']['long_name'] = 'y-dimension of the t grid'\n attributes['j']['axis'] = 'Y'\n attributes['tile'] = {}\n attributes['tile']['long_name'] = 'index of llc grid tile'\n attributes[TIMENAME] = {}\n attributes[TIMENAME]['long_name'] = 'Date_in_Decimal_Years'\n attributes[TIMENAME]['units'] = 'years'\n #-- longitude and latitude\n attributes['lon'] = {}\n attributes['lon']['long_name'] = 'longitude'\n attributes['lon']['units'] = 'degrees_east'\n attributes['lat'] = {}\n attributes['lat']['long_name'] = 'latitude'\n attributes['lat']['units'] = 'degrees_north'\n #-- output ocean bottom pressure\n attributes[VARNAME] = {}\n 
attributes[VARNAME]['long_name'] = 'pressure_at_sea_floor'\n attributes[VARNAME]['units'] = 'Pa'\n\n #-- read each input file\n for fi in sorted(input_files):\n #-- Open netCDF4 datafile for reading\n fileID = netCDF4.Dataset(os.path.join(d1,fi),'r')\n #-- time within netCDF files is days since epoch\n TIME = fileID.variables[TIMENAME][:].copy()\n time_string = fileID.variables[TIMENAME].units\n epoch1,to_secs = gravity_toolkit.time.parse_date_string(time_string)\n #-- read ocean bottom pressure anomalies for each month\n for m,delta_time in enumerate(to_secs*TIME):\n #-- convert from ocean bottom pressure anomalies to absolute\n PHIBOT = fileID.variables[VARNAME][m,:,:,:].copy()\n obp_tile = invariant['depth']*rhonil*gamma + PHIBOT*rhonil\n\n #-- output monthly tile data\n obp = {}\n #-- allocate for output anomaly data\n obp[VARNAME] = np.ma.zeros((Nt,Nj,Ni),fill_value=fill_value)\n obp[VARNAME].mask = np.logical_not(invariant['mask'][0,:,:,:]) | \\\n (invariant['depth'] == 0.0)\n #-- copy geolocation variables\n obp['lon'] = np.copy(invariant['lon'])\n obp['lat'] = np.copy(invariant['lat'])\n #-- copy grid variables\n for key in ('i','j','tile'):\n obp[key] = fileID.variables[key][:].copy()\n\n #-- calculate Julian day by converting to MJD and adding offset\n JD = gravity_toolkit.time.convert_delta_time(delta_time,\n epoch1=epoch1, epoch2=(1858,11,17,0,0,0),\n scale=1.0/86400.0) + 2400000.5\n #-- convert from Julian days to calendar dates\n YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian(JD,\n FORMAT='tuple')\n #-- convert from calendar dates to year-decimal\n obp['time'], = gravity_toolkit.time.convert_calendar_decimal(\n YY,MM,day=DD,hour=hh,minute=mm,second=ss)\n\n #-- global area average of each ocean bottom pressure map is removed\n #-- (Greatbatch correction) https://doi.org/10.1029/94JC00847\n total_area = 0.0\n total_newton = 0.0\n #-- for each tile\n for k in range(0, Nt):\n #-- Grid point areas (m^2)\n area = invariant['area'][k,:,:]\n #-- calculate the tile point weight in newtons\n newtons = obp_tile[k,:,:]*area\n #-- mask for tile\n mask = np.logical_not(obp[VARNAME].mask[k,:,:])\n #-- finding ocean points at each lat\n if np.count_nonzero(mask):\n indj,indi = np.nonzero(mask)\n #-- total area\n total_area += np.sum(area[indj,indi])\n #-- total weight in newtons\n total_newton += np.sum(newtons[indj,indi])\n #-- remove global area average of each OBP map\n ratio = (total_newton/total_area)\n obp[VARNAME].data[:,:,:] = (obp_tile - ratio) - obp_mean\n #-- replace invalid values with fill value\n obp[VARNAME].data[obp[VARNAME].mask] = obp[VARNAME].fill_value\n #-- output monthly absolute bottom pressure to file\n args = (obp['time'], ratio, total_area)\n fid.write('{0:10.4f} {1:21.14e} {2:21.14e}\\n'.format(*args))\n\n #-- output to file\n args = (MODEL, YY, MM)\n FILE='ECCO_{0}_AveRmvd_OBP_{1:4.0f}_{2:02.0f}.nc'.format(*args)\n #-- netcdf (.nc)\n ncdf_tile_write(obp, attributes, FILENAME=os.path.join(d2,FILE),\n LONNAME='lon', LATNAME='lat', TIMENAME=TIMENAME,\n VARNAME=VARNAME)\n #-- change the permissions mode of the output file to MODE\n os.chmod(os.path.join(d2,FILE),MODE)\n\n #-- close output file and change the permissions to MODE\n fid.close()\n os.chmod(os.path.join(d1,output_average_file),MODE)\n\n#-- PURPOSE: read ECCO invariant grid file\ndef ncdf_invariant(invariant_file,**kwargs):\n #-- output dictionary with invariant parameters\n invariant = {}\n #-- open netCDF4 file for reading\n with netCDF4.Dataset(os.path.expanduser(invariant_file),'r') as 
fileID:\n #-- extract latitude, longitude, depth, area and valid mask\n for key,val in kwargs.items():\n invariant[key] = fileID.variables[val][:].copy()\n #-- return the invariant parameters\n return invariant\n\n#-- PURPOSE: read ECCO mean ocean bottom pressure file\ndef ncdf_mean(mean_file, VARNAME=None):\n #-- open netCDF4 file for reading\n with netCDF4.Dataset(os.path.expanduser(mean_file),'r') as fileID:\n obp_mean = np.copy(fileID.variables[VARNAME][:].copy())\n return obp_mean\n\n#-- PURPOSE: write tiled data to a netCDF4 flie\ndef ncdf_tile_write(output, attributes, FILENAME=None, LONNAME=None,\n LATNAME=None, TIMENAME=None, VARNAME=None):\n\n #-- opening NetCDF file for writing\n fileID = netCDF4.Dataset(os.path.expanduser(FILENAME),'w')\n\n #-- python dictionary with NetCDF variables\n nc = {}\n #-- Defining the NetCDF dimensions and variables\n for key in ('i','j','tile',TIMENAME):\n fileID.createDimension(key, len(np.atleast_1d(output[key])))\n nc[key] = fileID.createVariable(key, output[key].dtype, (key,))\n #-- filling NetCDF variables\n nc[key][:] = np.copy(output[key])\n #-- Defining attributes for variable\n for att_name,att_val in attributes[key].items():\n setattr(nc[key],att_name,att_val)\n\n #-- Defining the NetCDF variables\n for key in (LONNAME,LATNAME,VARNAME):\n if hasattr(output[key],'fill_value'):\n nc[key] = fileID.createVariable(key, output[key].dtype,\n ('tile','j','i'), fill_value=output[key].fill_value,\n zlib=True)\n else:\n nc[key] = fileID.createVariable(key, output[key].dtype,\n ('tile','j','i'))\n #-- filling NetCDF variables\n nc[key][:] = np.copy(output[key])\n #-- Defining attributes for variable\n for att_name,att_val in attributes[key].items():\n setattr(nc[key],att_name,att_val)\n #-- add attribute for date created\n fileID.date_created = datetime.datetime.now().isoformat()\n fileID.title = attributes['title']\n #-- Output NetCDF structure information\n logging.info(FILENAME)\n logging.info(list(fileID.variables.keys()))\n #-- Closing the NetCDF file\n fileID.close()\n\n#-- Main program that calls ecco_read_llc_tiles()\ndef main():\n #-- Read the system arguments listed after the program\n parser = argparse.ArgumentParser(\n description=\"\"\"Reads monthly ECCO ocean bottom pressure\n LLC tile data and calculates multi-annual means\n \"\"\"\n )\n #-- command line parameters\n parser.add_argument('model',\n metavar='MODEL', type=str, nargs='+',\n default=['V4r4','V5alpha'], choices=['V4r4','V5alpha'],\n help='ECCO Version 4 or 5 Model')\n #-- working data directory\n parser.add_argument('--directory','-D',\n type=lambda p: os.path.abspath(os.path.expanduser(p)),\n default=os.getcwd(),\n help='Working data directory')\n #-- years to run\n now = datetime.datetime.now()\n parser.add_argument('--year','-Y',\n type=int, nargs='+', default=range(2000,now.year+1),\n help='Years of model outputs to run')\n #-- start and end years to run for mean\n parser.add_argument('--mean','-m',\n metavar=('START','END'), type=int, nargs=2,\n default=[2003,2007],\n help='Start and end year range for mean')\n #-- print information about each output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Verbose output of run')\n #-- permissions mode of the local directories and files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of directories and files')\n args,_ = parser.parse_known_args()\n\n #-- for each ECCO LLC tile model\n for MODEL in args.model:\n 
#-- run program\n ecco_read_llc_tiles(args.directory, MODEL, args.year,\n RANGE=args.mean, VERBOSE=args.verbose, MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.abs", "numpy.nonzero", "numpy.unique", "numpy.arange", "numpy.squeeze", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.copy", "numpy.array", "numpy.ma.zeros", "numpy.zeros", "numpy.sum", "numpy.isclose" ], [ "numpy.logical_not", "numpy.nonzero", "numpy.atleast_1d", "numpy.copy", "numpy.count_nonzero", "numpy.ma.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hongzhili/pretrained-models.pytorch
[ "40b9212ce38b520dba7335645ad1dd41f3e857b0" ]
[ "pretrainedmodels/models/senet.py" ]
[ "\"\"\"\nResNet code gently borrowed from\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\"\"\"\n\nfrom collections import OrderedDict\nimport math\n\nimport torch.nn as nn\nfrom torch.utils import model_zoo\n\n__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',\n 'se_resnext50_32x4d', 'se_resnext101_32x4d']\n\npretrained_settings = {\n 'senet154': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet50': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet101': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet152': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext50_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext101_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n}\n\n\nclass SEModule(nn.Module):\n\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,\n padding=0)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,\n padding=0)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\n\nclass Bottleneck(nn.Module):\n \"\"\"\n Base class for bottlenecks that implements `forward()` method.\n \"\"\"\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = self.se_module(out) + residual\n out = self.relu(out)\n\n return out\n\n\nclass SEBottleneck(Bottleneck):\n \"\"\"\n Bottleneck for SENet154.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)\n self.bn1 = 
nn.BatchNorm2d(planes * 2)\n self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,\n stride=stride, padding=1, groups=groups,\n bias=False)\n self.bn2 = nn.BatchNorm2d(planes * 4)\n self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNetBottleneck(Bottleneck):\n \"\"\"\n ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe\n implementation and uses `stride=stride` in `conv1` and not in `conv2`\n (the latter is used in the torchvision implementation of ResNet).\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEResNetBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,\n stride=stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,\n groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNeXtBottleneck(Bottleneck):\n \"\"\"\n ResNeXt bottleneck type C with a Squeeze-and-Excitation module.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None, base_width=4):\n super(SEResNeXtBottleneck, self).__init__()\n width = math.floor(planes * (base_width / 64)) * groups\n self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,\n stride=1)\n self.bn1 = nn.BatchNorm2d(width)\n self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(width)\n self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SENet(nn.Module):\n\n def __init__(self, block, layers, groups, reduction, dropout_p=0.2,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n \"\"\"\n Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: 
False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 1000\n \"\"\"\n super(SENet, self).__init__()\n self.inplanes = inplanes\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n # To preserve compatibility with Caffe weights `ceil_mode=True`\n # is used instead of `padding=1`.\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,\n ceil_mode=True)))\n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n #self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,\n downsample_kernel_size=1, downsample_padding=0):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=downsample_kernel_size, stride=stride,\n padding=downsample_padding, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, groups, reduction, stride,\n downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups, reduction))\n\n return nn.Sequential(*layers)\n\n def features(self, x):\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n def logits(self, x):\n x = self.avg_pool(x)\n if self.dropout is not None:\n x = self.dropout(x)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n def forward(self, x):\n x = self.features(x)\n 
x = self.logits(x)\n return x\n\n\ndef initialize_pretrained_model(model, num_classes, settings):\n if num_classes == settings['num_classes']:\n model.load_state_dict(model_zoo.load_url(settings['url']))\n else:\n model_dict = model.state_dict()\n pretrained_dict = model_zoo.load_url(settings['url'])\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and \"last_linear\" not in k}\n model_dict.update(pretrained_dict) \n model.load_state_dict(model_dict)\n \n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n\n\ndef senet154(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16,\n dropout_p=0.2, num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['senet154'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n\n\ndef se_resnet50(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16,\n dropout_p=None, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0,\n num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['se_resnet50'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n\n\ndef se_resnet101(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEResNetBottleneck, [3, 4, 23, 3], groups=1, reduction=16,\n dropout_p=None, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0,\n num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['se_resnet101'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n\n\ndef se_resnet152(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEResNetBottleneck, [3, 8, 36, 3], groups=1, reduction=16,\n dropout_p=None, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0,\n num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['se_resnet152'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n\n\ndef se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,\n dropout_p=None, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0,\n num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['se_resnext50_32x4d'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n\n\ndef se_resnext101_32x4d(num_classes=1000, pretrained='imagenet'):\n model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,\n dropout_p=None, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0,\n num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['se_resnext101_32x4d'][pretrained]\n initialize_pretrained_model(model, num_classes, settings)\n return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChrisRenka/Tumor3D
[ "32b7fa4a8b5f679211f6a383ef6d9a4697dd60c2" ]
[ "plot_vasos.py" ]
[ "from mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\nprint (\"Número da simulação:\")\r\nnSim = input()\r\nprint (\"Número do frame:\")\r\nnFrame = input()\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection = '3d')\r\n\r\nxs = []\r\nys = []\r\nzs = []\r\n\r\nfVasos = \"Resultados\\\\{}\\\\vasos{}.txt\".format(nSim, nFrame)\r\nwith open(fVasos,'r') as csvfile:\r\n plots = csv.reader(csvfile, delimiter=',')\r\n for row in plots:\r\n xs.append(int(row[0]))\r\n ys.append(int(row[1]))\r\n zs.append(int(row[2]))\r\n\r\nax.scatter(xs, ys, zs, c = 'r', marker = 'o')\r\n\r\nax.set_xlabel('x')\r\nax.set_ylabel('y')\r\nax.set_zlabel('z')\r\n\r\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pivoz/Machine-Learning-Collection
[ "ee0b0f0718fac7810bb660713618605c58eb282e" ]
[ "ML/Pytorch/Basics/albumentations_tutorial/segmentation.py" ]
[ "import cv2\nimport albumentations as A\nimport numpy as np\nfrom utils import plot_examples\nfrom PIL import Image\n\nimage = Image.open(\"images/elon.jpeg\")\nmask = Image.open(\"images/mask.jpeg\")\nmask2 = Image.open(\"images/second_mask.jpeg\")\n\ntransform = A.Compose(\n [\n A.Resize(width=1920, height=1080),\n A.RandomCrop(width=1280, height=720),\n A.Rotate(limit=40, p=0.9, border_mode=cv2.BORDER_CONSTANT),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.1),\n A.RGBShift(r_shift_limit=25, g_shift_limit=25, b_shift_limit=25, p=0.9),\n A.OneOf([\n A.Blur(blur_limit=3, p=0.5),\n A.ColorJitter(p=0.5),\n ], p=1.0),\n ]\n)\n\nimages_list = [image]\nimage = np.array(image)\nmask = np.array(mask) # np.asarray(mask), np.array(mask)\nmask2 = np.array(mask2)\nfor i in range(4):\n augmentations = transform(image=image, masks=[mask, mask2])\n augmented_img = augmentations[\"image\"]\n augmented_masks = augmentations[\"masks\"]\n images_list.append(augmented_img)\n images_list.append(augmented_masks[0])\n images_list.append(augmented_masks[1])\nplot_examples(images_list)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmarvs/bfg-nets
[ "0c8e7f469b6a50c7b167ead98cb545d238dee214" ]
[ "bfgn/reporting/visualizations/model_performance.py" ]
[ "import logging\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn.metrics\nfrom matplotlib import gridspec\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom bfgn.reporting import samples\nfrom bfgn.reporting.visualizations import colormaps, subplots\n\n_logger = logging.getLogger(__name__)\n\n\ndef plot_classification_report(sampled: samples.Samples) -> List[plt.Figure]:\n if sampled.raw_responses is None or sampled.raw_predictions is None:\n _logger.debug(\"Confusion matrix not plotted; no responses or predictions available.\")\n return list()\n\n classes, actual, predicted = _calculate_classification_classes_actual_and_predicted(sampled)\n report = \"Classification report\\n\\n\" + sklearn.metrics.classification_report(actual, predicted, classes)\n fig, ax = plt.subplots(figsize=(8.5, 11), nrows=1, ncols=1)\n ax.text(0, 0, report, **{\"fontsize\": 8, \"fontfamily\": \"monospace\"})\n ax.axis(\"off\")\n fig.suptitle(\"{} Sequence Classification Report\".format(sampled.data_sequence_label))\n return [fig]\n\n\ndef plot_confusion_matrix(sampled: samples.Samples) -> [plt.Figure]:\n if sampled.raw_responses is None or sampled.raw_predictions is None:\n _logger.debug(\"Confusion matrix not plotted; no responses or predictions available.\")\n return list()\n\n classes, actual, predicted = _calculate_classification_classes_actual_and_predicted(sampled)\n confusion_matrix = sklearn.metrics.confusion_matrix(actual, predicted, labels=classes)\n normed_matrix = confusion_matrix.astype(float) / confusion_matrix.sum(axis=1)[:, np.newaxis]\n fig, axes = plt.subplots(figsize=(16, 8), nrows=1, ncols=2)\n\n for idx_ax, ax in enumerate(axes):\n if idx_ax == 0:\n title = \"Confusion matrix, with counts\"\n matrix = confusion_matrix\n value_format = \"d\"\n max_ = np.nanmax(matrix)\n elif idx_ax == 1:\n title = \"Normalized confusion matrix\"\n matrix = normed_matrix\n value_format = \".2f\"\n max_ = 1\n im = ax.imshow(matrix, interpolation=\"nearest\", vmin=0, vmax=max_, cmap=colormaps.COLORMAP_METRICS)\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im, cax=cax)\n\n ax.set(\n xticks=np.arange(matrix.shape[1]),\n yticks=np.arange(matrix.shape[0]),\n xticklabels=classes,\n yticklabels=classes,\n title=title,\n ylabel=\"True label\",\n xlabel=\"Predicted label\",\n )\n\n # Matrix element labels\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n ax.text(\n j,\n i,\n format(matrix[i, j], value_format),\n ha=\"center\",\n va=\"center\",\n color=\"white\" if matrix[i, j] > max_ / 2.0 else \"black\",\n )\n\n fig.suptitle(\"{} Sequence Confusion Matrix\".format(sampled.data_sequence_label or \"\"))\n plt.tight_layout(h_pad=1)\n return [fig]\n\n\ndef _calculate_classification_classes_actual_and_predicted(sampled):\n classes = range(sampled.num_responses)\n actual = np.argmax(sampled.raw_responses, axis=-1).ravel()\n actual = actual[np.isfinite(actual)]\n predicted = np.argmax(sampled.raw_predictions, axis=-1).ravel()\n predicted = predicted[np.isfinite(predicted)]\n return classes, actual, predicted\n\n\ndef plot_spatial_classification_error(\n sampled: samples.Samples, max_pages: int = 8, max_responses_per_page: int = 10\n) -> List[plt.Figure]:\n if sampled.raw_responses is None or sampled.raw_predictions is None:\n _logger.debug(\"Spatial classification residuals not plotted; no raw responses or predictions available.\")\n return list()\n\n actual = 
np.expand_dims(np.argmax(sampled.raw_responses, axis=-1), -1)\n predicted = np.expand_dims(np.argmax(sampled.raw_predictions, axis=-1), -1)\n error = (actual != predicted).astype(float)\n is_finite = np.logical_and(\n np.isfinite(sampled.raw_responses).all(axis=-1), np.isfinite(sampled.raw_predictions).all(axis=-1)\n )\n error[~is_finite] = np.nan\n error = np.nanmean(error, axis=0)\n return _plot_spatial_error(error, sampled, max_pages, max_responses_per_page)\n\n\ndef plot_spatial_regression_error(\n sampled: samples.Samples, max_pages: int = 8, max_responses_per_page: int = 10\n) -> List[plt.Figure]:\n if sampled.raw_responses is None or sampled.raw_predictions is None:\n _logger.debug(\"Spatial regression residuals not plotted; no raw responses or predictions available.\")\n return list()\n\n abs_error = np.nanmean(np.abs(sampled.raw_predictions - sampled.raw_responses), axis=0)\n return _plot_spatial_error(abs_error, sampled, max_pages, max_responses_per_page)\n\n\ndef _plot_spatial_error(\n error: np.array, sampled: samples.Samples, max_pages: int, max_responses_per_page: int\n) -> List[plt.Figure]:\n figures = []\n\n num_pages = min(max_pages, np.ceil(sampled.num_responses / max_responses_per_page))\n\n loss_window_radius = sampled.config.data_build.loss_window_radius\n window_radius = sampled.config.data_build.window_radius\n buffer = int(window_radius - loss_window_radius)\n\n def _get_axis_generator_for_page(grid, num_rows, num_cols):\n for idx_col in range(num_cols):\n for idx_row in range(num_rows):\n yield plt.subplot(grid[idx_row, idx_col])\n\n idx_page = 0\n idx_response = 0\n while idx_page < num_pages:\n num_figs_on_page = min(max_responses_per_page, error.shape[-1] - idx_response)\n nrows = 1\n ncols = num_figs_on_page\n width = 30 * ncols / (nrows + ncols)\n height = 30 * nrows / (nrows + ncols)\n fig = plt.figure(figsize=(width, height))\n grid = gridspec.GridSpec(nrows, ncols)\n for ax in _get_axis_generator_for_page(grid, nrows, ncols):\n min_ = 0\n if buffer > 0:\n max_ = np.nanmax(error[buffer:-buffer, buffer:-buffer, idx_response])\n else:\n max_ = np.nanmax(error[..., idx_response])\n ax.imshow(error[..., idx_response], vmin=min_, vmax=max_, cmap=colormaps.COLORMAP_ERROR)\n ax.set_xlabel(\"Response {}\".format(idx_response))\n ax.xaxis.set_label_position(\"top\")\n ax.set_xticks([])\n ax.set_yticks([])\n subplots.add_internal_window_to_subplot(sampled, ax)\n idx_response += 1\n if idx_response > error.shape[-1]:\n break\n figures.append(fig)\n idx_page += 1\n fig.suptitle(\n \"{} Sequence Response Spatial Deviation (page {})\".format(sampled.data_sequence_label or \"\", idx_page + 1)\n )\n return figures\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.tight_layout", "numpy.abs", "numpy.isfinite", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.ceil", "numpy.argmax", "matplotlib.pyplot.subplot", "numpy.nanmean", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WachiRatip/MLPipe_workshop
[ "c25b4ebbe5e095e3dace0f1ef9b4e77be551a301" ]
[ "code/regression.py" ]
[ "import pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.svm import SVR, LinearSVR, NuSVR\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.neural_network import MLPRegressor\r\nfrom sklearn.gaussian_process import GaussianProcessRegressor\r\nfrom sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor\r\nfrom sklearn.linear_model import HuberRegressor, LinearRegression, ElasticNet\r\n\r\nREGRESSORS = {\r\n ('KNeighborsRegressor', KNeighborsRegressor()),\r\n ('LinearRegression', LinearRegression()),\r\n ('HuberRegressor', HuberRegressor()),\r\n ('ElasticNet', ElasticNet()),\r\n ('LinearSVR', LinearSVR()),\r\n ('SVR', SVR()),\r\n ('NuSVR', NuSVR()),\r\n ('GradientBoostingRegressor', GradientBoostingRegressor()),\r\n ('AdaBoostRegressor', AdaBoostRegressor()),\r\n ('GaussianProcessRegressor', GaussianProcessRegressor()),\r\n ('MLPRegressor', MLPRegressor()),\r\n}\r\n\r\ndef train_cv(path, standardize, cv):\r\n if path == \"solar\":\r\n df = pd.read_csv(\"./data/solar/solar.csv\")\r\n X = df[[\"Solar_rad\",\"Temp\",\"TempAmb\"]].values\r\n y = df[['INV01.Ppv']].values.ravel()\r\n else:\r\n raise ValueError(\"Path to data must be specified.\")\r\n \r\n if standardize:\r\n scaler = StandardScaler().fit(X)\r\n X = scaler.transform(X)\r\n \r\n kf = KFold(n_splits=cv, shuffle=True)\r\n\r\n datasets = [\r\n (X[train_index], X[test_index], y[train_index], y[test_index]) for train_index, test_index in kf.split(X, y) \r\n ]\r\n \r\n print(\"name, fold, Train_R2, R2, MSE, RMSE\")\r\n for name, reg in REGRESSORS:\r\n for ds_cnt, ds in enumerate(datasets):\r\n X_train, X_test, y_train, y_test = ds\r\n reg.fit(X_train,y_train)\r\n self_rsq = reg.score(X_train, y_train)\r\n rsq = reg.score(X_test, y_test)\r\n mse = mean_squared_error(y_test, reg.predict(X_test))\r\n rmse = mean_squared_error(y_test, reg.predict(X_test), squared=False)\r\n print(f\"{name}, {ds_cnt+1}, {self_rsq}, {rsq}, {mse}, {rmse}\")" ]
[ [ "pandas.read_csv", "sklearn.linear_model.HuberRegressor", "sklearn.linear_model.ElasticNet", "sklearn.model_selection.KFold", "sklearn.svm.NuSVR", "sklearn.neighbors.KNeighborsRegressor", "sklearn.svm.SVR", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.gaussian_process.GaussianProcessRegressor", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.StandardScaler", "sklearn.neural_network.MLPRegressor", "sklearn.svm.LinearSVR", "sklearn.ensemble.AdaBoostRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
yerfor/Soft-DRGN
[ "0c96d1ea295077b949229261c37d8dde25001a03" ]
[ "scenarios/continuous_uav_base/render_utils.py" ]
[ "import pygame\nimport numpy as np\nimport cv2\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nYELLOW = (255, 255, 0)\nBROWN = (184, 134, 11)\nGRAY = (169, 169, 169)\nDISPLAY_BATTERY = True\n\n\nclass ContinuousWorldRenderer:\n def __init__(self, num_grid=500, obs_range=25, window_size=(900, 800), scale_factor=1, fps=20):\n pygame.init()\n self.num_grid = num_grid\n self.obs_range = obs_range\n self.window_size = window_size\n self.scale_factor = scale_factor\n self.fps = fps\n self.food_maze = None\n self.resource_maze = None\n self.agent_maze = None\n\n self.screen = pygame.display.set_mode(self.window_size)\n self.background_arr = self.draw_background()\n self.init()\n\n def get_mazes(self, food_maze, resource_maze, agent_maze):\n self.food_maze = food_maze\n self.resource_maze = resource_maze\n self.agent_maze = agent_maze\n return food_maze, resource_maze, agent_maze\n\n def draw_background(self):\n self.screen.fill(WHITE)\n pygame.draw.rect(self.screen, BLACK,\n [0, 0, self.scale_factor * (self.num_grid + 2 * self.obs_range),\n self.scale_factor * self.obs_range])\n pygame.draw.rect(self.screen, BLACK, [0, 0, self.obs_range * self.scale_factor,\n self.scale_factor * (self.num_grid + 2 * self.obs_range)])\n pygame.draw.rect(self.screen, BLACK,\n [self.scale_factor * (self.num_grid + self.obs_range), 0, self.obs_range * self.scale_factor,\n self.scale_factor * (self.num_grid + 2 * self.obs_range)])\n pygame.draw.rect(self.screen, BLACK,\n [0, self.scale_factor * (self.num_grid + self.obs_range),\n self.scale_factor * (self.num_grid + 2 * self.obs_range), self.obs_range * self.scale_factor])\n pygame.display.update()\n return pygame.surfarray.array2d(self.screen)\n\n def draw_foods(self, food_pos, color=GREEN, raidius=1.5):\n for p in food_pos:\n pygame.draw.circle(self.screen, color, (p * self.num_grid + self.obs_range)*self.scale_factor, raidius*self.scale_factor)\n\n def draw_charging_stations(self, charger_pos, color=BLUE, raidius=3):\n for p in charger_pos:\n pygame.draw.circle(self.screen, color, (p * self.num_grid + self.obs_range)*self.scale_factor, raidius*self.scale_factor)\n\n def draw_battery_usage(self, battery_usage, font, color=BLACK):\n for i in range(len(battery_usage)):\n color = RED if battery_usage[i] <= 0 else BLACK\n text_surf = font.render(\"UAV {:d}: {:d}\".format((i), battery_usage[i]), True, color)\n text_rect = text_surf.get_rect()\n text_rect.center = (750, 200+25*i)\n self.screen.blit(text_surf, text_rect)\n\n def draw_agents(self, agent_pos, color=BLUE, radius=2, agent_surface=None, show_id=True):\n for id,p in enumerate(agent_pos):\n if agent_surface:\n core_pos = (p * self.num_grid + self.obs_range) * self.scale_factor\n r = self.scale_factor*radius\n self.screen.blit(agent_surface,[core_pos[0]-r, core_pos[1]-r,2*r,2*r])\n else:\n pygame.draw.circle(self.screen, color, (p * self.num_grid + self.obs_range) * self.scale_factor, radius * self.scale_factor)\n if show_id:\n font_str = pygame.font.get_default_font()\n font = pygame.font.Font(font_str, 15)\n text_surf = font.render(str(id), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (core_pos[0]+10, core_pos[1]+10)\n self.screen.blit(text_surf, text_rect)\n\n def draw_agents_coverage(self, agent_pos, cover_range=1, color=BLUE):\n for p in agent_pos:\n pygame.draw.circle(self.screen, color, (p * self.num_grid + self.obs_range)*self.scale_factor, cover_range*self.scale_factor, width=1)\n\n def init(self):\n 
pygame.display.update()\n\n def render_uav(self, render_info, update=True,agent_surface=None):\n\n poi_pos = render_info['poi_pos']\n agent_pos = render_info['agent_pos']\n rect_obstacles = render_info['rect_obstacles']\n circle_obstacles = render_info['circle_obstacles']\n poi_cover_id = render_info['poi_cover_id']\n poi_cover_percent = render_info['poi_cover_percent']\n episode_coverage_item = render_info['episode_coverage_item']\n episode_fairness_item = render_info['episode_fairness_item']\n energy_consumption = render_info['energy_consumption']\n timeslot = render_info['timeslot']\n pygame.surfarray.blit_array(self.screen, self.background_arr)\n covered_poi_pos = []\n uncovered_poi_pos = []\n for i, pos in enumerate(poi_pos):\n if poi_cover_id[i] != 0:\n covered_poi_pos.append(pos)\n else:\n uncovered_poi_pos.append(pos)\n self.draw_foods(covered_poi_pos, color=GREEN)\n self.draw_foods(uncovered_poi_pos, color=GRAY)\n\n for (x_min,y_min,height,width) in rect_obstacles:\n rect = np.array([x_min, y_min, height, width])*self.scale_factor\n pygame.draw.rect(self.screen, BLACK, rect)\n for (x,y,r) in circle_obstacles:\n pygame.draw.circle(self.screen,BLACK, np.array([x,y])*self.scale_factor, r*self.scale_factor)\n\n self.draw_agents(agent_pos, agent_surface=agent_surface)\n self.draw_agents_coverage(agent_pos, cover_range=18, color=BROWN) # COMM\n self.draw_agents_coverage(agent_pos, cover_range=13, color=BLUE) # OBS\n self.draw_agents_coverage(agent_pos, cover_range=10, color=RED) # COVER\n\n font_str = pygame.font.get_default_font()\n font = pygame.font.Font(font_str, 15)\n text_surf = font.render(\"Timeslot: {}\".format(timeslot), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (680,25)\n self.screen.blit(text_surf, text_rect)\n\n font_str = pygame.font.get_default_font()\n font = pygame.font.Font(font_str, 15)\n text_surf = font.render(\"PoI Covered Percentage: {:.2f}\".format(poi_cover_percent), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (680,50)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Final Coverage Index: {:.2f}\".format(episode_coverage_item), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (680,100)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Final Fairness Index: {:.2f}\".format(episode_fairness_item), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (680,150)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Current Energy Index: {:.2f}\".format(energy_consumption), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (680,200)\n self.screen.blit(text_surf, text_rect)\n\n if update:\n pygame.display.update()\n ret_array = pygame.surfarray.array3d(self.screen)\n pygame.time.delay(1000 // self.fps)\n return ret_array\n\n def render_mcs(self, render_info, update=True,agent_surface=None):\n\n poi_pos = render_info['poi_pos']\n agent_pos = render_info['agent_pos']\n rect_obstacles = render_info['rect_obstacles']\n circle_obstacles = render_info['circle_obstacles']\n poi_cover_id = render_info['poi_cover_id']\n poi_cover_percent = render_info['poi_cover_percent']\n episode_coverage_item = render_info['episode_coverage_item']\n episode_fairness_item = render_info['episode_fairness_item']\n energy_consumption = render_info['energy_consumption']\n timeslot = render_info['timeslot']\n charger_pos = render_info['charger_pos']\n battery_usage = render_info['battery'] # UAV如何显示电量\n\n 
pygame.surfarray.blit_array(self.screen, self.background_arr)\n covered_poi_pos = []\n uncovered_poi_pos = []\n for i, pos in enumerate(poi_pos):\n if poi_cover_id[i] != 0:\n covered_poi_pos.append(pos)\n else:\n uncovered_poi_pos.append(pos)\n self.draw_foods(covered_poi_pos, color=GREEN)\n self.draw_foods(uncovered_poi_pos, color=GRAY)\n self.draw_charging_stations(charger_pos, color=BLUE)\n\n for (x_min,y_min,height,width) in rect_obstacles:\n rect = np.array([x_min, y_min, height, width])*self.scale_factor\n pygame.draw.rect(self.screen, BLACK, rect)\n for (x,y,r) in circle_obstacles:\n pygame.draw.circle(self.screen,BLACK, np.array([x,y])*self.scale_factor, r*self.scale_factor)\n\n self.draw_agents(agent_pos, agent_surface=agent_surface)\n self.draw_agents_coverage(agent_pos, cover_range=18, color=BROWN) # COMM\n self.draw_agents_coverage(agent_pos, cover_range=13, color=BLUE) # OBS\n self.draw_agents_coverage(agent_pos, cover_range=10, color=RED) # COVER\n # self.draw_agents_coverage(charger_pos, cover_range=5,color=RED) # Draw Charging Range\n\n font_str = pygame.font.get_default_font()\n font = pygame.font.Font(font_str, 15)\n text_surf = font.render(\"Timeslot: {}\".format(timeslot), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (790, 25)\n self.screen.blit(text_surf, text_rect)\n\n font_str = pygame.font.get_default_font()\n font = pygame.font.Font(font_str, 15)\n text_surf = font.render(\"PoI Covered Percentage: {:.2f}\".format(poi_cover_percent), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (790, 50)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Final Coverage Index: {:.2f}\".format(episode_coverage_item), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (790, 75)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Final Fairness Index: {:.2f}\".format(episode_fairness_item), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (790, 100)\n self.screen.blit(text_surf, text_rect)\n\n text_surf = font.render(\"Current Energy Index: {:.2f}\".format(energy_consumption), True, BLACK)\n text_rect = text_surf.get_rect()\n text_rect.center = (790, 125)\n self.screen.blit(text_surf, text_rect)\n\n if DISPLAY_BATTERY:\n self.draw_battery_usage(battery_usage, font, color=BLACK)\n\n if update:\n pygame.display.update()\n ret_array = pygame.surfarray.array3d(self.screen)\n pygame.time.delay(1000 // self.fps)\n return ret_array" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sarthakpati/nncf
[ "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac" ]
[ "tests/torch/quantization/test_onnx_export.py", "examples/tensorflow/segmentation/evaluation.py", "examples/tensorflow/segmentation/train.py", "examples/tensorflow/common/object_detection/utils/ops.py", "nncf/torch/nncf_network.py", "nncf/tensorflow/quantization/quantizers.py", "tools/debug/compare_accuracy.py", "tools/debug/common.py", "tests/torch/test_models/ssd_vgg.py", "examples/tensorflow/common/utils.py" ]
[ "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom itertools import product\nfrom typing import Tuple\n\nimport onnx\nimport pytest\nimport torch\n\nfrom nncf import NNCFConfig\nfrom nncf.torch.quantization.layers import PTQuantizerSpec\nfrom nncf.torch.quantization.layers import QUANTIZATION_MODULES\nfrom nncf.torch.quantization.layers import QuantizationMode\nfrom nncf.torch.quantization.layers import QuantizerExportMode\nfrom tests.torch.helpers import get_nodes_by_type\nfrom tests.torch.helpers import register_bn_adaptation_init_args\nfrom tests.torch.helpers import resolve_constant_node_inputs_to_values\nfrom tests.torch.test_helpers import TwoConvTestModel\nfrom tests.torch.test_helpers import load_exported_onnx_version\n\n\ndef get_config_for_export_mode(should_be_onnx_standard: bool) -> NNCFConfig:\n nncf_config = NNCFConfig()\n nncf_config.update({\n \"input_info\": {\n \"sample_size\": [1, 1, 4, 4]\n },\n \"compression\": {\n \"algorithm\": \"quantization\",\n \"export_to_onnx_standard_ops\": should_be_onnx_standard\n }\n })\n register_bn_adaptation_init_args(nncf_config)\n return nncf_config\n\n\ndef test_onnx_export_to_fake_quantize(tmp_path):\n model = TwoConvTestModel()\n nncf_config = get_config_for_export_mode(should_be_onnx_standard=False)\n onnx_model_proto = load_exported_onnx_version(nncf_config, model,\n path_to_storage_dir=tmp_path)\n num_fq = 0\n num_model_nodes = 0\n num_other_nodes = 0\n # pylint:disable=no-member\n for node in onnx_model_proto.graph.node:\n op_type = node.op_type\n if op_type == 'FakeQuantize':\n num_fq += 1\n elif op_type in ['Conv', 'Constant']:\n num_model_nodes += 1\n else:\n num_other_nodes += 1\n assert num_fq == 4\n assert num_other_nodes == 0\n\n\ndef test_onnx_export_to_quantize_dequantize(tmp_path):\n # It doesn't work with CPU target_device because\n # per-channel quantization is not supported in onnxruntime.\n model = TwoConvTestModel()\n nncf_config = get_config_for_export_mode(should_be_onnx_standard=True)\n nncf_config['target_device'] = 'TRIAL'\n onnx_model_proto = load_exported_onnx_version(nncf_config, model,\n path_to_storage_dir=tmp_path)\n num_q = 0\n num_dq = 0\n num_model_nodes = 0\n num_other_nodes = 0\n # pylint:disable=no-member\n for node in onnx_model_proto.graph.node:\n op_type = node.op_type\n if op_type == 'QuantizeLinear':\n num_q += 1\n elif op_type == 'DequantizeLinear':\n num_dq += 1\n elif op_type in ['Conv', 'Constant']:\n num_model_nodes += 1\n else:\n num_other_nodes += 1\n assert num_q == 4\n assert num_q == num_dq\n assert num_other_nodes == 0\n\n\nINPUT_TENSOR_SHAPE = (2, 64, 15, 10)\nPER_CHANNEL_AQ_SCALE_SHAPE = (1, INPUT_TENSOR_SHAPE[1], 1, 1)\n\n\[email protected]('per_channel, qmode, export_mode',\n product(\n [True, False],\n [QuantizationMode.SYMMETRIC, QuantizationMode.ASYMMETRIC],\n [QuantizerExportMode.FAKE_QUANTIZE, QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS]\n ))\ndef test_onnx_export_to_quantize_dequantize_per_channel(per_channel: bool,\n qmode: 
QuantizationMode,\n export_mode: QuantizerExportMode):\n scale_shape = PER_CHANNEL_AQ_SCALE_SHAPE if per_channel else (1,)\n qspec = PTQuantizerSpec(\n scale_shape=scale_shape,\n num_bits=8,\n mode=qmode,\n signedness_to_force=None,\n logarithm_scale=False,\n narrow_range=False,\n half_range=False,\n )\n\n q_cls = QUANTIZATION_MODULES.get(qmode)\n quantizer = q_cls(qspec)\n if qmode is QuantizationMode.SYMMETRIC:\n quantizer.scale = torch.nn.Parameter(torch.rand_like(quantizer.scale))\n else:\n quantizer.input_low = torch.nn.Parameter(torch.rand_like(quantizer.input_low))\n quantizer.input_range = torch.nn.Parameter(torch.rand_like(quantizer.input_range))\n # pylint: disable=protected-access\n quantizer._export_mode = export_mode\n\n x = torch.rand(INPUT_TENSOR_SHAPE)\n if quantizer.per_channel and export_mode is QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS:\n with pytest.raises(RuntimeError):\n quantizer.run_export_quantization(x)\n else:\n quantizer.run_export_quantization(x)\n\n\nclass TargetCompressionIdxTestModel(torch.nn.Module):\n CONV2D_TARGET_CHANNEL_COUNT = 5\n CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT = 10\n\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(in_channels=1,\n out_channels=self.CONV2D_TARGET_CHANNEL_COUNT,\n kernel_size=(1, 1))\n self.conv_t = torch.nn.ConvTranspose2d(in_channels=self.CONV2D_TARGET_CHANNEL_COUNT,\n out_channels=self.CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT,\n kernel_size=(1, 1))\n\n def forward(self, x):\n x = self.conv(x)\n x = self.conv_t(x)\n return x\n\n\ndef get_weight_fq_for_conv_node(node: onnx.NodeProto, graph: onnx.GraphProto):\n weight_input_tensor_id = node.input[1]\n matches = [x for x in graph.node if weight_input_tensor_id in x.output]\n assert len(matches) == 1\n match = next(iter(matches))\n assert match.op_type == \"FakeQuantize\"\n return match\n\n\ndef get_input_low_input_high_for_wfq_node(wfq_node: onnx.NodeProto, graph: onnx.GraphProto) \\\n -> Tuple[onnx.AttributeProto, onnx.AttributeProto]:\n assert wfq_node.op_type == \"FakeQuantize\"\n conv_wfq_inputs = list(resolve_constant_node_inputs_to_values(wfq_node, graph).values())\n return conv_wfq_inputs[1], conv_wfq_inputs[2]\n\n\ndef test_target_compression_idx(tmp_path):\n model = TargetCompressionIdxTestModel()\n nncf_config = get_config_for_export_mode(should_be_onnx_standard=False)\n onnx_model_proto = load_exported_onnx_version(nncf_config, model,\n path_to_storage_dir=tmp_path)\n onnx_graph = onnx_model_proto.graph # pylint:disable=no-member\n conv_nodes = get_nodes_by_type(onnx_model_proto, \"Conv\")\n assert len(conv_nodes) == 1\n conv_node = next(iter(conv_nodes))\n conv_wfq_node = get_weight_fq_for_conv_node(conv_node, onnx_graph)\n input_low_attr, input_high_attr = get_input_low_input_high_for_wfq_node(conv_wfq_node,\n onnx_graph)\n assert input_low_attr.shape == (TargetCompressionIdxTestModel.CONV2D_TARGET_CHANNEL_COUNT, 1, 1, 1)\n assert input_low_attr.shape == input_high_attr.shape\n\n conv_t_nodes = get_nodes_by_type(onnx_model_proto, \"ConvTranspose\")\n assert len(conv_t_nodes) == 1\n conv_t_node = next(iter(conv_t_nodes))\n conv_t_wfq_node = get_weight_fq_for_conv_node(conv_t_node, onnx_graph)\n input_low_t_attr, input_high_t_attr = get_input_low_input_high_for_wfq_node(conv_t_wfq_node,\n onnx_graph)\n assert input_low_t_attr.shape == (1, TargetCompressionIdxTestModel.CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT, 1, 1)\n assert input_low_t_attr.shape == input_high_t_attr.shape\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under 
the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport sys\n\nimport tensorflow as tf\n\nfrom nncf.tensorflow import create_compressed_model\nfrom nncf.tensorflow import register_default_init_args\nfrom nncf.tensorflow.helpers.model_manager import TFOriginalModelManager\nfrom nncf.tensorflow.utils.state import TFCompressionState\nfrom nncf.tensorflow.utils.state import TFCompressionStateLoader\n\nfrom examples.tensorflow.common.argparser import get_common_argument_parser\nfrom examples.tensorflow.common.distributed import get_distribution_strategy\nfrom examples.tensorflow.common.logger import logger\nfrom examples.tensorflow.common.object_detection.datasets.builder import COCODatasetBuilder\nfrom examples.tensorflow.common.object_detection.checkpoint_utils import get_variables\nfrom examples.tensorflow.common.sample_config import create_sample_config\nfrom examples.tensorflow.common.sample_config import SampleConfig\nfrom examples.tensorflow.common.utils import configure_paths\nfrom examples.tensorflow.common.utils import get_saving_parameters\nfrom examples.tensorflow.common.utils import print_args\nfrom examples.tensorflow.common.utils import SummaryWriter\nfrom examples.tensorflow.common.utils import write_metrics\nfrom examples.tensorflow.common.utils import Timer\nfrom examples.tensorflow.segmentation.models.model_selector import get_predefined_config\nfrom examples.tensorflow.segmentation.models.model_selector import get_model_builder\n\n\ndef get_argument_parser():\n parser = get_common_argument_parser(mode=False,\n weights=False,\n epochs=False,\n precision=False,\n save_checkpoint_freq=False,\n to_h5=False,\n dataset_type=False)\n\n parser.add_argument(\n '--mode',\n '-m',\n nargs='+',\n choices=['train', 'test', 'export'],\n default='train',\n help='train: performs validation of a checkpoint that was saved during training '\n '(use --checkpoint-save-dir to specify a path to the train-time checkpoint directory) ;'\n ' test: tests the model checkpoint (use --resume to specify the checkpoint file itself);'\n ' export: exports the model.')\n\n parser.add_argument(\n '--eval-timeout',\n default=None,\n type=int,\n help='The maximum number of seconds to wait between checkpoints. 
'\n 'If left as None, then the process will wait indefinitely.'\n )\n\n parser.add_argument(\n '--weights',\n default=None,\n type=str,\n help='Path to pretrained weights in ckpt format.'\n )\n\n return parser\n\n\ndef get_config_from_argv(argv, parser):\n args = parser.parse_args(args=argv)\n\n sample_config = SampleConfig(\n {'dataset_type': 'tfrecords'}\n )\n\n config_from_json = create_sample_config(args, parser)\n predefined_config = get_predefined_config(config_from_json.model)\n\n sample_config.update(predefined_config)\n sample_config.update(config_from_json)\n configure_paths(sample_config)\n\n return sample_config\n\n\ndef get_dataset_builders(config, num_devices):\n val_builder = COCODatasetBuilder(config=config,\n is_train=False,\n num_devices=num_devices)\n config_ = config.deepcopy()\n config_.batch_size = val_builder.batch_size\n calibration_builder = COCODatasetBuilder(config=config_,\n is_train=True,\n num_devices=1)\n return val_builder, calibration_builder\n\n\ndef load_checkpoint(checkpoint, ckpt_path):\n logger.info('Load from checkpoint is enabled')\n if tf.io.gfile.isdir(ckpt_path):\n path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)\n logger.info('Latest checkpoint: {}'.format(path_to_checkpoint))\n else:\n path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + '.index') else None\n logger.info('Provided checkpoint: {}'.format(path_to_checkpoint))\n\n if not path_to_checkpoint:\n logger.info('No checkpoint detected')\n return 0\n\n logger.info('Checkpoint file {} found and restoring from checkpoint'.format(path_to_checkpoint))\n status = checkpoint.restore(path_to_checkpoint)\n status.expect_partial()\n logger.info('Completed loading from checkpoint')\n\n return None\n\n\ndef load_compression_state(ckpt_path: str):\n checkpoint = tf.train.Checkpoint(compression_state=TFCompressionStateLoader())\n load_checkpoint(checkpoint, ckpt_path)\n return checkpoint.compression_state.state\n\n\ndef evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq):\n \"\"\"Runs evaluation steps and aggregate metrics\"\"\"\n timer = Timer()\n timer.tic()\n\n logger.info('Testing...')\n for batch_idx, x in enumerate(test_dist_dataset):\n labels, outputs = test_step(x)\n metric.update_state(labels, outputs)\n\n if batch_idx % print_freq == 0:\n time = timer.toc(average=False)\n logger.info('Predict for batch: {}/{} Time: {:.3f} sec'.format(batch_idx, num_batches, time))\n timer.tic()\n\n logger.info('Total time: {:.3f} sec'.format(timer.total_time))\n\n timer.reset()\n\n logger.info('Evaluating predictions...')\n timer.tic()\n result = metric.result()\n timer.toc(average=False)\n logger.info('Total time: {:.3f} sec'.format(timer.total_time))\n\n return result\n\n\ndef create_test_step_fn(strategy, model, predict_post_process_fn):\n \"\"\"Creates a distributed test step\"\"\"\n\n def _test_step_fn(inputs):\n inputs, labels = inputs\n model_outputs = model(inputs, training=False)\n labels, prediction_outputs = predict_post_process_fn(labels, model_outputs)\n\n return labels, prediction_outputs\n\n @tf.function\n def test_step(dataset_inputs):\n labels, outputs = strategy.run(_test_step_fn, args=(dataset_inputs,))\n outputs = tf.nest.map_structure(strategy.experimental_local_results, outputs)\n labels = tf.nest.map_structure(strategy.experimental_local_results, labels)\n\n return labels, outputs\n\n return test_step\n\n\ndef restore_compressed_model(config, strategy, model_builder, ckpt_path = None):\n compression_state = None\n if ckpt_path:\n 
compression_state = load_compression_state(ckpt_path)\n\n with TFOriginalModelManager(model_builder.build_model,\n weights=config.get('weights', None),\n is_training=False) as model:\n with strategy.scope():\n compression_ctrl, compress_model = create_compressed_model(model,\n config.nncf_config,\n compression_state)\n\n variables = get_variables(compress_model)\n checkpoint = tf.train.Checkpoint(variables=variables,\n compression_state=TFCompressionState(compression_ctrl),\n step=tf.Variable(0))\n if ckpt_path:\n load_checkpoint(checkpoint, config.ckpt_path)\n\n return compression_ctrl, compress_model, checkpoint\n\n\ndef run_evaluation(config, eval_timeout=None):\n \"\"\"Runs evaluation on checkpoint save directory\"\"\"\n strategy = get_distribution_strategy(config)\n if config.metrics_dump is not None:\n write_metrics(0, config.metrics_dump)\n\n validation_builder, calibration_builder = get_dataset_builders(config, strategy.num_replicas_in_sync)\n calibration_dataset = calibration_builder.build()\n val_dataset = validation_builder.build()\n num_batches = validation_builder.steps_per_epoch\n test_dist_dataset = strategy.experimental_distribute_dataset(val_dataset)\n\n config.nncf_config = register_default_init_args(nncf_config=config.nncf_config,\n data_loader=calibration_dataset,\n batch_size=validation_builder.global_batch_size)\n\n # We use `model_batch_size` to create input layer for model\n config.model_batch_size = validation_builder.batch_size\n\n model_builder = get_model_builder(config)\n eval_metric = model_builder.eval_metrics()\n predict_post_process_fn = model_builder.post_processing\n\n if 'test' in config.mode:\n compression_ctrl, compress_model, _ = restore_compressed_model(config, strategy, model_builder,\n config.ckpt_path)\n test_step = create_test_step_fn(strategy, compress_model, predict_post_process_fn)\n\n statistics = compression_ctrl.statistics()\n logger.info(statistics.to_str())\n metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq)\n eval_metric.reset_states()\n logger.info('Test metric = {}'.format(metric_result))\n\n if 'export' in config.mode:\n save_path, save_format = get_saving_parameters(config)\n compression_ctrl.export_model(save_path, save_format)\n logger.info(\"Saved to {}\".format(save_path))\n\n elif 'train' in config.mode:\n validation_summary_writer = SummaryWriter(config.log_dir, 'validation')\n\n is_first_checkpoint = True\n for checkpoint_path in tf.train.checkpoints_iterator(config.checkpoint_save_dir, config.eval_timeout):\n if is_first_checkpoint:\n is_first_checkpoint = False\n _, compress_model, checkpoint = restore_compressed_model(config, strategy, model_builder,\n checkpoint_path)\n test_step = create_test_step_fn(strategy, compress_model, predict_post_process_fn)\n else:\n checkpoint.restore(checkpoint_path).expect_partial()\n\n logger.info('Checkpoint file {} found and restoring from checkpoint'.format(checkpoint_path))\n logger.info('Checkpoint step: {}'.format(checkpoint.step.numpy()))\n metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq)\n\n current_step = checkpoint.step.numpy()\n validation_summary_writer(metrics=metric_result, step=current_step)\n\n eval_metric.reset_states()\n logger.info('Validation metric = {}'.format(metric_result))\n\n validation_summary_writer.close()\n\n if config.metrics_dump is not None:\n write_metrics(metric_result['AP'], config.metrics_dump)\n\n\ndef export(config):\n model_builder = 
get_model_builder(config)\n\n strategy = tf.distribute.get_strategy()\n compression_ctrl, _, _ = restore_compressed_model(config, strategy, model_builder, config.ckpt_path)\n\n save_path, save_format = get_saving_parameters(config)\n compression_ctrl.export_model(save_path, save_format)\n logger.info(\"Saved to {}\".format(save_path))\n\n\ndef main(argv):\n tf.get_logger().setLevel('INFO')\n parser = get_argument_parser()\n config = get_config_from_argv(argv, parser)\n print_args(config)\n\n if config.dataset_type != 'tfrecords':\n raise RuntimeError('The train.py does not support TensorFlow Datasets (TFDS). '\n 'Please use TFRecords.')\n\n if 'train' in config.mode or 'test' in config.mode:\n run_evaluation(config)\n elif 'export' in config.mode:\n export(config)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom nncf.tensorflow import create_compressed_model\nfrom nncf.tensorflow.helpers.model_manager import TFOriginalModelManager\nfrom nncf.tensorflow.initialization import register_default_init_args\nfrom nncf.common.utils.tensorboard import prepare_for_tensorboard\nfrom nncf.tensorflow.utils.state import TFCompressionState\nfrom nncf.tensorflow.utils.state import TFCompressionStateLoader\n\nfrom examples.tensorflow.common.argparser import get_common_argument_parser\nfrom examples.tensorflow.common.distributed import get_distribution_strategy\nfrom examples.tensorflow.common.logger import logger\nfrom examples.tensorflow.common.object_detection.checkpoint_utils import get_variables\nfrom examples.tensorflow.common.object_detection.datasets.builder import COCODatasetBuilder\nfrom examples.tensorflow.common.optimizer import build_optimizer\nfrom examples.tensorflow.common.sample_config import create_sample_config\nfrom examples.tensorflow.common.sample_config import SampleConfig\nfrom examples.tensorflow.common.scheduler import build_scheduler\nfrom examples.tensorflow.common.utils import configure_paths\nfrom examples.tensorflow.common.utils import create_code_snapshot\nfrom examples.tensorflow.common.utils import print_args\nfrom examples.tensorflow.common.utils import serialize_config\nfrom examples.tensorflow.common.utils import serialize_cli_args\nfrom examples.tensorflow.common.utils import SummaryWriter\nfrom examples.tensorflow.common.utils import Timer\nfrom examples.tensorflow.segmentation.models.model_selector import get_model_builder\nfrom examples.tensorflow.segmentation.models.model_selector import get_predefined_config\n\n\ndef get_argument_parser():\n parser = get_common_argument_parser(mode=False,\n weights=False,\n precision=False,\n save_checkpoint_freq=False,\n export_args=False,\n dataset_type=False,\n cpu_only=False,\n metrics_dump=False)\n\n parser.add_argument('--backbone-checkpoint',\n default=None,\n type=str,\n help='Path to backbone checkpoint.')\n\n 
parser.add_argument('--weights',\n default=None,\n type=str,\n help='Path to pretrained weights in ckpt format.')\n\n return parser\n\n\ndef get_config_from_argv(argv, parser):\n args = parser.parse_args(args=argv)\n\n sample_config = SampleConfig(\n {'dataset_type': 'tfrecords'}\n )\n\n config_from_json = create_sample_config(args, parser)\n predefined_config = get_predefined_config(config_from_json.model)\n\n sample_config.update(predefined_config)\n sample_config.update(config_from_json)\n configure_paths(sample_config)\n\n return sample_config\n\n\ndef get_dataset_builders(config, num_devices):\n train_builder = COCODatasetBuilder(config=config,\n is_train=True,\n num_devices=num_devices)\n\n config_ = config.deepcopy()\n config_.batch_size = train_builder.batch_size\n calibration_builder = COCODatasetBuilder(config=config_,\n is_train=True,\n num_devices=1)\n\n return train_builder, calibration_builder\n\n\ndef load_checkpoint(checkpoint, ckpt_path):\n logger.info('Load from checkpoint is enabled')\n if tf.io.gfile.isdir(ckpt_path):\n path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)\n logger.info('Latest checkpoint: {}'.format(path_to_checkpoint))\n else:\n path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + '.index') else None\n logger.info('Provided checkpoint: {}'.format(path_to_checkpoint))\n\n if not path_to_checkpoint:\n logger.info('No checkpoint detected')\n return 0\n\n logger.info('Checkpoint file {} found and restoring from checkpoint'.format(path_to_checkpoint))\n status = checkpoint.restore(path_to_checkpoint)\n status.expect_partial()\n logger.info('Completed loading from checkpoint')\n\n return None\n\n\ndef resume_from_checkpoint(checkpoint_manager, ckpt_path, steps_per_epoch):\n if load_checkpoint(checkpoint_manager.checkpoint, ckpt_path) == 0:\n return 0\n\n optimizer = checkpoint_manager.checkpoint.optimizer\n initial_step = optimizer.iterations.numpy()\n initial_epoch = initial_step // steps_per_epoch\n\n logger.info('Resuming from epoch %d (global step %d)', initial_epoch, initial_step)\n return initial_epoch, initial_step\n\n\ndef load_compression_state(ckpt_path: str):\n checkpoint = tf.train.Checkpoint(compression_state=TFCompressionStateLoader())\n load_checkpoint(checkpoint, ckpt_path)\n return checkpoint.compression_state.state\n\n\ndef create_train_step_fn(strategy, model, loss_fn, optimizer):\n \"\"\"Creates a distributed training step\"\"\"\n\n def _train_step_fn(inputs):\n inputs, labels = inputs\n with tf.GradientTape() as tape:\n outputs = model(inputs, training=True)\n all_losses = loss_fn(labels, outputs)\n losses = {}\n for k, v in all_losses.items():\n losses[k] = tf.reduce_mean(v)\n per_replica_loss = losses['total_loss'] / strategy.num_replicas_in_sync\n\n grads = tape.gradient(per_replica_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n return losses\n\n @tf.function\n def train_step(dataset_inputs):\n per_replica_losses = strategy.run(_train_step_fn, args=(dataset_inputs,))\n losses = tf.nest.map_structure(lambda x: strategy.reduce(tf.distribute.ReduceOp.MEAN, x, axis=None),\n per_replica_losses)\n return losses\n\n return train_step\n\n\ndef train(train_step, train_dist_dataset, initial_epoch, initial_step,\n epochs, steps_per_epoch, checkpoint_manager, compression_ctrl, log_dir, optimizer, print_freq):\n\n train_summary_writer = SummaryWriter(log_dir, 'train')\n compression_summary_writer = SummaryWriter(log_dir, 'compression')\n\n timer = Timer()\n timer.tic()\n\n 
logger.info('Training...')\n for epoch in range(initial_epoch, epochs):\n logger.info('Epoch: {}/{}'.format(epoch, epochs))\n compression_ctrl.scheduler.epoch_step(epoch)\n\n for step, x in enumerate(train_dist_dataset):\n if epoch == initial_epoch and step < initial_step % steps_per_epoch:\n continue\n\n checkpoint_manager.checkpoint.step.assign_add(1)\n\n if step == steps_per_epoch:\n save_path = checkpoint_manager.save()\n logger.info('Saved checkpoint for epoch={}: {}'.format(epoch, save_path))\n break\n\n compression_ctrl.scheduler.step()\n train_loss = train_step(x)\n train_metric_result = tf.nest.map_structure(lambda s: s.numpy().astype(float), train_loss)\n\n if np.isnan(train_metric_result['total_loss']):\n raise ValueError('total loss is NaN')\n\n train_metric_result.update({'learning_rate': optimizer.lr(optimizer.iterations).numpy()})\n\n train_summary_writer(metrics=train_metric_result, step=optimizer.iterations.numpy())\n\n if step % print_freq == 0:\n time = timer.toc(average=False)\n logger.info('Step: {}/{} Time: {:.3f} sec'.format(step, steps_per_epoch, time))\n logger.info('Training metric = {}'.format(train_metric_result))\n timer.tic()\n\n statistics = compression_ctrl.statistics()\n logger.info(statistics.to_str())\n statistics = {\n f'compression/statistics/{name}': value for name, value in prepare_for_tensorboard(statistics).items()\n }\n compression_summary_writer(metrics=statistics,\n step=optimizer.iterations.numpy())\n\n train_summary_writer.close()\n compression_summary_writer.close()\n\n\ndef run_train(config):\n strategy = get_distribution_strategy(config)\n\n # Create dataset\n builders = get_dataset_builders(config, strategy.num_replicas_in_sync)\n\n datasets = [builder.build() for builder in builders]\n train_builder, _ = builders\n train_dataset, calibration_dataset = datasets\n train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)\n\n # Training parameters\n epochs = config.epochs\n steps_per_epoch = train_builder.steps_per_epoch\n\n # We use `model_batch_size` to create input layer for model\n config.model_batch_size = train_builder.batch_size\n\n # Create model builder\n model_builder = get_model_builder(config)\n\n # Register additional parameters in the NNCFConfig for initialization\n # the compressed model during building\n nncf_config = config.nncf_config\n nncf_config = register_default_init_args(nncf_config=nncf_config,\n data_loader=calibration_dataset,\n batch_size=train_builder.global_batch_size)\n\n resume_training = config.ckpt_path is not None\n\n compression_state = None\n if resume_training:\n compression_state = load_compression_state(config.ckpt_path)\n\n with TFOriginalModelManager(model_builder.build_model,\n weights=config.get('weights', None),\n is_training=True) as model:\n with strategy.scope():\n compression_ctrl, compress_model = create_compressed_model(model, nncf_config, compression_state)\n\n scheduler = build_scheduler(\n config=config,\n steps_per_epoch=steps_per_epoch)\n\n optimizer = build_optimizer(\n config=config,\n scheduler=scheduler)\n\n loss_fn = model_builder.build_loss_fn(compress_model, compression_ctrl.loss)\n\n variables = get_variables(compress_model)\n checkpoint = tf.train.Checkpoint(variables=variables,\n optimizer=optimizer,\n compression_state=TFCompressionState(compression_ctrl),\n step=tf.Variable(0))\n checkpoint_manager = tf.train.CheckpointManager(checkpoint, config.checkpoint_save_dir, max_to_keep=None)\n\n initial_epoch = initial_step = 0\n if resume_training:\n 
initial_epoch, initial_step = resume_from_checkpoint(checkpoint_manager,\n config.ckpt_path,\n steps_per_epoch)\n\n statistics = compression_ctrl.statistics()\n logger.info(statistics.to_str())\n\n train_step = create_train_step_fn(strategy, compress_model, loss_fn, optimizer)\n\n train(train_step, train_dist_dataset, initial_epoch, initial_step,\n epochs, steps_per_epoch, checkpoint_manager, compression_ctrl, config.log_dir, optimizer, config.print_freq)\n\n logger.info('Compression statistics')\n statistics = compression_ctrl.statistics()\n logger.info(statistics.to_str())\n\n\ndef main(argv):\n parser = get_argument_parser()\n config = get_config_from_argv(argv, parser)\n print_args(config)\n\n serialize_config(config.nncf_config, config.log_dir)\n serialize_cli_args(parser, argv, config.log_dir)\n\n nncf_root = Path(__file__).absolute().parents[3]\n create_code_snapshot(nncf_root, os.path.join(config.log_dir, \"snapshot.tar.gz\"))\n\n if config.dataset_type != 'tfrecords':\n raise RuntimeError('The train.py does not support TensorFlow Datasets (TFDS). '\n 'Please use TFRecords.')\n\n run_train(config)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom examples.tensorflow.common.object_detection.utils import shape_utils\n\n\ndef indices_to_dense_vector(indices,\n size,\n indices_value=1.,\n default_value=0,\n dtype=tf.float32):\n \"\"\"Creates dense vector with indices set to specific value and rest to zeros.\n\n This function exists because it is unclear if it is safe to use\n tf.sparse_to_dense(indices, [size], 1, validate_indices=False)\n with indices which are not ordered.\n This function accepts a dynamic size (e.g. tf.shape(tensor)[0])\n\n Args:\n indices: 1d Tensor with integer indices which are to be set to\n indices_values.\n size: scalar with size (integer) of output Tensor.\n indices_value: values of elements specified by indices in the output vector\n default_value: values of other elements in the output vector.\n dtype: data type.\n\n Returns:\n dense 1D Tensor of shape [size] with indices set to indices_values and the\n rest set to default_value.\n \"\"\"\n size = tf.cast(size, tf.int32)\n zeros = tf.ones([size], dtype=dtype) * default_value\n values = tf.ones_like(indices, dtype=dtype) * indices_value\n\n return tf.dynamic_stitch(\n [tf.range(size), tf.cast(indices, tf.int32)], [zeros, values])\n\n\ndef matmul_gather_on_zeroth_axis(params, indices, scope=None):\n \"\"\"Matrix multiplication based implementation of tf.gather on zeroth axis.\n\n Args:\n params: A float32 Tensor. The tensor from which to gather values. Must be at\n least rank 1.\n indices: A Tensor. Must be one of the following types: int32, int64. Must be\n in range [0, params.shape[0])\n scope: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as params. 
Values from params gathered\n from indices given by indices, with shape indices.shape + params.shape[1:].\n \"\"\"\n scope = scope or 'MatMulGather'\n with tf.name_scope(scope):\n params_shape = shape_utils.combined_static_and_dynamic_shape(params)\n indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)\n params2d = tf.reshape(params, [params_shape[0], -1])\n indicator_matrix = tf.one_hot(indices, params_shape[0], on_value=None, off_value=None)\n gathered_result_flattened = tf.matmul(indicator_matrix, params2d)\n return tf.reshape(gathered_result_flattened,\n tf.stack(indices_shape + params_shape[1:]))\n", "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport inspect\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import TypeVar\n\nimport functools\nimport torch\nfrom copy import deepcopy\nfrom torch import nn\n\nfrom nncf.common.graph.definitions import MODEL_INPUT_OP_NAME\nfrom nncf.common.graph.definitions import MODEL_OUTPUT_OP_NAME\nfrom nncf.common.graph import NNCFNode\nfrom nncf.common.graph import NNCFNodeName\nfrom nncf.common.graph.model_transformer import ModelTransformer\nfrom nncf.common.graph.transformations.commands import TargetType\nfrom nncf.common.graph.transformations.commands import TransformationPriority\nfrom nncf.common.insertion_point_graph import InsertionPointGraph\nfrom nncf.common.insertion_point_graph import PostHookInsertionPoint\nfrom nncf.common.insertion_point_graph import PreHookInsertionPoint\nfrom nncf.common.utils.logger import logger as nncf_logger\nfrom nncf.common.utils.ordered_enum import OrderedEnum\nfrom nncf.torch.debug import CombinedDebugInterface\nfrom nncf.torch.debug import debuggable_forward\nfrom nncf.common.utils.debug import is_debug\nfrom nncf.torch.dynamic_graph.context import TracingContext\nfrom nncf.torch.dynamic_graph.graph import DynamicGraph\nfrom nncf.torch.dynamic_graph.graph import ShapeIgnoringTensorMetaComparator\nfrom nncf.torch.dynamic_graph.graph_tracer import GraphTracer\nfrom nncf.torch.dynamic_graph.graph_tracer import ModelInputInfo\nfrom nncf.torch.dynamic_graph.graph_tracer import PostGraphBuildActing\nfrom nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn\nfrom nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager\nfrom nncf.torch.dynamic_graph.io_handling import replicate_same_tensors\nfrom nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk\nfrom nncf.torch.dynamic_graph.operation_address import OperationAddress\nfrom nncf.torch.dynamic_graph.patch_pytorch import ignore_scope\nfrom nncf.torch.dynamic_graph.scope import Scope\nfrom nncf.torch.dynamic_graph.trace_tensor import TracedTensor\nfrom nncf.torch.dynamic_graph.transform_graph import replace_modules_by_nncf_modules\nfrom nncf.torch.graph.graph import PTNNCFGraph\nfrom 
nncf.torch.graph.graph_builder import GraphBuilder\nfrom nncf.torch.graph.graph_builder import GraphConverter\nfrom nncf.torch.graph.operator_metatypes import SplitMetatype\nfrom nncf.torch.graph.transformations.commands import PTInsertionCommand\nfrom nncf.torch.graph.transformations.commands import PTTargetPoint\nfrom nncf.torch.graph.transformations.layout import PTTransformationLayout\nfrom nncf.torch.knowledge_distillation.knowledge_distillation_handler import KnowledgeDistillationLossHandler\nfrom nncf.torch.layers import NNCF_MODULES\nfrom nncf.torch.layers import NNCF_WRAPPED_USER_MODULES_DICT\nfrom nncf.torch.module_operations import UpdateWeight\nfrom nncf.torch.quantization.layers import QUANTIZATION_MODULES\nfrom nncf.torch.utils import compute_FLOPs_hook\nfrom nncf.torch.utils import get_all_modules_by_type\nfrom nncf.torch.utils import get_state_dict_names_with_modules\nfrom nncf.torch.nested_objects_traversal import objwalk\n\nMODEL_WRAPPED_BY_NNCF_ATTR_NAME = 'nncf_module'\nLEGACY_ACT_STORAGE_NAME = \"activation_quantizers\"\nEXTERNAL_QUANTIZERS_STORAGE_NAME = \"external_quantizers\"\n\nModule = TypeVar('Module', bound=nn.Module)\n\n\nclass ExtraCompressionModuleType(Enum):\n EXTERNAL_QUANTIZER = 0\n\n\nclass LoadStateListener:\n \"\"\"\n Resets the initialization flags (`initialized`) for all quantization modules on `load_state_dict` call.\n These flags are used to update not loaded params (from checkpoint or model's state)\n on initialization stage of algorithm.\n Flags reset is required on each call of `load_state_dict`, because internal method (`build_graph`)\n restores model state by calling this method.\n \"\"\"\n\n def __init__(self, model, all_quantizations):\n # pylint: disable=protected-access\n self.hook = model._register_load_state_dict_pre_hook(\n functools.partial(self.hook_fn, quantize_modules=all_quantizations.values()))\n\n def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,\n quantize_modules):\n for module in quantize_modules:\n module.initialized = False\n\n def close(self):\n self.hook.remove()\n\n\nclass PTInsertionType(OrderedEnum):\n NNCF_MODULE_PRE_OP = 0\n NNCF_MODULE_POST_OP = 1\n OPERATOR_PRE_HOOK = 2\n OPERATOR_POST_HOOK = 3\n\n\nclass PTInsertionPoint:\n TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT = {\n TargetType.PRE_LAYER_OPERATION: PTInsertionType.NNCF_MODULE_PRE_OP,\n TargetType.POST_LAYER_OPERATION: PTInsertionType.NNCF_MODULE_POST_OP,\n TargetType.OPERATION_WITH_WEIGHTS: PTInsertionType.NNCF_MODULE_PRE_OP,\n TargetType.OPERATOR_PRE_HOOK: PTInsertionType.OPERATOR_PRE_HOOK,\n TargetType.OPERATOR_POST_HOOK: PTInsertionType.OPERATOR_POST_HOOK\n }\n\n def _get_pt_insertion_type(self, target_type: TargetType) -> PTInsertionType:\n if target_type not in PTInsertionPoint.TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT:\n raise RuntimeError(\"Unsupported target type for PyTorch: {}\".format(target_type))\n return PTInsertionPoint.TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT[target_type]\n\n def __init__(self, target_type: TargetType, op_address: OperationAddress,\n input_port_id: int = None):\n self.insertion_type = self._get_pt_insertion_type(target_type)\n self.op_address = op_address\n self.module_scope = op_address.scope_in_model\n self.input_port_id = input_port_id\n\n def __eq__(self, other: 'PTInsertionPoint'):\n return self.insertion_type == other.insertion_type and \\\n self.op_address == other.op_address and \\\n self.module_scope == other.module_scope and \\\n self.input_port_id == other.input_port_id\n\n 
def __str__(self):\n return ' '.join([str(v) for v in self.__dict__.values()])\n\n def __hash__(self):\n return hash(str(self))\n\n# pylint: disable=too-many-public-methods\n\n\n@ignore_scope\nclass NNCFNetwork(nn.Module, PostGraphBuildActing):\n MODEL_STATE_VERSION_ATTR = '_nncf_model_state_version'\n MODEL_STATE_VERSION = 1\n\n def __init__(self, module, input_infos: List[ModelInputInfo],\n dummy_forward_fn=None, wrap_inputs_fn=None, scopes_without_shape_matching=None,\n ignored_scopes=None, target_scopes=None, reset: bool = False, wrap_outputs_fn=None,\n original_model_accuracy=None):\n super().__init__()\n self._set_nncf_wrapped_model(module)\n self._forward_signature = inspect.signature(module.forward)\n self.input_infos = input_infos\n\n self._original_model_accuracy = original_model_accuracy\n\n self.ignored_scopes = ignored_scopes\n self.target_scopes = target_scopes\n self._user_dummy_forward_fn = dummy_forward_fn\n self._kd_loss_handler = None\n\n try:\n device = next(module.parameters()).device\n except StopIteration:\n # Param-less model, assume CPU\n device = 'cpu'\n\n if wrap_inputs_fn is not None:\n self._wrap_inputs_fn = wrap_inputs_fn\n else:\n self.__input_infos_based_input_wrapper = InputInfoWrapManager(self.input_infos,\n self._forward_signature,\n module_ref_for_device=self)\n self._wrap_inputs_fn = self.__input_infos_based_input_wrapper.wrap_inputs\n\n if wrap_outputs_fn is not None:\n self._wrap_outputs_fn = wrap_outputs_fn\n else:\n self._wrap_outputs_fn = wrap_nncf_model_outputs_with_objwalk\n\n self._nncf_module_scopes = [] # type: List[Scope]\n self.scopes_without_shape_matching = scopes_without_shape_matching\n self.debug_interface = CombinedDebugInterface() if is_debug() else None\n self._extra_module_types = [] # type: List[ExtraCompressionModuleType]\n # pylint:disable=line-too-long\n self._insertions_into_original_graph = {} # type: Dict[PTTargetPoint, List[Tuple[Callable, TransformationPriority]]]\n\n _orig_graph_build_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=True,\n with_output_tracing=True)\n\n nncf_wrapped_model = self.get_nncf_wrapped_model()\n eval_only_op_scopes = self._collect_eval_only_op_scopes(nncf_wrapped_model,\n _orig_graph_build_forward_fn)\n\n # all modules called in eval mode should be replaced prior to graph building\n self._replace_modules_by_nncf_modules(device, eval_only_op_scopes, reset)\n\n _orig_context = TracingContext()\n\n _orig_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n _orig_context.add_node_comparators([MODEL_OUTPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n if self.scopes_without_shape_matching:\n _orig_context.add_node_comparators(scopes_without_shape_matching,\n ShapeIgnoringTensorMetaComparator())\n\n self._original_dynamic_graph = GraphTracer(_orig_graph_build_forward_fn).trace_graph(nncf_wrapped_model,\n _orig_context,\n as_eval=True)\n self._original_graph = GraphConverter.convert(self._original_dynamic_graph,\n input_infos=self.input_infos)\n self._compressed_graph = None # type: PTNNCFGraph\n\n self._compressed_context = TracingContext()\n\n self._dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False,\n with_output_tracing=False)\n self._in_user_dummy_forward = False\n\n self._compressed_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n self._compressed_context.add_node_comparators([MODEL_OUTPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n if 
self.scopes_without_shape_matching:\n self._compressed_context.add_node_comparators(scopes_without_shape_matching,\n ShapeIgnoringTensorMetaComparator())\n self._load_listener = None\n\n @debuggable_forward\n def forward(self, *args, **kwargs):\n with self._compressed_context as ctx: # type: TracingContext\n ctx.base_module_thread_local_replica = self\n args, kwargs = replicate_same_tensors((args, kwargs))\n if not self._in_user_dummy_forward:\n # If a user supplies own dummy forward, he is responsible for\n # correctly wrapping inputs inside it as well.\n args, kwargs = self._strip_traced_tensors(args, kwargs)\n args, kwargs = self._wrap_inputs_fn(args, kwargs)\n retval = self.get_nncf_wrapped_model()(*args, **kwargs)\n retval = replicate_same_tensors(retval)\n if not self._in_user_dummy_forward:\n retval = self._wrap_outputs_fn(retval)\n\n if self._kd_loss_handler is not None and self.get_nncf_wrapped_model().training:\n self._kd_loss_handler(retval, *args, **kwargs)\n return retval\n\n def _strip_traced_tensors(self, args: Tuple, kwargs: Dict) -> Tuple[Tuple, Dict]:\n \"\"\"\n Required to guard against new forward calls on tensors that have already passed\n through NNCF's forward once and got turned into TracedTensors by reference access.\n \"\"\"\n is_traced_tensor_predicate = lambda x: isinstance(x, TracedTensor)\n\n def strip_fn(tensor: TracedTensor) -> torch.Tensor:\n if hasattr(torch.Tensor, 'as_subclass'):\n return torch.Tensor.as_subclass(tensor, torch.Tensor)\n # Torch < 1.7.0 fallback\n return torch.tensor(tensor, device=tensor.device, requires_grad=tensor.requires_grad)\n\n args = objwalk(args, is_traced_tensor_predicate, strip_fn)\n kwargs = objwalk(kwargs, is_traced_tensor_predicate, strip_fn)\n return args, kwargs\n\n def create_knowledge_distillation_loss_handler(self, kd_original_model: nn.Module, calculate_fn)\\\n -> KnowledgeDistillationLossHandler:\n \"\"\"\n Creates KnowledgeDistillationLossHandler instance for enabling Knowledge Distillation feature.\n Also returns created KnowledgeDistillationLossHandler for control over Knowledge Distillation logic.\n\n :param kd_original_model: original non compressed model used for distillation\n :param calculate_fn: function used to parse model outputs and calculate knowledge distillation loss\n :return: KnowledgeDistillationLossHandler instance\n \"\"\"\n device = next(self.get_nncf_wrapped_model().parameters()).device\n self._kd_loss_handler = KnowledgeDistillationLossHandler(self._compressed_context,\n kd_original_model,\n calculate_fn,\n device)\n return self._kd_loss_handler\n\n # Cannnot use property syntax here, otherwise the wrapped module will end up\n # being twice in the same checkpoint with different prefixes\n def get_nncf_wrapped_model(self):\n return getattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME)\n\n def _set_nncf_wrapped_model(self, value):\n setattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME, value)\n\n def get_clean_shallow_copy(self) -> 'NNCFNetwork':\n # WARNING: Will reset pre- and post-ops of the underlying model. 
Use save_nncf_module_additions\n # and load_nncf_module_additions to preserve these, or temporary_clean_view().\n from nncf.torch.utils import save_module_state, load_module_state\n saved_state = save_module_state(self)\n model_copy = NNCFNetwork(self.get_nncf_wrapped_model(), self.input_infos,\n self._user_dummy_forward_fn, self._wrap_inputs_fn,\n self.scopes_without_shape_matching, self.ignored_scopes, self.target_scopes,\n reset=True)\n load_module_state(model_copy, saved_state)\n return model_copy\n\n def get_modules_in_nncf_modules_by_type(self, types) -> Dict[Scope, nn.Module]:\n nncf_modules = self.get_nncf_modules()\n retval = {}\n for nncf_module_scope, nncf_module in nncf_modules.items():\n nncf_module_scope.pop()\n for relative_scope, target_module in get_all_modules_by_type(nncf_module, types).items():\n retval[nncf_module_scope + relative_scope] = target_module\n return retval\n\n def insert_at_point(self, point: PTInsertionPoint, fn_list: List[Callable]):\n if point.insertion_type == PTInsertionType.OPERATOR_PRE_HOOK:\n self._compressed_context.register_pre_hooks(fn_list, point.op_address, point.input_port_id)\n elif point.insertion_type == PTInsertionType.OPERATOR_POST_HOOK:\n self._compressed_context.register_post_hooks(fn_list, point.op_address)\n elif point.insertion_type in [PTInsertionType.NNCF_MODULE_PRE_OP,\n PTInsertionType.NNCF_MODULE_POST_OP]:\n norm_target_scope = self._normalize_variable_recurrent_scope(point.module_scope)\n norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]\n assert norm_target_scope in norm_nncf_scopes # Required for proper Recurrent/VariableRecurrent addressing\n nncf_module = self.get_module_by_scope(point.module_scope)\n if point.insertion_type == PTInsertionType.NNCF_MODULE_PRE_OP:\n for fn in fn_list:\n nncf_module.register_pre_forward_operation(fn)\n elif point.insertion_type == PTInsertionType.NNCF_MODULE_POST_OP:\n for fn in fn_list:\n nncf_module.register_post_forward_operation(fn)\n else:\n raise RuntimeError(\"Unsupported insertion type: {}\".format(point.insertion_type))\n\n def __getattr__(self, name):\n wrapped_module = super().__getattr__(MODEL_WRAPPED_BY_NNCF_ATTR_NAME)\n if hasattr(wrapped_module, name):\n return getattr(wrapped_module, name)\n return super().__getattr__(name)\n\n def get_graph(self) -> PTNNCFGraph:\n if self._compressed_context.graph.get_nodes_count() == 0 or self._compressed_graph is None:\n self.rebuild_graph()\n return self._compressed_graph\n\n def get_dynamic_graph(self) -> DynamicGraph:\n return self._compressed_context.graph\n\n def get_original_graph(self) -> PTNNCFGraph:\n return self._original_graph\n\n def get_tracing_context(self) -> TracingContext:\n return self._compressed_context\n\n def enable_dynamic_graph_building(self):\n self._compressed_context.enable_node_additions()\n\n def disable_dynamic_graph_building(self):\n self._compressed_context.disable_node_additions()\n\n def _get_dummy_forward_fn_for_graph_building(self, with_input_tracing, with_output_tracing):\n if self._user_dummy_forward_fn is None:\n return create_dummy_forward_fn(self.input_infos,\n with_input_tracing=with_input_tracing,\n wrap_inputs_fn=self._wrap_inputs_fn,\n wrap_outputs_fn=self._wrap_outputs_fn,\n with_output_tracing=with_output_tracing)\n\n def wrapped_user_dummy_forward_fn(*args, **kwargs):\n self._in_user_dummy_forward = True\n retval = self._user_dummy_forward_fn(*args, **kwargs)\n self._in_user_dummy_forward = False\n return retval\n\n return 
wrapped_user_dummy_forward_fn\n\n def _replace_modules_by_nncf_modules(self, device, eval_only_op_scopes: List[Scope] = None,\n reset: bool = False):\n module, self._nncf_module_scopes = replace_modules_by_nncf_modules(\n self.get_nncf_wrapped_model(), ignored_scopes=self.ignored_scopes,\n target_scopes=self.target_scopes, eval_op_scopes=eval_only_op_scopes,\n reset=reset)\n self._set_nncf_wrapped_model(module.to(device))\n\n def get_nncf_module_scopes(self) -> List[Scope]:\n return self._nncf_module_scopes\n\n def get_nncf_modules(self) -> Dict[Scope, torch.nn.Module]:\n nncf_module_names_list = NNCF_MODULES + [x.__name__ for x in NNCF_WRAPPED_USER_MODULES_DICT.values()]\n return get_all_modules_by_type(self.get_nncf_wrapped_model(), nncf_module_names_list)\n\n def get_weighted_original_graph_nodes(self, nncf_module_names: List[str] = None) -> List[NNCFNode]:\n retval = []\n for nncf_module_scope in self._nncf_module_scopes:\n if nncf_module_names is not None:\n module_name = nncf_module_scope[-1].calling_module_class_name\n if module_name not in nncf_module_names:\n continue\n nodes_in_scope = self._original_graph.get_op_nodes_in_scope(nncf_module_scope)\n for node in nodes_in_scope:\n if node.layer_attributes is not None: # TODO(vshampor): implement more explicit filtering\n retval.append(node)\n return retval\n\n def get_nncf_modules_by_module_names(self, nncf_module_names_list: List[str]) -> Dict[\"Scope\", torch.nn.Module]:\n return get_all_modules_by_type(self.get_nncf_wrapped_model(), nncf_module_names_list)\n\n def rebuild_graph(self, *input_args):\n self._compressed_context.reset_graph()\n dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False,\n with_output_tracing=False)\n builder = GraphBuilder(dummy_forward_fn)\n self._compressed_graph = builder.build_graph(self, self._compressed_context,\n input_infos=self.input_infos)\n\n def post_build_graph_actions(self):\n # Reset initialization flags (`initialized`) for all quantization modules\n # after dummy `load_state_dict` call.\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n all_quantizations = get_state_dict_names_with_modules(self, quantization_types)\n for module in all_quantizations.values():\n module.initialized = False\n\n def is_scope_in_nncf_module_scope(self, scope: Scope):\n # TODO: optimize\n norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]\n norm_op_scope = self._normalize_variable_recurrent_scope(scope)\n for nncf_scope in norm_nncf_scopes:\n if norm_op_scope in nncf_scope:\n return True\n return False\n\n def register_compression_module_type(self, compression_module_type: ExtraCompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type in self._extra_module_types:\n raise RuntimeError(\"Module type {} is already registered\".format(compression_module_type))\n self.__setattr__(attr_name, nn.ModuleDict())\n self._extra_module_types.append(compression_module_type)\n\n def add_compression_module(self, module_key: str, module: nn.Module,\n compression_module_type: ExtraCompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n storage = self.__getattr__(attr_name)\n if module_key in storage:\n raise 
RuntimeError(\"Module {} is already registered under {}\".format(module_key, attr_name))\n storage[module_key] = module\n\n def get_compression_modules_by_type(self, compression_module_type: ExtraCompressionModuleType) -> nn.ModuleDict:\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n return self.__getattr__(attr_name)\n\n @staticmethod\n def _compression_module_type_to_attr_name(compression_module_type: ExtraCompressionModuleType):\n \"\"\"\n Required for backward compatibility with checkpoints that store function and activation\n quantizers directly under corresponding attributes of NNCFNetwork.\n \"\"\"\n if compression_module_type == ExtraCompressionModuleType.EXTERNAL_QUANTIZER:\n return EXTERNAL_QUANTIZERS_STORAGE_NAME\n raise RuntimeError(\"Unknown extra module type\")\n\n def sort_compression_modules(self, compression_module_type: ExtraCompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n module_dict = self.__getattr__(attr_name)\n # pylint: disable=protected-access\n module_dict._modules = OrderedDict(sorted(module_dict._modules.items()))\n self.__setattr__(attr_name, module_dict)\n\n @staticmethod\n def _normalize_variable_recurrent_scope(scope: Scope):\n \"\"\"\n Two scopes pointing to an NNCF module that only differ in a Recurrent/VariableRecurrent/VariableRecurrentReverse\n scope node actually point to one and the same module.\n \"\"\"\n ret_scope = scope.copy()\n for scope_element in ret_scope:\n if scope_element.calling_module_class_name in [\"Recurrent\", \"VariableRecurrent\",\n \"VariableRecurrentReverse\"]:\n scope_element.calling_module_class_name = \"NormalizedName_Recurrent\"\n return ret_scope\n\n def do_dummy_forward(self, force_eval=False):\n \"\"\"\n Attention: If run with force_eval=False, this may spoil the batchnorm statistics,\n and an eval run of the model will perform much worse than the train run.\n \"\"\"\n if force_eval:\n train_mode = self.training\n self.eval()\n with torch.no_grad():\n with self._compressed_context as ctx:\n ctx.base_module_thread_local_replica = self\n self._dummy_forward_fn(self)\n if force_eval:\n if train_mode:\n self.train()\n\n def get_insertion_point_graph(self) -> InsertionPointGraph:\n # Set up a pre- and post-hooks on almost every op in PyTorch\n nncf_graph = self.get_original_graph()\n pre_hooks = [] # type: List[PreHookInsertionPoint]\n post_hooks = [] # type: List[PostHookInsertionPoint]\n for node in nncf_graph.get_all_nodes():\n # Pre-hook insertion point nodes\n # Will insert a pre-hook IP for each input edge. The input edge must be marked with\n # a port ID attribute.\n in_edges = nncf_graph.get_input_edges(node)\n for edge in in_edges:\n port_id = edge.input_port_id\n pre_hook_ip = PreHookInsertionPoint(target_node_name=node.node_name,\n input_port_id=port_id)\n pre_hooks.append(pre_hook_ip)\n\n if issubclass(node.metatype, SplitMetatype):\n # chunk returns a tuple of tensors, which can only be handled in NNCF\n # once post-hook ports are enabled. 
Work around it for now by disallowing post-hook\n # insertion for chunks\n # TODO: enable post-hook ports and remove this\n continue\n\n # Post-hook insertion point nodes\n post_hook_ip = PostHookInsertionPoint(node.node_name)\n post_hooks.append(post_hook_ip)\n\n weighted_nodes = self.get_weighted_original_graph_nodes()\n weighted_node_names = [weighted_node.node_name for weighted_node in weighted_nodes]\n\n ip_graph = InsertionPointGraph(self._original_graph, weight_modifiable_node_names=weighted_node_names,\n allowed_pre_hook_insertion_points=pre_hooks,\n allowed_post_hook_insertion_points=post_hooks)\n return ip_graph\n\n def get_module_by_scope(self, scope: Scope) -> Optional[torch.nn.Module]:\n curr_module = self.get_nncf_wrapped_model()\n for scope_element in scope[1:]: # omit first scope element which corresponds to base module\n if scope_element.calling_field_name is None:\n # The module used is being created in-place every time and never stored in the model,\n # happens for nn.Softmax in BERT implementations.\n return None\n # pylint: disable=protected-access\n next_module = curr_module._modules.get(scope_element.calling_field_name)\n if next_module is None:\n raise RuntimeError(\"Could not find a {} module member in {} module of scope {} during node search\"\n .format(scope_element.calling_field_name,\n scope_element.calling_module_class_name,\n str(scope)))\n curr_module = next_module\n return curr_module\n\n def get_containing_module(self, node_name: NNCFNodeName) -> torch.nn.Module:\n if self._compressed_graph is not None:\n try:\n scope = self._compressed_graph.get_scope_by_node_name(node_name)\n except RuntimeError:\n nncf_logger.debug(\"Node {} not found in compressed graph when trying to determine containing module, \"\n \"trying the original graph to see if the node was present there \"\n \"during graph building\")\n scope = self._original_graph.get_scope_by_node_name(node_name)\n else:\n scope = self._original_graph.get_scope_by_node_name(node_name)\n return self.get_module_by_scope(scope)\n\n def get_parameters_count_in_model(self):\n \"\"\"\n Return total amount of model parameters.\n \"\"\"\n count = 0\n for param in self.parameters():\n count = count + param.numel()\n return count\n\n def get_flops_per_module(self) -> Dict[NNCFNodeName, int]:\n \"\"\"\n Calculates FLOPS count for modules.\n \"\"\"\n model = self\n flops_count_dict = {}\n\n def get_hook(name):\n return functools.partial(compute_FLOPs_hook, dict_to_save=flops_count_dict,\n module_node_name=name)\n\n hook_list = []\n for nncf_node in self._original_graph.get_all_nodes():\n node_module = self.get_containing_module(nncf_node.node_name)\n hook_list.append(node_module.register_forward_hook(get_hook(nncf_node.node_name)))\n model.do_dummy_forward(force_eval=True)\n\n for h in hook_list:\n h.remove()\n return flops_count_dict\n\n def get_MACs_in_model(self):\n \"\"\"\n Calculates MAC units count for model.\n \"\"\"\n flops_count_dict = self.get_flops_per_module()\n total_MACs_count = sum(v // 2 for v in flops_count_dict.values())\n return total_MACs_count\n\n def get_input_infos(self) -> List[ModelInputInfo]:\n return deepcopy(self.input_infos)\n\n def save_nncf_module_additions(self) -> Dict[Scope, Tuple[torch.nn.ModuleDict, torch.nn.ModuleDict]]:\n retval = {}\n for module_scope, nncf_module in self.get_nncf_modules().items():\n retval[module_scope] = (deepcopy(nncf_module.pre_ops), deepcopy(nncf_module.post_ops))\n return retval\n\n def load_nncf_module_additions(self,\n scope_vs_pre_post_ops_dict: 
Dict[Scope, Tuple[torch.nn.ModuleDict,\n torch.nn.ModuleDict]]):\n for module_scope, nncf_module in self.get_nncf_modules().items():\n nncf_module.pre_ops = scope_vs_pre_post_ops_dict[module_scope][0]\n nncf_module.post_ops = scope_vs_pre_post_ops_dict[module_scope][1]\n\n def temporary_clean_view(self):\n class Mgr:\n def __init__(self, model: NNCFNetwork):\n self.model = model\n self.storage_dict = {}\n\n def __enter__(self):\n self.storage_dict = self.model.save_nncf_module_additions()\n clean_model = self.model.get_clean_shallow_copy()\n return clean_model\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.model.load_nncf_module_additions(self.storage_dict)\n\n return Mgr(self)\n\n def _collect_eval_only_op_scopes(self, model: nn.Module, dummy_forward_fn: Callable) -> List[Scope]:\n \"\"\"\n Returns scopes of the modules which are executed in evaluation mode only.\n \"\"\"\n\n tracer = GraphTracer(dummy_forward_fn)\n result = []\n eval_graph = tracer.trace_graph(model, as_eval=True)\n for dyn_graph_node in eval_graph.get_all_nodes():\n result.append(dyn_graph_node.op_exec_context.scope_in_model)\n return result\n\n @property\n def original_model_accuracy(self):\n return self._original_model_accuracy\n\n def get_node_to_op_address_mapping(self) -> Dict[NNCFNodeName, OperationAddress]:\n # The IDs of corresponding nodes of the original dynamic graph and original NNCF graph\n # must be equal for this to work.\n retval = {}\n for node in self._original_dynamic_graph.get_all_nodes():\n node_id = node.node_id\n op_address = node.op_exec_context.op_address\n nncf_node = self._original_graph.get_node_by_id(node_id)\n retval[nncf_node.node_name] = op_address\n return retval\n\n\nclass PTModelTransformer(ModelTransformer):\n def __init__(self, model: NNCFNetwork):\n super().__init__(model)\n self._node_to_op_address_mapping = model.get_node_to_op_address_mapping()\n\n def transform(self, transformation_layout: PTTransformationLayout) -> NNCFNetwork:\n fns_grouped_by_points = {} # type: Dict[PTInsertionPoint, List[Tuple[Callable, TransformationPriority]]]\n for transformation_command in transformation_layout.transformations: # type: PTInsertionCommand\n target_point = transformation_command.target_point # type: PTTargetPoint\n target_node_name = target_point.target_node_name\n pt_ip = PTInsertionPoint(target_type=target_point.target_type,\n op_address=self._node_to_op_address_mapping[target_node_name],\n input_port_id=target_point.input_port_id)\n fn = transformation_command.fn\n if target_point.type is TargetType.OPERATION_WITH_WEIGHTS:\n fn = UpdateWeight(fn)\n tup = (fn, transformation_command.priority)\n if pt_ip not in fns_grouped_by_points:\n fns_grouped_by_points[pt_ip] = [tup]\n else:\n fns_grouped_by_points[pt_ip].append(tup)\n\n for pt_ip, fn_list_with_priority in fns_grouped_by_points.items():\n fn_list_with_priority = sorted(fn_list_with_priority, key=lambda x: x[1])\n self._model.insert_at_point(pt_ip, [x[0] for x in fn_list_with_priority])\n return self._model\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific 
language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\n\nimport tensorflow as tf\n\nfrom nncf.common.quantization.structs import QuantizationMode\nfrom nncf.common.quantization.structs import QuantizerConfig\nfrom nncf.common.quantization.structs import QuantizerSpec\nfrom nncf.tensorflow.layers.custom_objects import NNCF_CUSTOM_OBJECTS\nfrom nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATONS\nfrom nncf.tensorflow.layers.data_layout import get_channel_axis\nfrom nncf.tensorflow.layers.data_layout import get_channel_size\nfrom nncf.tensorflow.layers.operation import NNCFOperation\nfrom nncf.tensorflow.quantization.functions import asymmetric_quantize\nfrom nncf.tensorflow.quantization.functions import symmetric_quantize\n\n\nclass TFQuantizerSpec(QuantizerSpec):\n def __init__(self, num_bits: int,\n mode: QuantizationMode,\n signedness_to_force: Optional[bool],\n narrow_range: bool,\n half_range: bool,\n per_channel: bool):\n super().__init__(num_bits, mode, signedness_to_force, narrow_range, half_range)\n self.per_channel = per_channel\n\n @classmethod\n def from_config(cls, qconfig: QuantizerConfig, narrow_range: bool, half_range: bool) -> 'TFQuantizerSpec':\n return cls(qconfig.num_bits,\n qconfig.mode,\n qconfig.signedness_to_force,\n narrow_range,\n half_range,\n qconfig.per_channel)\n\n def get_state(self) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary with Python data structures (dict, list, tuple, str, int, float, True, False, None) that\n represents state of the object.\n\n :return: state of the object\n \"\"\"\n return {\n 'num_bits': self.num_bits,\n 'mode': self.mode,\n 'signedness_to_force': self.signedness_to_force,\n 'narrow_range': self.narrow_range,\n 'half_range': self.half_range,\n 'per_channel': self.per_channel\n }\n\n @classmethod\n def from_state(cls, state: Dict[str, Any]) -> 'TFQuantizerSpec':\n \"\"\"\n Creates the object from its state.\n\n :param state: Output of `get_state()` method.\n \"\"\"\n return cls(**state)\n\n\nclass Quantizer(NNCFOperation):\n \"\"\"\n Base class for all NNCF quantization operations.\n \"\"\"\n def __init__(self, name: str):\n \"\"\"\n Initializes internal NNCF quantization operation state.\n\n :param name: Unique operation name in algorithm scope.\n \"\"\"\n super().__init__(name)\n self.enabled = True\n self._eps = 1e-16\n self._pre_processing_fn = self._make_pre_processing_fn()\n self._post_processing_fn = self._make_post_processing_fn()\n\n @property\n def mode(self) -> str:\n \"\"\"\n Returns mode of the quantization (symmetric or asymmetric).\n\n :return: The mode of the quantization.\n \"\"\"\n raise NotImplementedError\n\n def call(self, inputs, weights, training):\n \"\"\"\n The method applies quantization to the input tensor if the quantizer is enabled,\n otherwise, if the quantizer is disabled, the method returns the input tensor as is.\n\n :param inputs: Input tensor.\n :param weights: Quantizer's weights.\n :param training: True if operation called in training mode else False\n :return: Output tensor.\n \"\"\"\n if not self.enabled:\n return inputs\n transformed = self._pre_processing_fn(inputs)\n quantized = self.quantize(transformed, weights, training)\n outputs = self._post_processing_fn(quantized)\n return outputs\n\n def quantize(self, inputs, weights, training):\n \"\"\"\n Apply quantization to the input tensor.\n\n :param inputs: Input tensor.\n :param weights: 
Quantizer's weights.\n :param training: True if operation called in training mode else False\n :return: Quantized tensor.\n \"\"\"\n raise NotImplementedError\n\n def apply_range_initialization(self, weights, min_values, max_values, min_range=0.1, eps=0.01):\n \"\"\"\n Initialize quantizer parameters using minimum and maximum weight values.\n\n :param weights: Quantizer's weights.\n :param min_values: Minimum weight values.\n :param max_values: Maximum weight values.\n :param min_range: Minimum range.\n :param eps: Smoothing coefficient for ranges: min_range = maximum(min_range, eps * max_range).\n \"\"\"\n raise NotImplementedError\n\n def setup_input_transformation(self, input_shape, input_type, input_name, layer):\n \"\"\"\n Setup input transformation that the per-channel quantization can be applied to input tensor.\n The TensorFlow fake_quant_with_min_max_vars_per_channel supports only inputs tensor one of\n the shapes: [d], [b, d] [b, h, w, d]. For this reason, Quantizer transforms any inputs tensor\n to one of the supported shapes, then quantizes and then transforms quantized tensor to\n the original inputs shape.\n\n :param input_shape: Shape of the input.\n :param input_type: Type of the input identifies that inputs are layer weights\n or inputs of the layer.\n :param input_name: Input name.\n :param layer: Layer, where the Quantizer is registered.\n \"\"\"\n self._pre_processing_fn, self._post_processing_fn = \\\n self._make_transformation_fns(input_shape, input_type, input_name, layer)\n\n def _make_transformation_fns(self, input_shape, input_type, input_name, layer):\n channel_axes = get_channel_axis(input_type, input_name, layer)\n\n fns_registry = []\n if isinstance(channel_axes, (tuple, list)):\n switch_counter = 0\n accumulate = False\n new_shape = []\n new_channel_axes = None\n for axis, val in enumerate(input_shape):\n if axis in channel_axes:\n if accumulate:\n new_shape[-1] *= val\n else:\n accumulate = True\n new_channel_axes = len(new_shape)\n new_shape.append(val)\n switch_counter += 1\n else:\n accumulate = False\n new_shape.append(val)\n if switch_counter > 1:\n raise NotImplementedError(\n 'Quntizer could not transform input to apply per-channel quantization: '\n 'input shape {}, input type {}, input name {}, channel_axes {} '\n 'from layer {}'.format(\n input_shape, input_type, input_name, channel_axes, layer.name))\n forward_params = {'shape': new_shape}\n backward_params = {'shape': input_shape}\n fns_registry.append((tf.reshape, forward_params, backward_params))\n input_shape = new_shape\n channel_axes = new_channel_axes\n\n ndims = len(input_shape)\n if channel_axes % ndims != ndims - 1:\n perm = [i for i, _ in enumerate(input_shape)]\n perm[channel_axes], perm[-1] = perm[-1], perm[channel_axes]\n params = {'perm': perm}\n fns_registry.append((tf.transpose, params, params))\n new_shape = list(input_shape)\n new_shape[channel_axes], new_shape[-1] = new_shape[-1], new_shape[channel_axes]\n input_shape = new_shape\n\n if ndims not in [1, 2, 4]:\n size = 1\n for val in input_shape[:-1]:\n size *= val\n forward_params = {'shape': [size, input_shape[-1]]}\n backward_params = {'shape': input_shape}\n fns_registry.append((tf.reshape, forward_params, backward_params))\n\n def fuse_functions(fns_registry):\n if not fns_registry:\n return fns_registry\n\n fused_fns_registry = []\n fn1 = fns_registry[0]\n for fn2 in fns_registry[1:]:\n if fn1[0] == fn2[0] == tf.reshape:\n fn1 = (tf.reshape, fn2[1], fn1[2])\n else:\n fused_fns_registry.append(fn1)\n fn1 = fn2\n 
fused_fns_registry.append(fn1)\n return fused_fns_registry\n\n fused_fns_registry = fuse_functions(fns_registry)\n return self._make_pre_processing_fn(fused_fns_registry), self._make_post_processing_fn(fused_fns_registry)\n\n @staticmethod\n def _make_pre_processing_fn(fns_registry=None):\n fns_list = []\n if fns_registry is None:\n fns_registry = []\n for fn in fns_registry:\n fns_list.append(partial(fn[0], **fn[1]))\n\n def pre_processing_fn(inputs):\n result = inputs\n for func in fns_list:\n result = func(result)\n return result\n\n return pre_processing_fn\n\n @staticmethod\n def _make_post_processing_fn(fns_registry=None):\n fns_list = []\n if fns_registry is None:\n fns_registry = []\n for fn in reversed(fns_registry):\n fns_list.append(partial(fn[0], **fn[2]))\n\n def post_processing_fn(inputs):\n result = inputs\n for func in fns_list:\n result = func(result)\n return result\n\n return post_processing_fn\n\n @staticmethod\n def _min_adj(bits, low, range_len, narrow_range):\n quants_count = 2 ** bits - (2 if narrow_range else 1)\n return range_len / quants_count * tf.round(quants_count * low / range_len)\n\n def get_quantizer_config(self) -> QuantizerConfig:\n \"\"\"\n Used to get a current quantizer state in terms of QuantizerConfig objects.\n\n :return: A QuantizerConfig struct that corresponds to current state of the quantizer.\n \"\"\"\n raise NotImplementedError\n\n def get_config(self):\n raise NotImplementedError\n\n\n@NNCF_CUSTOM_OBJECTS.register()\n@NNCF_QUANTIZATION_OPERATONS.register(QuantizationMode.SYMMETRIC)\nclass SymmetricQuantizer(Quantizer):\n def __init__(self, name: str, qspec: TFQuantizerSpec):\n super().__init__(name)\n self.num_bits = qspec.num_bits\n self.per_channel = qspec.per_channel\n self.narrow_range = qspec.narrow_range\n self.signedness_to_force = qspec.signedness_to_force\n self._half_range = qspec.half_range\n\n @property\n def half_range(self):\n return self._half_range\n\n @property\n def mode(self) -> str:\n return QuantizationMode.SYMMETRIC\n\n def signed(self, op_weights) -> bool:\n \"\"\"\n Returns `True` for signed quantization, `False` for unsigned.\n\n :return: `True` for signed quantization, `False` for unsigned.\n \"\"\"\n signed_var = op_weights['signed_var']\n return signed_var.numpy() < 0.0\n\n def build(self, input_shape, input_type, name, layer):\n shape = None\n if self.per_channel:\n self.setup_input_transformation(input_shape, input_type, name, layer)\n shape = (get_channel_size(input_shape, input_type, name, layer),)\n\n scale = layer.add_weight(\n name + '_scale',\n shape=shape,\n initializer=tf.keras.initializers.Constant(1.0),\n trainable=True)\n signed = layer.add_weight(\n name + '_signed',\n initializer=tf.keras.initializers.Constant(\n -1.0 if self.signedness_to_force in (True, None) else 0.0),\n trainable=False)\n return {\n 'scale_var': scale,\n 'signed_var': signed\n }\n\n def apply_saturation_fix(self, weights):\n if self.num_bits != 8 or not self._half_range:\n raise RuntimeError('Attempt to apply saturation issue fix '\n 'to quantizer which is not configured for that.')\n\n # Multiplier to expand scale from 7 bit to 8 bit\n multiplier = 127 / 63 if self.narrow_range else 255 / 127\n weights['scale_var'].assign(multiplier * weights['scale_var'])\n self._eps *= multiplier\n self._half_range = False\n\n def quantize(self, inputs, weights, _):\n def _half_range_quantize():\n return symmetric_quantize(\n inputs,\n weights['scale_var'],\n weights['signed_var'],\n num_bits=self.num_bits - 1,\n 
per_channel=self.per_channel,\n narrow_range=self.narrow_range,\n eps=self._eps\n )\n\n def _default_quantize():\n return symmetric_quantize(\n inputs,\n weights['scale_var'],\n weights['signed_var'],\n num_bits=self.num_bits,\n per_channel=self.per_channel,\n narrow_range=self.narrow_range,\n eps=self._eps\n )\n\n if self._half_range:\n return _half_range_quantize()\n\n return _default_quantize()\n\n def apply_range_initialization(self, weights, min_values, max_values, min_range=0.1, eps=0.01):\n if self.signedness_to_force is None:\n sign = tf.reduce_any(tf.less(min_values, 0))\n weights['signed_var'].assign(-1.0 if sign else 0.0)\n ranges = tf.maximum(tf.abs(max_values), tf.abs(min_values))\n max_range = tf.reduce_max(ranges)\n lower_threshold = tf.maximum(eps * max_range, min_range)\n scale = tf.maximum(ranges, lower_threshold)\n weights['scale_var'].assign(scale)\n\n def get_quantizer_config(self) -> QuantizerConfig:\n return QuantizerConfig(\n num_bits=self.num_bits,\n mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=self.signedness_to_force,\n per_channel=self.per_channel\n )\n\n def get_config(self):\n qspec_dict = {\n 'num_bits': self.num_bits,\n 'mode': QuantizationMode.SYMMETRIC,\n 'signedness_to_force': self.signedness_to_force,\n 'narrow_range': self.narrow_range,\n 'half_range': self._half_range,\n 'per_channel': self.per_channel,\n }\n config = {\n 'quantizer_spec': qspec_dict,\n 'name': self.name,\n }\n return config\n\n @classmethod\n def from_config(cls, config):\n qspec_dict = config['quantizer_spec']\n qspec = TFQuantizerSpec(num_bits=qspec_dict['num_bits'],\n mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=qspec_dict['signedness_to_force'],\n narrow_range=qspec_dict['narrow_range'],\n half_range=qspec_dict['half_range'],\n per_channel=qspec_dict['per_channel'])\n name = config['name']\n return cls(name, qspec)\n\n\n\n@NNCF_CUSTOM_OBJECTS.register()\n@NNCF_QUANTIZATION_OPERATONS.register(QuantizationMode.ASYMMETRIC)\nclass AsymmetricQuantizer(Quantizer):\n def __init__(self, name: str, qspec: TFQuantizerSpec):\n super().__init__(name)\n self.num_bits = qspec.num_bits\n self.narrow_range = qspec.narrow_range\n self.per_channel = qspec.per_channel\n self._half_range = qspec.half_range\n\n @property\n def half_range(self):\n return self._half_range\n\n @property\n def mode(self) -> str:\n return QuantizationMode.ASYMMETRIC\n\n def build(self, input_shape, input_type, name, layer):\n shape = None\n if self.per_channel:\n self.setup_input_transformation(input_shape, input_type, name, layer)\n shape = (get_channel_size(input_shape, input_type, name, layer),)\n\n input_low = layer.add_weight(\n name + '_input_low',\n shape=shape,\n initializer=tf.keras.initializers.Constant(0.0),\n trainable=True)\n input_range = layer.add_weight(\n name + '_input_range',\n shape=shape,\n initializer=tf.keras.initializers.Constant(1.0),\n trainable=True)\n return {\n 'input_low_var': input_low,\n 'input_range_var': input_range\n }\n\n def apply_saturation_fix(self, weights):\n if self.num_bits != 8 or not self._half_range:\n raise RuntimeError('Attempt to apply saturation issue fix '\n 'to quantizer which is not configured for that.')\n\n # Low value shift to expand quantize range from 7 bit to 8 bit properly\n weights['input_low_var'].assign(weights['input_low_var'] + self._min_adj(\n 7, weights['input_low_var'],\n weights['input_range_var'] + self._eps,\n self.narrow_range))\n # Multiplier to expand scale from 7 bit to 8 bit\n multiplier = 127 / 63 if self.narrow_range else 255 
/ 127\n weights['input_range_var'].assign(multiplier * weights['input_range_var'])\n self._eps *= multiplier\n self._half_range = False\n\n def quantize(self, inputs, weights, _):\n def _half_range_quantize():\n return asymmetric_quantize(\n inputs,\n weights['input_low_var'],\n weights['input_range_var'],\n num_bits=self.num_bits - 1,\n per_channel=self.per_channel,\n narrow_range=self.narrow_range,\n eps=self._eps\n )\n\n def _default_quantize():\n return asymmetric_quantize(\n inputs,\n weights['input_low_var'],\n weights['input_range_var'],\n num_bits=self.num_bits,\n per_channel=self.per_channel,\n narrow_range=self.narrow_range,\n eps=self._eps\n )\n\n if self._half_range:\n return _half_range_quantize()\n\n return _default_quantize()\n\n def apply_range_initialization(self, weights, min_values, max_values, min_range=0.1, eps=0.01):\n ranges = max_values - min_values\n max_range = tf.reduce_max(ranges)\n lower_threshold = tf.maximum(eps * max_range, min_range)\n correction = (tf.maximum(ranges, lower_threshold) - ranges) * 0.5\n input_low = min_values - correction\n input_range = ranges + 2 * correction\n weights['input_low_var'].assign(input_low)\n weights['input_range_var'].assign(input_range)\n\n def get_quantizer_config(self) -> QuantizerConfig:\n return QuantizerConfig(\n num_bits=self.num_bits,\n mode=QuantizationMode.ASYMMETRIC,\n signedness_to_force=None,\n per_channel=self.per_channel\n )\n\n def get_config(self):\n qspec_dict = {\n 'num_bits': self.num_bits,\n 'mode': QuantizationMode.ASYMMETRIC,\n 'signedness_to_force': None,\n 'narrow_range': self.narrow_range,\n 'half_range': self._half_range,\n 'per_channel': self.per_channel,\n }\n config = {\n 'quantizer_spec': qspec_dict,\n 'name': self.name,\n }\n return config\n\n @classmethod\n def from_config(cls, config):\n qspec_dict = config['quantizer_spec']\n qspec = TFQuantizerSpec(num_bits=qspec_dict['num_bits'],\n mode=QuantizationMode.ASYMMETRIC,\n signedness_to_force=None,\n narrow_range=qspec_dict['narrow_range'],\n half_range=qspec_dict['half_range'],\n per_channel=qspec_dict['per_channel'])\n name = config['name']\n return cls(name, qspec)\n", "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport argparse\n\nimport os\nfrom functools import partial\nfrom openvino.inference_engine import IENetwork, IEPlugin, get_version\n\nfrom nncf.config import NNCFConfig\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom tools.ir_utils import get_ir_paths\n\n\ndef getExecNet(plugin, net):\n return plugin.load(network=net)\n\n\nargparser = argparse.ArgumentParser()\nargparser.add_argument(\"-m\", \"--model\", help=\"input IR name\", required=True)\nargparser.add_argument(\"--bin\", help=\"Input *.bin file name\")\nargparser.add_argument(\"-o\", \"--output-dir\", help=\"Output directory to dump weights\", required=True)\nargparser.add_argument(\"-c\", \"--config\", type=str, help=\"Model's config\", required=True)\nargparser.add_argument(\"--cuda\", help=\"inference 
PyTorch model on CUDA\", action='store_true')\nargparser.add_argument('--data', metavar='DIR', help='path to dataset', required=True)\nargparser.add_argument('--cpu-plugin-dir', metavar='DIR',\n help='path to the directory with CPU Plugin and CPU Extension libraries', required=True)\nargparser.add_argument(\"-n\", \"--num-layers\", type=int, default=-1, help=\"Dump activations for given number of layers\")\nargparser.add_argument(\"--dump\", action='store_true', help=\"Enables dump of activations\")\n\nargs = argparser.parse_args()\n\n\ndef validate_torch_model(output_dir, config, num_layers, dump, val_loader=None, cuda=False):\n from tools.debug.common import load_torch_model, register_print_hooks\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n model = load_torch_model(config, cuda)\n\n model_e = model.eval()\n if dump:\n register_print_hooks(output_dir, model_e, num_layers=num_layers, data_to_compare=None, dump_activations=True)\n\n validate_general(val_loader, model_e, infer_pytorch_model, cuda)\n\n\ndef main():\n model_bin, model_xml = get_ir_paths(args.model, args.bin)\n\n config = NNCFConfig.from_json(args.config)\n\n input_infos_list = create_input_infos(config)\n image_size = input_infos_list[0].shape[-1]\n\n size = int(image_size / 0.875)\n\n print('IE version: {}'.format(get_version()))\n\n # NOTE: importing torch after loading IE to plugin to avoid issue with built-in MKLDNN of PyTorch\n plugin = IEPlugin(device='CPU',\n plugin_dirs=args.cpu_plugin_dir)\n plugin.add_cpu_extension(os.path.join(args.cpu_plugin_dir, \"libcpu_extension.so\"))\n net = IENetwork(model=model_xml, weights=model_bin)\n exec_net = getExecNet(plugin, net)\n from torch.utils.data import DataLoader\n import torchvision.datasets as datasets\n import torchvision.transforms as transforms\n\n val_loader = DataLoader(\n datasets.ImageFolder(args.data, transforms.Compose([\n transforms.Resize(size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])),\n batch_size=1, shuffle=False, num_workers=4, pin_memory=True)\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n config['log_dir'] = args.output_dir\n\n infer_fn = partial(infer_ie_model, net=net)\n validate_general(val_loader, exec_net, infer_fn)\n\n validate_torch_model(os.path.join(args.output_dir, \"PTH\"), config=config, num_layers=args.num_layers,\n dump=args.dump, val_loader=val_loader, cuda=args.cuda)\n\n\ndef infer_ie_model(exec_net, inputs, net):\n input_cpu = inputs.numpy()\n input_name = next(iter(net.inputs))\n output_name = next(iter(net.outputs))\n res = exec_net.infer(inputs={input_name: input_cpu})\n output = res[output_name]\n import torch\n torch_output = torch.from_numpy(output)\n return torch_output\n\n\ndef infer_pytorch_model(model, inputs):\n return model(inputs)\n\n\ndef validate_general(val_loader, model, infer_model_fn, cuda=False):\n from examples.torch.classification.main import AverageMeter, accuracy\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n for i, (input_, target) in enumerate(val_loader):\n # compute output\n output = infer_model_fn(model, input_)\n\n if cuda:\n target = target.cuda(None, non_blocking=True)\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n top1.update(acc1, input_.size(0))\n top5.update(acc5, input_.size(0))\n\n if i % 10 == 0:\n print('IE Test : [{0}/{1}]\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} 
({top5.avg:.3f})'.format(i, len(val_loader), top1=top1, top5=top5))\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n return top1.avg, top5.avg\n\n\nif __name__ == '__main__':\n main()\n", "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\nimport os\nimport torch\nfrom functools import partial\nfrom torch import nn\n\nfrom examples.torch.common.model_loader import load_model\nfrom nncf.torch.checkpoint_loading import load_state\nfrom nncf.torch.layers import NNCFConv1d, NNCFConv2d, NNCFLinear\nfrom nncf.torch.model_creation import create_compressed_model\n\n\ndef dump_in_out_hook(module, inputs, output):\n dump_out_hook(module, inputs, output)\n dump_path = get_dump_path(module)\n if dump_path:\n key = 0\n output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))\n file_name = os.path.basename(dump_path)\n for input_ in inputs:\n key += 1\n input_data = input_.data.cpu().numpy().flatten()\n dump_name = '.'.join([file_name, \"in\", str(key)])\n npy_path, _ = save_dump(dump_name, output_dir, input_data)\n add_full_dump_path(module, npy_path)\n\n\ndef dump_out_hook(module, inputs, output):\n dump_path = get_dump_path(module)\n if dump_path:\n output_data = output.data.cpu().numpy().flatten()\n output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))\n file_name = os.path.basename(dump_path)\n dump_name = '.'.join([file_name, \"out\"])\n npy_path, _ = save_dump(dump_name, output_dir, output_data, force=False)\n add_full_dump_path(module, npy_path)\n\n\ndef get_dump_path(module):\n if hasattr(module, \"dump_path\"):\n return module.dump_path\n return None\n\n\ndef set_dump_path(layer, path):\n layer.dump_path = path\n\n\ndef add_full_dump_path(layer, full_path):\n if not hasattr(layer, 'dump_full_paths'):\n layer.dump_full_paths = []\n layer.dump_full_paths.append(full_path)\n\n\ndef get_full_dump_paths(layer):\n if hasattr(layer, 'dump_full_paths'):\n return layer.dump_full_paths\n return None\n\n\ndef is_weightable(layer):\n return isinstance(layer, (nn.Conv1d, nn.Conv2d, nn.Linear)) and \\\n not isinstance(layer, (NNCFConv1d, NNCFConv2d, NNCFLinear))\n\n\ndef has_sparse_quant_weights(layer, name):\n from nncf.torch.quantization.layers import SymmetricQuantizer\n from nncf.torch.sparsity.rb.layers import RBSparsifyingWeight\n return (isinstance(layer, RBSparsifyingWeight) and ('sparsified_weight' in name)) or \\\n (isinstance(layer, SymmetricQuantizer) and ('quantized_weight' in name))\n\n\ndef save_dump_(path, ext, saver, data, force=False):\n full_path = '.'.join([path, ext])\n if not os.path.exists(full_path) or force:\n print(\"Saving dump to {}\".format(full_path))\n saver(full_path, data)\n else:\n print(\"Dump already exists \" + full_path)\n return full_path\n\n\ndef save_dump(dump_name, output_dir, data, force=False):\n path = os.path.join(output_dir, dump_name)\n npy_path = save_dump_(path, \"npy\", np.save, data, force)\n txt_path = save_dump_(path, \"txt\", 
partial(np.savetxt, fmt=\"%s\"), data, force)\n return npy_path, txt_path\n\n\ndef register_print_hooks(path, model, data_to_compare, num_layers=-1, dump_activations=False, prefix='', idx=0):\n for name, children in model.named_children():\n name_full = \"{}{}\".format(prefix, name)\n idx = register_print_hooks(path, children, data_to_compare, num_layers, dump_activations,\n prefix=name_full + \".\", idx=idx)\n\n within_range = (num_layers == -1) or idx < num_layers\n has_weights = has_sparse_quant_weights(children, name_full) or is_weightable(children)\n within_type = has_weights if not dump_activations else dump_activations\n if within_range and within_type:\n # always there for activations if dump_activation is enabled\n # always there for weights if dump_activation is disabled\n name_full = name_full.replace('/', '_')\n dump_path = os.path.join(path, '.'.join([str(idx), name_full]))\n idx += 1\n if is_weightable(children):\n output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))\n file_name = os.path.basename(dump_path)\n\n def dump_attr(attr):\n if hasattr(children, attr):\n dump_name = '.'.join([file_name, attr])\n data = children.weight.data.numpy()\n save_dump(dump_name, output_dir, data, force=False)\n data_to_compare[dump_name] = data\n\n dump_attr('weight')\n dump_attr('bias')\n else:\n set_dump_path(children, dump_path)\n hook = dump_in_out_hook if dump_activations else dump_out_hook\n children.register_forward_hook(hook)\n return idx\n\n\ndef load_torch_model(config, cuda=False):\n weights = config.get('weights')\n model = load_model(config.get('model'),\n pretrained=config.get('pretrained', True) if weights is None else False,\n num_classes=config.get('num_classes', 1000),\n model_params=config.get('model_params', {}))\n compression_ctrl, model = create_compressed_model(model, config)\n if weights:\n sd = torch.load(weights, map_location='cpu')\n load_state(model, sd)\n if cuda:\n model = model.cuda()\n model = torch.nn.DataParallel(model)\n print(compression_ctrl.statistics().to_str())\n return model\n\n\ndef compare_activations(ir_dump_txt, torch_dump_npy):\n with open(ir_dump_txt, 'r') as fin:\n first_line = fin.readline()\n if \"shape:\" in first_line:\n data = fin.read().splitlines(True)\n with open(ir_dump_txt, 'w') as fout:\n fout.writelines(data)\n ie = np.loadtxt(ir_dump_txt, dtype=np.float32)\n pt = np.load(torch_dump_npy)\n print(\"Size, [ MIN, MAX ]\")\n print_info = lambda np_array: print(\n \"{} [{:.3f}, {:.3f}]\".format(np_array.size, np_array.min().item(), np_array.max().item()))\n print_info(ie)\n print_info(pt)\n print(\"Maximum of absolute difference: {:.7f}\".format(abs(ie - pt).max()))\n\n\ndef print_args(args):\n for arg in sorted(vars(args)):\n print(\"{: <27s}: {}\".format(arg, getattr(args, arg)))\n", "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn as nn\nfrom examples.torch.common.sample_config import SampleConfig\n\nfrom examples.torch.object_detection.layers 
import L2Norm\nfrom examples.torch.object_detection.layers.modules.ssd_head import MultiOutputSequential, SSDDetectionOutput\nfrom nncf.torch.checkpoint_loading import load_state\n\nBASE_NUM_OUTPUTS = {\n 300: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512],\n 512: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],\n}\nEXTRAS_NUM_OUTPUTS = {\n 300: [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n 512: [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128, 'K', 256],\n}\n\nBASE_OUTPUT_INDICES = {\n 300: [12],\n 512: [12],\n}\n\nEXTRA_OUTPUT_INDICES = {\n 300: [2, 5, 7, 9],\n 512: [2, 5, 8, 11, 14],\n}\n\n\nclass SSD_VGG(nn.Module):\n def __init__(self, cfg, size, num_classes, batch_norm=False):\n super().__init__()\n self.config = cfg\n self.num_classes = num_classes\n self.size = size\n self.enable_batchmorm = batch_norm\n\n base_layers, base_outs, base_feats = build_vgg_ssd_layers(\n BASE_NUM_OUTPUTS[size], BASE_OUTPUT_INDICES[size], batch_norm=batch_norm\n )\n extra_layers, extra_outs, extra_feats = build_vgg_ssd_extra(\n EXTRAS_NUM_OUTPUTS[size], EXTRA_OUTPUT_INDICES[size], batch_norm=batch_norm\n )\n self.basenet = MultiOutputSequential(base_outs, base_layers)\n self.extras = MultiOutputSequential(extra_outs, extra_layers)\n\n self.detection_head = SSDDetectionOutput(base_feats + extra_feats, num_classes, cfg)\n self.L2Norm = L2Norm(512, 20, 1e-10)\n\n def forward(self, x):\n img_tensor = x[0].clone().unsqueeze(0)\n\n sources, x = self.basenet(x)\n sources[0] = self.L2Norm(sources[0])\n\n extra_sources, x = self.extras(x)\n\n return self.detection_head(sources + extra_sources, img_tensor)\n\n def load_weights(self, base_file):\n _, ext = os.path.splitext(base_file)\n if ext in ['.pkl', '.pth']:\n print('Loading weights into state dict...')\n self.load_state_dict(torch.load(base_file,\n map_location=lambda storage, loc: storage))\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\ndef make_ssd_vgg_layer(input_features, output_features, kernel=3, padding=1, dilation=1, modifier=None,\n batch_norm=False):\n stride = 1\n if modifier == 'S':\n stride = 2\n padding = 1\n elif modifier == 'K':\n kernel = 4\n padding = 1\n\n layer = [nn.Conv2d(input_features, output_features, kernel_size=kernel, stride=stride, padding=padding,\n dilation=dilation)]\n if batch_norm:\n layer.append(nn.BatchNorm2d(output_features))\n layer.append(nn.ReLU(inplace=True))\n return layer\n\n\ndef build_vgg_ssd_layers(num_outputs, output_inddices, start_input_channels=3, batch_norm=False):\n vgg_layers = []\n output_num_features = []\n source_indices = []\n in_planes = start_input_channels\n modifier = None\n for i, out_planes in enumerate(num_outputs):\n if out_planes in ('M', 'C'):\n vgg_layers.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=1 if modifier == 'C' else 0))\n continue\n if isinstance(out_planes, str):\n modifier = out_planes\n continue\n vgg_layers.extend(make_ssd_vgg_layer(in_planes, out_planes, modifier=modifier, batch_norm=batch_norm))\n modifier = None\n in_planes = out_planes\n if i in output_inddices:\n source_indices.append(len(vgg_layers) - 1)\n output_num_features.append(out_planes)\n\n vgg_layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))\n vgg_layers.extend(make_ssd_vgg_layer(in_planes, 1024, kernel=3, padding=6, dilation=6, batch_norm=batch_norm))\n vgg_layers.extend(make_ssd_vgg_layer(1024, 1024, kernel=1, batch_norm=batch_norm))\n\n 
source_indices.append(len(vgg_layers) - 1)\n output_num_features.append(1024)\n return vgg_layers, source_indices, output_num_features\n\n\ndef build_vgg_ssd_extra(num_outputs, output_indices, statrt_input_channels=1024, batch_norm=False):\n extra_layers = []\n output_num_features = []\n source_indices = []\n in_planes = statrt_input_channels\n modifier = None\n kernel_sizes = (1, 3)\n for i, out_planes in enumerate(num_outputs):\n if isinstance(out_planes, str):\n modifier = out_planes\n continue\n kernel = kernel_sizes[len(extra_layers) % 2]\n extra_layers.extend(make_ssd_vgg_layer(in_planes, out_planes, modifier=modifier, kernel=kernel, padding=0,\n batch_norm=batch_norm))\n modifier = None\n in_planes = out_planes\n if i in output_indices:\n source_indices.append(len(extra_layers) - 1)\n output_num_features.append(out_planes)\n\n return extra_layers, source_indices, output_num_features\n\n\ndef build_ssd_vgg(cfg, size, num_classes, config):\n ssd_vgg = SSD_VGG(cfg, size, num_classes, batch_norm=config.get('batchnorm', False))\n\n if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):\n print('Loading base network...')\n #\n # ** WARNING: torch.load functionality uses Python's pickling facilities that\n # may be used to perform arbitrary code execution during unpickling. Only load the data you\n # trust.\n #\n basenet_weights = torch.load(config.basenet)\n new_weights = {}\n for wn, wv in basenet_weights.items():\n wn = wn.replace('features.', '')\n new_weights[wn] = wv\n\n load_state(ssd_vgg.basenet, new_weights, is_resume=False)\n return ssd_vgg\n\n\ndef ssd_vgg300():\n ssd_params = SampleConfig({\n \"clip\": False,\n \"variance\": [0.1, 0.1, 0.2, 0.2],\n \"max_sizes\": [60, 111, 162, 213, 264, 315],\n \"min_sizes\": [30, 60, 111, 162, 213, 264],\n \"steps\": [8, 16, 32, 64, 100, 300],\n \"aspect_ratios\": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],\n \"flip\": True\n })\n\n return SSD_VGG(ssd_params, 300, 21, True)\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport time\nimport datetime\nimport json\nimport logging\nimport os\nimport tarfile\nimport resource\nfrom os import path as osp\nfrom pathlib import Path\n\nimport tensorflow as tf\n\nfrom nncf.common.utils.logger import logger as nncf_logger\nfrom examples.tensorflow.common.logger import logger as default_logger\nfrom examples.tensorflow.common.sample_config import CustomArgumentParser\n\nGENERAL_LOG_FILE_NAME = \"output.log\"\nNNCF_LOG_FILE_NAME = \"nncf_output.log\"\n\nSAVED_MODEL_FORMAT = 'tf'\nKERAS_H5_FORMAT = 'h5'\nFROZEN_GRAPH_FORMAT = 'frozen_graph'\n\n\ndef get_name(config):\n dataset = config.get('dataset', 'imagenet2012')\n retval = config[\"model\"] + \"_\" + dataset\n compression_config = config.get('compression', [])\n if not isinstance(compression_config, list):\n compression_config = [compression_config, ]\n for algo_dict in compression_config:\n algo_name = algo_dict[\"algorithm\"]\n if algo_name == \"quantization\":\n 
initializer = algo_dict.get(\"initializer\", {})\n precision = initializer.get(\"precision\", {})\n if precision:\n retval += \"_mixed_int\"\n else:\n activations = algo_dict.get('activations', {})\n a_bits = activations.get('bits', 8)\n weights = algo_dict.get('weights', {})\n w_bits = weights.get('bits', 8)\n if a_bits == w_bits:\n retval += \"_int{}\".format(a_bits)\n else:\n retval += \"_a_int{}_w_int{}\".format(a_bits, w_bits)\n else:\n retval += \"_{}\".format(algo_name)\n return retval\n\n\ndef write_metrics(acc, filename):\n avg = round(acc * 100, 2)\n metrics = {\"Accuracy\": avg}\n with open(filename, 'w') as outfile:\n json.dump(metrics, outfile)\n\n\ndef configure_paths(config):\n d = datetime.datetime.now()\n run_id = '{:%Y-%m-%d__%H-%M-%S}'.format(d)\n config.name = get_name(config)\n config.log_dir = osp.join(config.log_dir, \"{}/{}\".format(config.name, run_id))\n os.makedirs(config.log_dir)\n\n compression_config = config.get('compression', [])\n if not isinstance(compression_config, list):\n compression_config = [compression_config, ]\n if config.nncf_config is not None:\n config.nncf_config[\"log_dir\"] = config.log_dir\n\n if config.checkpoint_save_dir is None:\n config.checkpoint_save_dir = config.log_dir\n\n # create aux dirs\n os.makedirs(config.checkpoint_save_dir, exist_ok=True)\n\n\ndef configure_logging(sample_logger, config):\n training_pipeline_log_file_handler = logging.FileHandler(osp.join(config.log_dir, GENERAL_LOG_FILE_NAME))\n training_pipeline_log_file_handler.setFormatter(logging.Formatter(\"%(message)s\"))\n sample_logger.addHandler(training_pipeline_log_file_handler)\n\n nncf_log_file_handler = logging.FileHandler(osp.join(config.log_dir, NNCF_LOG_FILE_NAME))\n nncf_log_file_handler.setFormatter(logging.Formatter(\"%(levelname)s:%(name)s:%(message)s\"))\n nncf_logger.addHandler(nncf_log_file_handler)\n\n\ndef create_code_snapshot(root, dst_path, extensions=(\".py\", \".json\", \".cpp\", \".cu\", \"h\", \".cuh\")):\n \"\"\"Creates tarball with the source code\"\"\"\n with tarfile.open(str(dst_path), \"w:gz\") as tar:\n for path in Path(root).rglob(\"*\"):\n if '.git' in path.parts:\n continue\n if path.suffix.lower() in extensions:\n tar.add(path.as_posix(), arcname=path.relative_to(root).as_posix(), recursive=True)\n\n\ndef print_args(config, logger=default_logger):\n args = 'Command line arguments\\n'\n args += '\\n'.join([\"{: <27s}: {}\".format(arg, config.get(arg)) for arg in sorted(config)])\n logger.info(args)\n\n\ndef serialize_config(config, log_dir):\n with open(osp.join(log_dir, 'config.json'), 'w') as f:\n json.dump(config, f, indent=4)\n\n\ndef serialize_cli_args(argparser, argv, log_dir):\n args = argparser.parse_args(args=argv)\n if isinstance(argparser, CustomArgumentParser):\n cli_args = {k:v for k, v in vars(args).items() if k in argparser.seen_actions}\n else:\n cli_args = {k:v for k, v in vars(args).items() if v is not None}\n with open(osp.join(log_dir, 'cli_args.json'), 'w') as f:\n json.dump(cli_args, f, indent=4)\n\n\ndef get_saving_parameters(config):\n if config.to_frozen_graph is not None:\n return config.to_frozen_graph, FROZEN_GRAPH_FORMAT\n if config.to_saved_model is not None:\n return config.to_saved_model, SAVED_MODEL_FORMAT\n if config.to_h5 is not None:\n return config.to_h5, KERAS_H5_FORMAT\n save_path = os.path.join(config.log_dir, 'frozen_model.pb')\n return save_path, FROZEN_GRAPH_FORMAT\n\n\ndef set_hard_limit_num_open_files():\n _, high = resource.getrlimit(resource.RLIMIT_NOFILE)\n 
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))\n\n\nclass SummaryWriter:\n \"\"\"Simple SummaryWriter for writing dictionary of metrics\n\n Attributes:\n writer: tf.SummaryWriter\n \"\"\"\n\n def __init__(self, log_dir, name):\n \"\"\"Inits SummaryWriter with paths\n\n Arguments:\n log_dir: the model folder path\n name: the summary subfolder name\n \"\"\"\n self.writer = tf.summary.create_file_writer(os.path.join(log_dir, name)) # pylint: disable=E1101\n\n def __call__(self, metrics, step):\n \"\"\"Write metrics to summary with the given writer\n\n Args:\n metrics: a dictionary of metrics values\n step: integer. The training step\n \"\"\"\n\n with self.writer.as_default(): # pylint: disable=E1129\n for metric_name, value in metrics.items():\n tf.summary.scalar(metric_name, value, step=step)\n self.writer.flush()\n\n def close(self):\n self.writer.close()\n\n\nclass Timer:\n \"\"\"A simple timer.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def tic(self):\n # using time.time instead of time.clock because time time.clock\n # does not normalize for multithreading\n self.start_time = time.time()\n\n def toc(self, average=True):\n self.diff = time.time() - self.start_time\n self.total_time += self.diff\n self.calls += 1\n self.average_time = self.total_time / self.calls\n if average:\n return self.average_time\n return self.diff\n\n def reset(self):\n self.total_time = 0.\n self.calls = 0\n self.start_time = 0.\n self.diff = 0.\n self.average_time = 0.\n" ]
[ [ "torch.rand_like", "torch.nn.Conv2d", "torch.rand", "torch.nn.ConvTranspose2d" ], [ "tensorflow.io.gfile.isdir", "tensorflow.train.latest_checkpoint", "tensorflow.Variable", "tensorflow.io.gfile.exists", "tensorflow.train.checkpoints_iterator", "tensorflow.get_logger", "tensorflow.distribute.get_strategy", "tensorflow.nest.map_structure" ], [ "tensorflow.io.gfile.isdir", "tensorflow.train.CheckpointManager", "tensorflow.train.latest_checkpoint", "tensorflow.Variable", "tensorflow.reduce_mean", "tensorflow.io.gfile.exists", "numpy.isnan", "tensorflow.GradientTape" ], [ "tensorflow.matmul", "tensorflow.range", "tensorflow.stack", "tensorflow.reshape", "tensorflow.cast", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.name_scope", "tensorflow.one_hot" ], [ "torch.nn.ModuleDict", "torch.no_grad", "torch.Tensor.as_subclass", "torch.tensor" ], [ "tensorflow.keras.initializers.Constant", "tensorflow.reduce_max", "tensorflow.less", "tensorflow.maximum", "tensorflow.round", "tensorflow.abs" ], [ "torch.from_numpy" ], [ "numpy.load", "torch.nn.DataParallel", "numpy.loadtxt", "torch.load" ], [ "torch.load", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "tensorflow.summary.scalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
diavy/tensorflow-without-a-phd
[ "7270d75cf259c49e9457b7041f56941928308b4a" ]
[ "tensorflow-nmt-tutorial/nmt/nmt.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"TensorFlow NMT model implementation.\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom . import inference\nfrom . import train\nfrom .utils import evaluation_utils\nfrom .utils import misc_utils as utils\nfrom .utils import vocab_utils\n\nutils.check_tensorflow_version()\n\nFLAGS = None\n\nINFERENCE_KEYS = [\"src_max_len_infer\", \"tgt_max_len_infer\", \"subword_option\",\n \"infer_batch_size\", \"beam_width\",\n \"length_penalty_weight\", \"sampling_temperature\",\n \"num_translations_per_input\", \"infer_mode\"]\n\n\ndef add_arguments(parser):\n \"\"\"Build ArgumentParser.\"\"\"\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n\n # network\n parser.add_argument(\"--num_units\", type=int, default=32, help=\"Network size.\")\n parser.add_argument(\"--num_layers\", type=int, default=2,\n help=\"Network depth.\")\n parser.add_argument(\"--num_encoder_layers\", type=int, default=None,\n help=\"Encoder depth, equal to num_layers if None.\")\n parser.add_argument(\"--num_decoder_layers\", type=int, default=None,\n help=\"Decoder depth, equal to num_layers if None.\")\n parser.add_argument(\"--encoder_type\", type=str, default=\"uni\", help=\"\"\"\\\n uni | bi | gnmt.\n For bi, we build num_encoder_layers/2 bi-directional layers.\n For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)\n uni-directional layers.\\\n \"\"\")\n parser.add_argument(\"--residual\", type=\"bool\", nargs=\"?\", const=True,\n default=False,\n help=\"Whether to add residual connections.\")\n parser.add_argument(\"--time_major\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"Whether to use time-major mode for dynamic RNN.\")\n parser.add_argument(\"--num_embeddings_partitions\", type=int, default=0,\n help=\"Number of partitions for embedding vars.\")\n\n # attention mechanisms\n parser.add_argument(\"--attention\", type=str, default=\"\", help=\"\"\"\\\n luong | scaled_luong | bahdanau | normed_bahdanau or set to \"\" for no\n attention\\\n \"\"\")\n parser.add_argument(\n \"--attention_architecture\",\n type=str,\n default=\"standard\",\n help=\"\"\"\\\n standard | gnmt | gnmt_v2.\n standard: use top layer to compute attention.\n gnmt: GNMT style of computing attention, use previous bottom layer to\n compute attention.\n gnmt_v2: similar to gnmt, but use current bottom layer to compute\n attention.\\\n \"\"\")\n parser.add_argument(\n \"--output_attention\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"\"\"\\\n Only used in standard attention_architecture. 
Whether use attention as\n the cell output at each timestep.\n .\\\n \"\"\")\n parser.add_argument(\n \"--pass_hidden_state\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"\"\"\\\n Whether to pass encoder's hidden state to decoder when using an attention\n based model.\\\n \"\"\")\n\n # optimizer\n parser.add_argument(\"--optimizer\", type=str, default=\"sgd\", help=\"sgd | adam\")\n parser.add_argument(\"--learning_rate\", type=float, default=1.0,\n help=\"Learning rate. Adam: 0.001 | 0.0001\")\n parser.add_argument(\"--warmup_steps\", type=int, default=0,\n help=\"How many steps we inverse-decay learning.\")\n parser.add_argument(\"--warmup_scheme\", type=str, default=\"t2t\", help=\"\"\"\\\n How to warmup learning rates. Options include:\n t2t: Tensor2Tensor's way, start with lr 100 times smaller, then\n exponentiate until the specified lr.\\\n \"\"\")\n parser.add_argument(\n \"--decay_scheme\", type=str, default=\"\", help=\"\"\"\\\n How we decay learning rate. Options include:\n luong234: after 2/3 num train steps, we start halving the learning rate\n for 4 times before finishing.\n luong5: after 1/2 num train steps, we start halving the learning rate\n for 5 times before finishing.\\\n luong10: after 1/2 num train steps, we start halving the learning rate\n for 10 times before finishing.\\\n \"\"\")\n\n parser.add_argument(\n \"--num_train_steps\", type=int, default=12000, help=\"Num steps to train.\")\n parser.add_argument(\"--colocate_gradients_with_ops\", type=\"bool\", nargs=\"?\",\n const=True,\n default=True,\n help=(\"Whether try colocating gradients with \"\n \"corresponding op\"))\n\n # initializer\n parser.add_argument(\"--init_op\", type=str, default=\"uniform\",\n help=\"uniform | glorot_normal | glorot_uniform\")\n parser.add_argument(\"--init_weight\", type=float, default=0.1,\n help=(\"for uniform init_op, initialize weights \"\n \"between [-this, this].\"))\n\n # data\n parser.add_argument(\"--src\", type=str, default=None,\n help=\"Source suffix, e.g., en.\")\n parser.add_argument(\"--tgt\", type=str, default=None,\n help=\"Target suffix, e.g., de.\")\n parser.add_argument(\"--train_prefix\", type=str, default=None,\n help=\"Train prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--dev_prefix\", type=str, default=None,\n help=\"Dev prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--test_prefix\", type=str, default=None,\n help=\"Test prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--out_dir\", type=str, default=None,\n help=\"Store log/model files.\")\n\n # Vocab\n parser.add_argument(\"--vocab_prefix\", type=str, default=None, help=\"\"\"\\\n Vocab prefix, expect files with src/tgt suffixes.\\\n \"\"\")\n parser.add_argument(\"--embed_prefix\", type=str, default=None, help=\"\"\"\\\n Pretrained embedding prefix, expect files with src/tgt suffixes.\n The embedding files should be Glove formated txt files.\\\n \"\"\")\n parser.add_argument(\"--sos\", type=str, default=\"<s>\",\n help=\"Start-of-sentence symbol.\")\n parser.add_argument(\"--eos\", type=str, default=\"</s>\",\n help=\"End-of-sentence symbol.\")\n parser.add_argument(\"--share_vocab\", type=\"bool\", nargs=\"?\", const=True,\n default=False,\n help=\"\"\"\\\n Whether to use the source vocab and embeddings for both source and\n target.\\\n \"\"\")\n parser.add_argument(\"--check_special_token\", type=\"bool\", default=True,\n help=\"\"\"\\\n Whether check special sos, eos, unk tokens exist in the\n vocab files.\\\n 
\"\"\")\n\n # Sequence lengths\n parser.add_argument(\"--src_max_len\", type=int, default=50,\n help=\"Max length of src sequences during training.\")\n parser.add_argument(\"--tgt_max_len\", type=int, default=50,\n help=\"Max length of tgt sequences during training.\")\n parser.add_argument(\"--src_max_len_infer\", type=int, default=None,\n help=\"Max length of src sequences during inference.\")\n parser.add_argument(\"--tgt_max_len_infer\", type=int, default=None,\n help=\"\"\"\\\n Max length of tgt sequences during inference. Also use to restrict the\n maximum decoding length.\\\n \"\"\")\n\n # Default settings works well (rarely need to change)\n parser.add_argument(\"--unit_type\", type=str, default=\"lstm\",\n help=\"lstm | gru | layer_norm_lstm | nas\")\n parser.add_argument(\"--forget_bias\", type=float, default=1.0,\n help=\"Forget bias for BasicLSTMCell.\")\n parser.add_argument(\"--dropout\", type=float, default=0.2,\n help=\"Dropout rate (not keep_prob)\")\n parser.add_argument(\"--max_gradient_norm\", type=float, default=5.0,\n help=\"Clip gradients to this norm.\")\n parser.add_argument(\"--batch_size\", type=int, default=128, help=\"Batch size.\")\n\n parser.add_argument(\"--steps_per_stats\", type=int, default=100,\n help=(\"How many training steps to do per stats logging.\"\n \"Save checkpoint every 10x steps_per_stats\"))\n parser.add_argument(\"--max_train\", type=int, default=0,\n help=\"Limit on the size of training data (0: no limit).\")\n parser.add_argument(\"--num_buckets\", type=int, default=5,\n help=\"Put data into similar-length buckets.\")\n parser.add_argument(\"--num_sampled_softmax\", type=int, default=0,\n help=(\"Use sampled_softmax_loss if > 0.\"\n \"Otherwise, use full softmax loss.\"))\n\n # SPM\n parser.add_argument(\"--subword_option\", type=str, default=\"\",\n choices=[\"\", \"bpe\", \"spm\"],\n help=\"\"\"\\\n Set to bpe or spm to activate subword desegmentation.\\\n \"\"\")\n\n # Experimental encoding feature.\n parser.add_argument(\"--use_char_encode\", type=\"bool\", default=False,\n help=\"\"\"\\\n Whether to split each word or bpe into character, and then\n generate the word-level representation from the character\n reprentation.\n \"\"\")\n\n # Misc\n parser.add_argument(\"--num_gpus\", type=int, default=1,\n help=\"Number of gpus in each worker.\")\n parser.add_argument(\"--log_device_placement\", type=\"bool\", nargs=\"?\",\n const=True, default=False, help=\"Debug GPU allocation.\")\n parser.add_argument(\"--metrics\", type=str, default=\"bleu\",\n help=(\"Comma-separated list of evaluations \"\n \"metrics (bleu,rouge,accuracy)\"))\n parser.add_argument(\"--steps_per_external_eval\", type=int, default=None,\n help=\"\"\"\\\n How many training steps to do per external evaluation. 
Automatically set\n based on data if None.\\\n \"\"\")\n parser.add_argument(\"--scope\", type=str, default=None,\n help=\"scope to put variables under\")\n parser.add_argument(\"--hparams_path\", type=str, default=None,\n help=(\"Path to standard hparams json file that overrides\"\n \"hparams values from FLAGS.\"))\n parser.add_argument(\"--random_seed\", type=int, default=None,\n help=\"Random seed (>0, set a specific seed).\")\n parser.add_argument(\"--override_loaded_hparams\", type=\"bool\", nargs=\"?\",\n const=True, default=False,\n help=\"Override loaded hparams with values specified\")\n parser.add_argument(\"--num_keep_ckpts\", type=int, default=5,\n help=\"Max number of checkpoints to keep.\")\n parser.add_argument(\"--avg_ckpts\", type=\"bool\", nargs=\"?\",\n const=True, default=False, help=(\"\"\"\\\n Average the last N checkpoints for external evaluation.\n N can be controlled by setting --num_keep_ckpts.\\\n \"\"\"))\n parser.add_argument(\"--language_model\", type=\"bool\", nargs=\"?\",\n const=True, default=False,\n help=\"True to train a language model, ignoring encoder\")\n\n # Inference\n parser.add_argument(\"--ckpt\", type=str, default=\"\",\n help=\"Checkpoint file to load a model for inference.\")\n parser.add_argument(\"--inference_input_file\", type=str, default=None,\n help=\"Set to the text to decode.\")\n parser.add_argument(\"--inference_list\", type=str, default=None,\n help=(\"A comma-separated list of sentence indices \"\n \"(0-based) to decode.\"))\n parser.add_argument(\"--infer_batch_size\", type=int, default=32,\n help=\"Batch size for inference mode.\")\n parser.add_argument(\"--inference_output_file\", type=str, default=None,\n help=\"Output file to store decoding results.\")\n parser.add_argument(\"--inference_ref_file\", type=str, default=None,\n help=(\"\"\"\\\n Reference file to compute evaluation scores (if provided).\\\n \"\"\"))\n\n # Advanced inference arguments\n parser.add_argument(\"--infer_mode\", type=str, default=\"greedy\",\n choices=[\"greedy\", \"sample\", \"beam_search\"],\n help=\"Which type of decoder to use during inference.\")\n parser.add_argument(\"--beam_width\", type=int, default=0,\n help=(\"\"\"\\\n beam width when using beam search decoder. If 0 (default), use standard\n decoder with greedy helper.\\\n \"\"\"))\n parser.add_argument(\"--length_penalty_weight\", type=float, default=0.0,\n help=\"Length penalty for beam search.\")\n parser.add_argument(\"--sampling_temperature\", type=float,\n default=0.0,\n help=(\"\"\"\\\n Softmax sampling temperature for inference decoding, 0.0 means greedy\n decoding. This option is ignored when using beam search.\\\n \"\"\"))\n parser.add_argument(\"--num_translations_per_input\", type=int, default=1,\n help=(\"\"\"\\\n Number of translations generated for each sentence. 
This is only used for\n inference.\\\n \"\"\"))\n\n # Job info\n parser.add_argument(\"--jobid\", type=int, default=0,\n help=\"Task id of the worker.\")\n parser.add_argument(\"--num_workers\", type=int, default=1,\n help=\"Number of workers (inference only).\")\n parser.add_argument(\"--num_inter_threads\", type=int, default=0,\n help=\"number of inter_op_parallelism_threads\")\n parser.add_argument(\"--num_intra_threads\", type=int, default=0,\n help=\"number of intra_op_parallelism_threads\")\n\n\ndef create_hparams(flags):\n \"\"\"Create training hparams.\"\"\"\n return tf.contrib.training.HParams(\n # Data\n src=flags.src,\n tgt=flags.tgt,\n train_prefix=flags.train_prefix,\n dev_prefix=flags.dev_prefix,\n test_prefix=flags.test_prefix,\n vocab_prefix=flags.vocab_prefix,\n embed_prefix=flags.embed_prefix,\n out_dir=flags.out_dir,\n\n # Networks\n num_units=flags.num_units,\n num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),\n num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),\n dropout=flags.dropout,\n unit_type=flags.unit_type,\n encoder_type=flags.encoder_type,\n residual=flags.residual,\n time_major=flags.time_major,\n num_embeddings_partitions=flags.num_embeddings_partitions,\n\n # Attention mechanisms\n attention=flags.attention,\n attention_architecture=flags.attention_architecture,\n output_attention=flags.output_attention,\n pass_hidden_state=flags.pass_hidden_state,\n\n # Train\n optimizer=flags.optimizer,\n num_train_steps=flags.num_train_steps,\n batch_size=flags.batch_size,\n init_op=flags.init_op,\n init_weight=flags.init_weight,\n max_gradient_norm=flags.max_gradient_norm,\n learning_rate=flags.learning_rate,\n warmup_steps=flags.warmup_steps,\n warmup_scheme=flags.warmup_scheme,\n decay_scheme=flags.decay_scheme,\n colocate_gradients_with_ops=flags.colocate_gradients_with_ops,\n num_sampled_softmax=flags.num_sampled_softmax,\n\n # Data constraints\n num_buckets=flags.num_buckets,\n max_train=flags.max_train,\n src_max_len=flags.src_max_len,\n tgt_max_len=flags.tgt_max_len,\n\n # Inference\n src_max_len_infer=flags.src_max_len_infer,\n tgt_max_len_infer=flags.tgt_max_len_infer,\n infer_batch_size=flags.infer_batch_size,\n\n # Advanced inference arguments\n infer_mode=flags.infer_mode,\n beam_width=flags.beam_width,\n length_penalty_weight=flags.length_penalty_weight,\n sampling_temperature=flags.sampling_temperature,\n num_translations_per_input=flags.num_translations_per_input,\n\n # Vocab\n sos=flags.sos if flags.sos else vocab_utils.SOS,\n eos=flags.eos if flags.eos else vocab_utils.EOS,\n subword_option=flags.subword_option,\n check_special_token=flags.check_special_token,\n use_char_encode=flags.use_char_encode,\n\n # Misc\n forget_bias=flags.forget_bias,\n num_gpus=flags.num_gpus,\n epoch_step=0, # record where we were within an epoch.\n steps_per_stats=flags.steps_per_stats,\n steps_per_external_eval=flags.steps_per_external_eval,\n share_vocab=flags.share_vocab,\n metrics=flags.metrics.split(\",\"),\n log_device_placement=flags.log_device_placement,\n random_seed=flags.random_seed,\n override_loaded_hparams=flags.override_loaded_hparams,\n num_keep_ckpts=flags.num_keep_ckpts,\n avg_ckpts=flags.avg_ckpts,\n language_model=flags.language_model,\n num_intra_threads=flags.num_intra_threads,\n num_inter_threads=flags.num_inter_threads,\n )\n\n\ndef _add_argument(hparams, key, value, update=True):\n \"\"\"Add an argument to hparams; if exists, change the value if update==True.\"\"\"\n if hasattr(hparams, key):\n if update:\n 
setattr(hparams, key, value)\n else:\n hparams.add_hparam(key, value)\n\n\ndef extend_hparams(hparams):\n \"\"\"Add new arguments to hparams.\"\"\"\n # Sanity checks\n if hparams.encoder_type == \"bi\" and hparams.num_encoder_layers % 2 != 0:\n raise ValueError(\"For bi, num_encoder_layers %d should be even\" %\n hparams.num_encoder_layers)\n if (hparams.attention_architecture in [\"gnmt\"] and\n hparams.num_encoder_layers < 2):\n raise ValueError(\"For gnmt attention architecture, \"\n \"num_encoder_layers %d should be >= 2\" %\n hparams.num_encoder_layers)\n if hparams.subword_option and hparams.subword_option not in [\"spm\", \"bpe\"]:\n raise ValueError(\"subword option must be either spm, or bpe\")\n if hparams.infer_mode == \"beam_search\" and hparams.beam_width <= 0:\n raise ValueError(\"beam_width must greater than 0 when using beam_search\"\n \"decoder.\")\n if hparams.infer_mode == \"sample\" and hparams.sampling_temperature <= 0.0:\n raise ValueError(\"sampling_temperature must greater than 0.0 when using\"\n \"sample decoder.\")\n\n # Different number of encoder / decoder layers\n assert hparams.num_encoder_layers and hparams.num_decoder_layers\n if hparams.num_encoder_layers != hparams.num_decoder_layers:\n hparams.pass_hidden_state = False\n utils.print_out(\"Num encoder layer %d is different from num decoder layer\"\n \" %d, so set pass_hidden_state to False\" % (\n hparams.num_encoder_layers,\n hparams.num_decoder_layers))\n\n # Set residual layers\n num_encoder_residual_layers = 0\n num_decoder_residual_layers = 0\n if hparams.residual:\n if hparams.num_encoder_layers > 1:\n num_encoder_residual_layers = hparams.num_encoder_layers - 1\n if hparams.num_decoder_layers > 1:\n num_decoder_residual_layers = hparams.num_decoder_layers - 1\n\n if hparams.encoder_type == \"gnmt\":\n # The first unidirectional layer (after the bi-directional layer) in\n # the GNMT encoder can't have residual connection due to the input is\n # the concatenation of fw_cell and bw_cell's outputs.\n num_encoder_residual_layers = hparams.num_encoder_layers - 2\n\n # Compatible for GNMT models\n if hparams.num_encoder_layers == hparams.num_decoder_layers:\n num_decoder_residual_layers = num_encoder_residual_layers\n _add_argument(hparams, \"num_encoder_residual_layers\",\n num_encoder_residual_layers)\n _add_argument(hparams, \"num_decoder_residual_layers\",\n num_decoder_residual_layers)\n\n # Language modeling\n if getattr(hparams, \"language_model\", None):\n hparams.attention = \"\"\n hparams.attention_architecture = \"\"\n hparams.pass_hidden_state = False\n hparams.share_vocab = True\n hparams.src = hparams.tgt\n utils.print_out(\"For language modeling, we turn off attention and \"\n \"pass_hidden_state; turn on share_vocab; set src to tgt.\")\n\n ## Vocab\n # Get vocab file names first\n if hparams.vocab_prefix:\n src_vocab_file = hparams.vocab_prefix + \".\" + hparams.src\n tgt_vocab_file = hparams.vocab_prefix + \".\" + hparams.tgt\n else:\n raise ValueError(\"hparams.vocab_prefix must be provided.\")\n\n # Source vocab\n check_special_token = getattr(hparams, \"check_special_token\", True)\n src_vocab_size, src_vocab_file = vocab_utils.check_vocab(\n src_vocab_file,\n hparams.out_dir,\n check_special_token=check_special_token,\n sos=hparams.sos,\n eos=hparams.eos,\n unk=vocab_utils.UNK)\n\n # Target vocab\n if hparams.share_vocab:\n utils.print_out(\" using source vocab for target\")\n tgt_vocab_file = src_vocab_file\n tgt_vocab_size = src_vocab_size\n else:\n tgt_vocab_size, tgt_vocab_file = 
vocab_utils.check_vocab(\n tgt_vocab_file,\n hparams.out_dir,\n check_special_token=check_special_token,\n sos=hparams.sos,\n eos=hparams.eos,\n unk=vocab_utils.UNK)\n _add_argument(hparams, \"src_vocab_size\", src_vocab_size)\n _add_argument(hparams, \"tgt_vocab_size\", tgt_vocab_size)\n _add_argument(hparams, \"src_vocab_file\", src_vocab_file)\n _add_argument(hparams, \"tgt_vocab_file\", tgt_vocab_file)\n\n # Num embedding partitions\n num_embeddings_partitions = getattr(hparams, \"num_embeddings_partitions\", 0)\n _add_argument(hparams, \"num_enc_emb_partitions\", num_embeddings_partitions)\n _add_argument(hparams, \"num_dec_emb_partitions\", num_embeddings_partitions)\n\n # Pretrained Embeddings\n _add_argument(hparams, \"src_embed_file\", \"\")\n _add_argument(hparams, \"tgt_embed_file\", \"\")\n if getattr(hparams, \"embed_prefix\", None):\n src_embed_file = hparams.embed_prefix + \".\" + hparams.src\n tgt_embed_file = hparams.embed_prefix + \".\" + hparams.tgt\n\n if tf.gfile.Exists(src_embed_file):\n utils.print_out(\" src_embed_file %s exist\" % src_embed_file)\n hparams.src_embed_file = src_embed_file\n\n utils.print_out(\n \"For pretrained embeddings, set num_enc_emb_partitions to 1\")\n hparams.num_enc_emb_partitions = 1\n else:\n utils.print_out(\" src_embed_file %s doesn't exist\" % src_embed_file)\n\n if tf.gfile.Exists(tgt_embed_file):\n utils.print_out(\" tgt_embed_file %s exist\" % tgt_embed_file)\n hparams.tgt_embed_file = tgt_embed_file\n\n utils.print_out(\n \"For pretrained embeddings, set num_dec_emb_partitions to 1\")\n hparams.num_dec_emb_partitions = 1\n else:\n utils.print_out(\" tgt_embed_file %s doesn't exist\" % tgt_embed_file)\n\n # Evaluation\n for metric in hparams.metrics:\n best_metric_dir = os.path.join(hparams.out_dir, \"best_\" + metric)\n tf.gfile.MakeDirs(best_metric_dir)\n _add_argument(hparams, \"best_\" + metric, 0, update=False)\n _add_argument(hparams, \"best_\" + metric + \"_dir\", best_metric_dir)\n\n if getattr(hparams, \"avg_ckpts\", None):\n best_metric_dir = os.path.join(hparams.out_dir, \"avg_best_\" + metric)\n tf.gfile.MakeDirs(best_metric_dir)\n _add_argument(hparams, \"avg_best_\" + metric, 0, update=False)\n _add_argument(hparams, \"avg_best_\" + metric + \"_dir\", best_metric_dir)\n\n return hparams\n\n\ndef ensure_compatible_hparams(hparams, default_hparams, hparams_path=\"\"):\n \"\"\"Make sure the loaded hparams is compatible with new changes.\"\"\"\n default_hparams = utils.maybe_parse_standard_hparams(\n default_hparams, hparams_path)\n\n # Set num encoder/decoder layers (for old checkpoints)\n if hasattr(hparams, \"num_layers\"):\n if not hasattr(hparams, \"num_encoder_layers\"):\n hparams.add_hparam(\"num_encoder_layers\", hparams.num_layers)\n if not hasattr(hparams, \"num_decoder_layers\"):\n hparams.add_hparam(\"num_decoder_layers\", hparams.num_layers)\n\n # For compatible reason, if there are new fields in default_hparams,\n # we add them to the current hparams\n default_config = default_hparams.values()\n config = hparams.values()\n for key in default_config:\n if key not in config:\n hparams.add_hparam(key, default_config[key])\n\n # Update all hparams' keys if override_loaded_hparams=True\n if getattr(default_hparams, \"override_loaded_hparams\", None):\n overwritten_keys = default_config.keys()\n else:\n # For inference\n overwritten_keys = INFERENCE_KEYS\n\n for key in overwritten_keys:\n if getattr(hparams, key) != default_config[key]:\n utils.print_out(\"# Updating hparams.%s: %s -> %s\" %\n (key, 
str(getattr(hparams, key)),\n str(default_config[key])))\n setattr(hparams, key, default_config[key])\n return hparams\n\n\ndef create_or_load_hparams(\n out_dir, default_hparams, hparams_path, save_hparams=True):\n \"\"\"Create hparams or load hparams from out_dir.\"\"\"\n hparams = utils.load_hparams(out_dir)\n if not hparams:\n hparams = default_hparams\n hparams = utils.maybe_parse_standard_hparams(\n hparams, hparams_path)\n else:\n hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)\n hparams = extend_hparams(hparams)\n\n # Save HParams\n if save_hparams:\n utils.save_hparams(out_dir, hparams)\n for metric in hparams.metrics:\n utils.save_hparams(getattr(hparams, \"best_\" + metric + \"_dir\"), hparams)\n\n # Print HParams\n utils.print_hparams(hparams)\n return hparams\n\n\ndef run_main(flags, default_hparams, train_fn, inference_fn, target_session=\"\"):\n \"\"\"Run main.\"\"\"\n # Job\n jobid = flags.jobid\n num_workers = flags.num_workers\n utils.print_out(\"# Job id %d\" % jobid)\n\n # GPU device\n utils.print_out(\n \"# Devices visible to TensorFlow: %s\" % repr(tf.Session().list_devices()))\n\n # Random\n random_seed = flags.random_seed\n if random_seed is not None and random_seed > 0:\n utils.print_out(\"# Set random seed to %d\" % random_seed)\n random.seed(random_seed + jobid)\n np.random.seed(random_seed + jobid)\n\n # Model output directory\n out_dir = flags.out_dir\n if out_dir and not tf.gfile.Exists(out_dir):\n utils.print_out(\"# Creating output directory %s ...\" % out_dir)\n tf.gfile.MakeDirs(out_dir)\n\n # Load hparams.\n loaded_hparams = False\n if flags.ckpt: # Try to load hparams from the same directory as ckpt\n ckpt_dir = os.path.dirname(flags.ckpt)\n ckpt_hparams_file = os.path.join(ckpt_dir, \"hparams\")\n if tf.gfile.Exists(ckpt_hparams_file) or flags.hparams_path:\n hparams = create_or_load_hparams(\n ckpt_dir, default_hparams, flags.hparams_path,\n save_hparams=False)\n loaded_hparams = True\n if not loaded_hparams: # Try to load from out_dir\n assert out_dir\n hparams = create_or_load_hparams(\n out_dir, default_hparams, flags.hparams_path,\n save_hparams=(jobid == 0))\n\n ## Train / Decode\n if flags.inference_input_file:\n # Inference output directory\n trans_file = flags.inference_output_file\n assert trans_file\n trans_dir = os.path.dirname(trans_file)\n if not tf.gfile.Exists(trans_dir): tf.gfile.MakeDirs(trans_dir)\n\n # Inference indices\n hparams.inference_indices = None\n if flags.inference_list:\n (hparams.inference_indices) = (\n [int(token) for token in flags.inference_list.split(\",\")])\n\n # Inference\n ckpt = flags.ckpt\n if not ckpt:\n ckpt = tf.train.latest_checkpoint(out_dir)\n inference_fn(ckpt, flags.inference_input_file,\n trans_file, hparams, num_workers, jobid)\n\n # Evaluation\n ref_file = flags.inference_ref_file\n if ref_file and tf.gfile.Exists(trans_file):\n for metric in hparams.metrics:\n score = evaluation_utils.evaluate(\n ref_file,\n trans_file,\n metric,\n hparams.subword_option)\n utils.print_out(\" %s: %.1f\" % (metric, score))\n else:\n # Train\n train_fn(hparams, target_session=target_session)\n\n\ndef main(unused_argv):\n default_hparams = create_hparams(FLAGS)\n train_fn = train.train\n inference_fn = inference.inference\n run_main(FLAGS, default_hparams, train_fn, inference_fn)\n\n\nif __name__ == \"__main__\":\n nmt_parser = argparse.ArgumentParser()\n add_arguments(nmt_parser)\n FLAGS, unparsed = nmt_parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.train.latest_checkpoint", "numpy.random.seed", "tensorflow.gfile.Exists", "tensorflow.gfile.MakeDirs", "tensorflow.Session", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
moinnadeem/composer
[ "bc3f41b766bd4450f05a99f44db4a6b3901ea1c8", "bc3f41b766bd4450f05a99f44db4a6b3901ea1c8", "bc3f41b766bd4450f05a99f44db4a6b3901ea1c8", "bc3f41b766bd4450f05a99f44db4a6b3901ea1c8" ]
[ "tests/algorithms/test_torch_export.py", "composer/algorithms/ema/ema.py", "composer/datasets/streaming/world.py", "composer/models/ssd/ssd.py" ]
[ "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nTests a variety of export options with our surgery methods applied, including\ntorchscript, torch.fx, and ONNX.\n\"\"\"\nimport os\nimport pathlib\nfrom typing import Any, Callable, Type\n\nimport pytest\nimport torch\nimport torch.fx\n\nfrom composer.algorithms.blurpool.blurpool import BlurPool\nfrom composer.algorithms.channels_last.channels_last import ChannelsLast\nfrom composer.algorithms.factorize.factorize import Factorize\nfrom composer.algorithms.ghost_batchnorm.ghost_batchnorm import GhostBatchNorm\nfrom composer.algorithms.squeeze_excite.squeeze_excite import SqueezeExcite\nfrom composer.algorithms.stochastic_depth.stochastic_depth import StochasticDepth\nfrom composer.core.algorithm import Algorithm\nfrom composer.functional import (apply_blurpool, apply_channels_last, apply_factorization, apply_ghost_batchnorm,\n apply_squeeze_excite, apply_stochastic_depth)\nfrom tests.algorithms.algorithm_settings import get_alg_kwargs, get_alg_model, get_algs_with_marks\n\nalgo_kwargs = {\n apply_stochastic_depth: {\n 'stochastic_method': 'block',\n 'target_layer_name': 'ResNetBottleneck'\n },\n apply_ghost_batchnorm: {\n 'ghost_batch_size': 2\n }\n}\n\n\[email protected]\ndef input():\n # input batch to ComposerModel is (input, target) tuple\n return (torch.rand(4, 3, 112, 112), torch.Tensor())\n\n\ntorchscript_algs_with_marks = [\n x for x in get_algs_with_marks()\n if x.values[0] in (BlurPool, Factorize, GhostBatchNorm, SqueezeExcite, StochasticDepth, ChannelsLast)\n]\n\n# <--- torchscript export --->\n\n\ndef get_surgery_method(alg_cls: Type[Algorithm]) -> Callable:\n if alg_cls is BlurPool:\n return apply_blurpool\n if alg_cls is Factorize:\n return apply_factorization\n if alg_cls is GhostBatchNorm:\n return apply_ghost_batchnorm\n if alg_cls is SqueezeExcite:\n return apply_squeeze_excite\n if alg_cls is StochasticDepth:\n return apply_stochastic_depth\n if alg_cls is ChannelsLast:\n return apply_channels_last\n raise ValueError(f'Unknown algorithm class {alg_cls}')\n\n\[email protected](10)\[email protected]('alg_cls', torchscript_algs_with_marks)\ndef test_surgery_torchscript_train(input: Any, alg_cls: Type[Algorithm]):\n \"\"\"Tests torchscript model in train mode.\"\"\"\n if alg_cls in (Factorize, GhostBatchNorm, StochasticDepth):\n pytest.xfail('Unsupported')\n\n alg_kwargs = get_alg_kwargs(alg_cls)\n model = get_alg_model(alg_cls)\n\n surgery_method = get_surgery_method(alg_cls)\n\n alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)\n\n surgery_method(model, **alg_kwargs)\n\n scripted_func = torch.jit.script(model)\n scripted_func.train() # type: ignore (third-party)\n model.train()\n torch.testing.assert_allclose(scripted_func(input), model(input)) # type: ignore (third-party)\n\n\[email protected](10)\[email protected]('alg_cls', torchscript_algs_with_marks)\ndef test_surgery_torchscript_eval(input: Any, alg_cls: Type[Algorithm]):\n \"\"\"Tests torchscript model in eval mode.\"\"\"\n if alg_cls is Factorize:\n pytest.xfail('Unsupported')\n\n surgery_method = get_surgery_method(alg_cls)\n\n alg_kwargs = get_alg_kwargs(alg_cls)\n model = get_alg_model(alg_cls)\n alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)\n\n surgery_method(model, **alg_kwargs)\n\n scripted_func = torch.jit.script(model)\n scripted_func.eval() # type: ignore (third-party)\n model.eval()\n torch.testing.assert_allclose(scripted_func(input), model(input)) # type: ignore (third-party)\n\n\n# <--- 
torch.fx export --->\n\n\[email protected](10)\[email protected]('alg_cls', torchscript_algs_with_marks)\ndef test_surgery_torchfx_eval(\n input: Any,\n alg_cls: Type[Algorithm],\n):\n \"\"\"Tests torch.fx model in eval mode.\"\"\"\n\n alg_kwargs = get_alg_kwargs(alg_cls)\n model = get_alg_model(alg_cls)\n surgery_method = get_surgery_method(alg_cls)\n\n if alg_cls in (BlurPool, GhostBatchNorm):\n pytest.xfail('Control flow')\n\n alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)\n\n surgery_method(model, **alg_kwargs)\n\n model.eval()\n\n traced_func = torch.fx.symbolic_trace(model)\n torch.testing.assert_allclose(traced_func(input), model(input)) # type: ignore (third-party)\n\n\n# <--- onnx export --->\n\n\[email protected](10)\[email protected]('alg_cls', torchscript_algs_with_marks)\[email protected](\n r'ignore:Converting a tensor to a Python .* might cause the trace to be incorrect:torch.jit._trace.TracerWarning')\ndef test_surgery_onnx(\n input: Any,\n alg_cls: Type[Algorithm],\n tmp_path: pathlib.Path,\n):\n \"\"\"Tests onnx export and runtime\"\"\"\n pytest.importorskip('onnx')\n import onnx # type: ignore\n import onnxruntime as ort # type: ignore\n\n surgery_method = get_surgery_method(alg_cls)\n\n model = get_alg_model(alg_cls)\n alg_kwargs = get_alg_kwargs(alg_cls)\n alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)\n\n surgery_method(model, **alg_kwargs)\n model.eval()\n\n onnx_path = os.path.join(tmp_path, 'model.onnx')\n torch.onnx.export(\n model,\n (input,),\n onnx_path,\n input_names=['input'],\n output_names=['output'],\n )\n\n # check onnx model\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n\n # run inference\n ort_session = ort.InferenceSession(onnx_path)\n outputs = ort_session.run(\n None,\n {'input': input[0].numpy()},\n )\n\n torch.testing.assert_allclose(\n outputs[0],\n model(input),\n rtol=1e-4, # lower tolerance for ONNX\n atol=1e-3, # lower tolerance for ONNX\n )\n", "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Core Exponential Moving Average (EMA) classes and functions.\"\"\"\n\nfrom __future__ import annotations\n\nimport copy\nimport itertools\nimport logging\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\n\nfrom composer.core import Algorithm, Event, State, Time, TimeUnit\nfrom composer.loggers import Logger\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['EMA', 'compute_ema']\n\n\ndef compute_ema(model: T_Model, ema_model: T_Model, smoothing: float = 0.99):\n r\"\"\"Updates the weights of ``ema_model`` to be closer to the weights of ``model`` according to an exponential\n weighted average. Weights are updated according to\n\n .. math::\n W_{ema_model}^{(t+1)} = smoothing\\times W_{ema_model}^{(t)}+(1-smoothing)\\times W_{model}^{(t)}\n\n The update to ``ema_model`` happens in place.\n\n The half life of the weights for terms in the average is given by\n\n .. math::\n t_{1/2} = -\\frac{\\log(2)}{\\log(smoothing)}\n\n Therefore to set smoothing to obtain a target half life, set smoothing according to\n\n .. math::\n smoothing = \\exp\\left[-\\frac{\\log(2)}{t_{1/2}}\\right]\n\n Args:\n model (torch.nn.Module): the model containing the latest weights to use to update the moving average weights.\n ema_model (torch.nn.Module): the model containing the moving average weights to be updated.\n smoothing (float, optional): the coefficient representing the degree to which older observations are kept.\n Must be in the interval :math:`(0, 1)`. 
Default: ``0.99``.\n\n Example:\n .. testcode::\n\n import composer.functional as cf\n from torchvision import models\n model = models.resnet50()\n ema_model = models.resnet50()\n cf.compute_ema(model, ema_model, smoothing=0.9)\n \"\"\"\n with torch.no_grad():\n model_params = itertools.chain(model.parameters(), model.buffers())\n ema_model_params = itertools.chain(ema_model.parameters(), ema_model.buffers())\n\n for ema_param, model_param in zip(ema_model_params, model_params):\n model_param = model_param.detach()\n ema_param.copy_(ema_param * smoothing + (1. - smoothing) * model_param)\n\n\nclass EMA(Algorithm):\n r\"\"\"Maintains a shadow model with weights that follow the exponential moving average of the trained model weights.\n\n Weights are updated according to\n\n .. math::\n W_{ema_model}^{(t+1)} = smoothing\\times W_{ema_model}^{(t)}+(1-smoothing)\\times W_{model}^{(t)}\n\n Where the smoothing is determined from ``half_life`` according to\n\n .. math::\n smoothing = \\exp\\left[-\\frac{\\log(2)}{t_{1/2}}\\right]\n\n Model evaluation is done with the moving average weights, which can result in better generalization. Because of the\n shadow models, EMA triples the model's memory consumption. Note that this does not mean that the total memory\n required doubles, since stored activations and the optimizer state are not duplicated. EMA also uses a small\n amount of extra compute to update the moving average weights.\n\n See the :doc:`Method Card </method_cards/ema>` for more details.\n\n Args:\n half_life (str): The time string specifying the half life for terms in the average. A longer half life means\n old information is remembered longer, a shorter half life means old information is discared sooner.\n A half life of ``0`` means no averaging is done, an infinite half life means no update is done. Currently\n only units of epoch ('ep') and batch ('ba'). Value must be an integer.\n update_interval (str, optional): The time string specifying the period at which updates are done. For example,\n an ``update_interval='1ep'`` means updates are done every epoch, while ``update_interval='10ba'`` means\n updates are done once every ten batches. Units must match the units used to specify ``half_life``. If not\n specified, ``update_interval`` will default to ``1`` in the units of ``half_life``. Value must be an\n integer. Default: ``None``.\n train_with_ema_weights (bool, optional): An experimental feature that uses the ema weights as the training\n weights. In most cases should be left as ``False``. Default ``False``.\n\n Example:\n .. 
testcode::\n\n from composer.algorithms import EMA\n algorithm = EMA(half_life='50ba', update_interval='1ba')\n trainer = Trainer(\n model=model,\n train_dataloader=train_dataloader,\n eval_dataloader=eval_dataloader,\n max_duration=\"1ep\",\n algorithms=[algorithm],\n optimizers=[optimizer]\n )\n \"\"\"\n\n def __init__(self, half_life: str, update_interval: Optional[str] = None, train_with_ema_weights: bool = False):\n self.half_life = half_life\n self.update_interval = update_interval\n self.train_with_ema_weights = train_with_ema_weights\n\n self.ema_model = None\n self.training_model = None\n\n self.serialized_attributes = [\n 'ema_model',\n 'training_model',\n ]\n\n # Check timestrings are parsable and convert into time object\n try:\n self.half_life = Time.from_timestring(half_life)\n except ValueError as error:\n raise ValueError(f'Invalid time string for parameter half_life') from error\n\n # Create the update interval if none is specified\n if self.update_interval is None:\n self.update_interval = Time(1, self.half_life.unit)\n elif type(update_interval) is str:\n try:\n self.update_interval = Time.from_timestring(update_interval)\n except ValueError as error:\n raise ValueError(f'Invalid time string for parameter update_interval') from error\n else:\n raise ValueError(f'update_interval must be None or a time string.')\n\n # Verify that the units of half_life and update_interval are compatible\n if self.half_life.unit != self.update_interval.unit:\n raise ValueError(f'Units of half_life and update_interval must match.')\n\n # Verify that the time strings have supported units.\n if self.half_life.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:\n raise ValueError(f'Invalid time unit for parameter half_life: '\n f'{self.update_interval.unit}')\n\n # Calculate the appropriate weighting for the moving average\n self.smoothing = 2**(-(self.update_interval.value / self.half_life.value))\n\n # Construct the appropriate matching events\n self.match_events = [Event.FIT_START, Event.EVAL_START, Event.EVAL_END]\n if self.half_life.unit == TimeUnit.EPOCH:\n self.match_events.append(Event.EPOCH_END)\n if self.half_life.unit == TimeUnit.BATCH:\n self.match_events.append(Event.BATCH_END)\n\n def match(self, event: Event, state: State) -> bool:\n return event in self.match_events\n\n def apply(self, event: Event, state: State, logger: Logger) -> None:\n assert isinstance(self.update_interval, Time)\n\n if event == Event.FIT_START:\n if self.ema_model is not None:\n _move_shadow_model_to_device(self.ema_model, state.model)\n if self.training_model is not None:\n _move_shadow_model_to_device(self.training_model, state.model)\n\n if event in [Event.BATCH_END, Event.EPOCH_END]:\n # Check if an update should happen\n if state.timestamp.get(self.update_interval.unit).value % self.update_interval.value == 0:\n # Initialize the shadow models if they don't exist yet\n if self.ema_model is None:\n self.ema_model = ShadowModel(state.model)\n if self.training_model is None and self.train_with_ema_weights is False:\n self.training_model = ShadowModel(state.model)\n\n # Update the ema model\n compute_ema(state.model, self.ema_model, smoothing=self.smoothing)\n if self.train_with_ema_weights:\n # Use the ema weights for further training\n _copy_model(self.ema_model, state.model)\n\n if event == Event.EVAL_START and self.ema_model is not None and self.training_model is not None:\n # Swap out the training model for the ema model in state\n _copy_model(state.model, self.training_model)\n _copy_model(self.ema_model, 
state.model)\n\n if event == Event.EVAL_END and self.training_model is not None:\n # Swap out the ema model for the training model in state\n _copy_model(self.training_model, state.model)\n\n def get_ema_model(self, model: torch.nn.Module):\n \"\"\"Copies ema model parameters and buffers to the input model and returns it.\n\n Args:\n model (torch.nn.Module): the model to convert into the ema model.\n\n Returns:\n model (torch.nn.Module): the input model with parameters and buffers replaced with the averaged parameters\n and buffers.\n \"\"\"\n if self.ema_model is None:\n raise AttributeError('ema model has not been initialized yet')\n\n _copy_model(self.ema_model, model)\n return model\n\n def state_dict(self) -> Dict[str, ShadowModel]:\n state_dict = {}\n for attribute_name in self.serialized_attributes:\n shadow_model = getattr(self, attribute_name)\n state_dict[attribute_name] = {}\n state_dict[attribute_name]['parameters'] = shadow_model.parameters()\n state_dict[attribute_name]['buffers'] = shadow_model.buffers()\n return state_dict\n\n def load_shadow_model(self, name, parameters: List, buffers: List):\n shadow_model = ShadowModel(None)\n shadow_model.param_list = parameters\n shadow_model.buffer_list = buffers\n setattr(self, name, shadow_model)\n\n def load_state_dict(self, state: Dict[str, Any], strict: bool = False):\n for attribute_name, serialized_value in state.items():\n self.load_shadow_model(attribute_name, serialized_value['parameters'], serialized_value['buffers'])\n\n\nclass ShadowModel:\n \"\"\"A shadow model that tracks parameters and buffers from an original source model.\n\n Args:\n model (torch.nn.Module): the source model containing the parameters and buffers to shadow.\n \"\"\"\n\n def __init__(self, model: Union[None, torch.nn.Module]):\n if model is not None:\n self.param_list = [copy.deepcopy(p.data) for p in model.parameters()]\n self.buffer_list = [copy.deepcopy(b.data) for b in model.buffers()]\n else:\n self.param_list = []\n self.buffer_list = []\n\n def parameters(self):\n return self.param_list\n\n def buffers(self):\n return self.buffer_list\n\n\nT_Model = Union[torch.nn.Module, ShadowModel]\n\n\ndef _copy_model(source_model: T_Model, destination_model: T_Model):\n \"\"\"Copies parameters and buffers from ``source_model`` to ``destination_model``\"\"\"\n with torch.no_grad():\n source_params = itertools.chain(source_model.parameters(), source_model.buffers())\n destination_params = itertools.chain(destination_model.parameters(), destination_model.buffers())\n\n for source_param, destination_param in zip(source_params, destination_params):\n destination_param.data = source_param.data\n\n\ndef _move_shadow_model_to_device(shadow_model: ShadowModel, destination_model: torch.nn.Module):\n \"\"\"Ensures the tensors of a shadow model are on the same device as a destination model\"\"\"\n with torch.no_grad():\n destination_params = destination_model.parameters()\n shadow_params = shadow_model.parameters()\n shadow_model.param_list = [s.to(d.device) for s, d in zip(shadow_params, destination_params)]\n\n destination_buffers = destination_model.buffers()\n shadow_buffers = shadow_model.buffers()\n shadow_model.buffer_list = [s.to(d.device) for s, d in zip(shadow_buffers, destination_buffers)]\n", "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"The :class:`World` class is used for easily querying distributed training info, used by `StreamingDataset`.\n\"\"\"\n\nfrom typing import NamedTuple\n\nfrom torch.utils.data 
import get_worker_info\n\nfrom composer.utils import dist\n\n__all__ = ['World', 'get_world']\n\n\nclass World(NamedTuple):\n \"\"\"A :class:`NamedTuple` that provides context about workers, devices, and nodes.\n\n\n Attributes:\n global_node (int): The id of this node within the global system\n global_num_nodes (int): The number of nodes within the global system\n global_device (int): The id of this device within the global system\n global_num_devices (int): The number of devices within the global system\n node_device (int): The id of this device within this node\n node_num_devices (int): The number of devices within this node\n global_worker (int): The id of this worker within the global system\n global_num_workers (int): The number of workers within the global system\n node_worker (int): The id of this worker within this node\n node_num_workers (int): The number of workers within this node\n device_worker (int): The id of this worker within this device\n device_num_workers (int): The number of workers within this device\n \"\"\"\n global_node: int\n global_num_nodes: int\n\n global_device: int\n global_num_devices: int\n\n node_device: int\n node_num_devices: int\n\n global_worker: int\n global_num_workers: int\n\n node_worker: int\n node_num_workers: int\n\n device_worker: int\n device_num_workers: int\n\n\ndef get_world() -> World:\n \"\"\"Returns a :class:`World` object, initialized using :mod:`composer.utils.dist` and :func:`torch.utils.data.get_worker_info`\"\"\"\n # Node and Device info\n global_node = dist.get_node_rank()\n global_device = dist.get_global_rank()\n global_num_devices = dist.get_world_size()\n node_device = dist.get_local_rank()\n node_num_devices = dist.get_local_world_size()\n\n # TODO: to remove this block, composer.dist must provide 'num_nodes'\n if global_num_devices % node_num_devices != 0:\n raise RuntimeError(\n f\"Expected global_num_devices ({global_num_devices}) % node_num_devices ({node_num_devices}) == 0. 
Unable to determine 'num_nodes'.\"\n )\n global_num_nodes = global_num_devices // node_num_devices\n\n # Worker info\n # We assume every Device has the same number of Workers.\n worker_info = get_worker_info()\n if worker_info:\n device_worker = worker_info.id\n device_num_workers = worker_info.num_workers\n else:\n device_worker = 0\n device_num_workers = 1\n\n node_worker = node_device * device_num_workers + device_worker\n node_num_workers = node_num_devices * device_num_workers\n\n global_worker = global_device * device_num_workers + device_worker\n global_num_workers = global_num_devices * device_num_workers\n\n return World(\n global_node=global_node,\n global_num_nodes=global_num_nodes,\n global_device=global_device,\n global_num_devices=global_num_devices,\n node_device=node_device,\n node_num_devices=node_num_devices,\n global_worker=global_worker,\n global_num_workers=global_num_workers,\n node_worker=node_worker,\n node_num_workers=node_num_workers,\n device_worker=device_worker,\n device_num_workers=device_num_workers,\n )\n", "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Single Shot Object Detection model with pretrained ResNet34 backbone extending :class:`.ComposerModel`.\"\"\"\n\nimport os\nimport tempfile\nfrom typing import Any, Sequence, Tuple, Union\n\nimport numpy as np\nimport requests\nfrom torch import Tensor\nfrom torchmetrics import Metric, MetricCollection\n\nfrom composer.models.base import ComposerModel\nfrom composer.models.ssd.base_model import Loss\nfrom composer.models.ssd.ssd300 import SSD300\nfrom composer.models.ssd.utils import Encoder, SSDTransformer, dboxes300_coco\nfrom composer.utils.import_helpers import MissingConditionalImportError\n\n__all__ = ['SSD']\n\n\nclass SSD(ComposerModel):\n \"\"\"Single Shot Object detection Model with pretrained ResNet34 backbone extending :class:`.ComposerModel`.\n\n Args:\n input_size (int, optional): input image size. Default: ``300``.\n num_classes (int, optional): The number of classes to detect. Default: ``80``.\n overlap_threshold (float, optional): Minimum IOU threshold for NMS. Default: ``0.5``.\n nms_max_detections (int, optional): Max number of boxes after NMS. Default: ``200``.\n data (str, optional): path to coco dataset. 
Default: ``\"/localdisk/coco\"``.\n \"\"\"\n\n def __init__(self, input_size: int, overlap_threshold: float, nms_max_detections: int, num_classes: int, data: str):\n super().__init__()\n\n self.input_size = input_size\n self.overlap_threshold = overlap_threshold\n self.nms_max_detections = nms_max_detections\n self.num_classes = num_classes\n url = 'https://download.pytorch.org/models/resnet34-333f7ec4.pth'\n with tempfile.TemporaryDirectory() as tempdir:\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n pretrained_backbone = os.path.join(tempdir, 'weights.pth')\n with open(pretrained_backbone, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n self.module = SSD300(self.num_classes, model_path=pretrained_backbone)\n\n dboxes = dboxes300_coco()\n self.loss_func = Loss(dboxes)\n\n self.encoder = Encoder(dboxes)\n self.data = data\n self.MAP = coco_map(self.data)\n val_annotate = os.path.join(self.data, 'annotations/instances_val2017.json')\n val_coco_root = os.path.join(self.data, 'val2017')\n input_size = self.input_size\n val_trans = SSDTransformer(dboxes, (input_size, input_size), val=True)\n from composer.datasets.coco import COCODetection\n self.val_coco = COCODetection(val_coco_root, val_annotate, val_trans)\n\n def loss(self, outputs: Any, batch: Any) -> Union[Tensor, Sequence[Tensor]]:\n\n (_, _, _, bbox, label) = batch #type: ignore\n if not isinstance(bbox, Tensor):\n raise TypeError('bbox must be a singular tensor')\n trans_bbox = bbox.transpose(1, 2).contiguous()\n\n ploc, plabel = outputs\n gloc, glabel = trans_bbox, label\n\n loss = self.loss_func(ploc, plabel, gloc, glabel)\n return loss\n\n def metrics(self, train: bool = False) -> Union[Metric, MetricCollection]:\n return self.MAP\n\n def forward(self, batch: Any) -> Tensor:\n (img, _, _, _, _) = batch #type: ignore\n ploc, plabel = self.module(img)\n return ploc, plabel #type: ignore\n\n def validate(self, batch: Any) -> Tuple[Any, Any]:\n inv_map = {v: k for k, v in self.val_coco.label_map.items()}\n ret = []\n overlap_threshold = self.overlap_threshold\n nms_max_detections = self.nms_max_detections\n\n (img, img_id, img_size, _, _) = batch #type: ignore\n ploc, plabel = self.module(img)\n\n results = []\n try:\n results = self.encoder.decode_batch(ploc,\n plabel,\n overlap_threshold,\n nms_max_detections,\n nms_valid_thresh=0.05)\n except:\n print('No object detected')\n\n (htot, wtot) = [d.cpu().numpy() for d in img_size] #type: ignore\n img_id = img_id.cpu().numpy() #type: ignore\n if len(results) > 0:\n # Iterate over batch elements\n for img_id_, wtot_, htot_, result in zip(img_id, wtot, htot, results):\n loc, label, prob = [r.cpu().numpy() for r in result] #type: ignore\n # Iterate over image detections\n for loc_, label_, prob_ in zip(loc, label, prob):\n ret.append([img_id_, loc_[0]*wtot_, \\\n loc_[1]*htot_,\n (loc_[2] - loc_[0])*wtot_,\n (loc_[3] - loc_[1])*htot_,\n prob_,\n inv_map[label_]])\n\n return ret, ret\n\n\nclass coco_map(Metric):\n\n def __init__(self, data):\n super().__init__()\n try:\n from pycocotools.coco import COCO\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='coco',\n conda_channel='conda-forge',\n conda_package='pycocotools') from e\n self.add_state('predictions', default=[])\n val_annotate = os.path.join(data, 'annotations/instances_val2017.json')\n self.cocogt = COCO(annotation_file=val_annotate)\n\n def update(self, pred, target):\n self.predictions.append(pred) #type: ignore\n np.squeeze(self.predictions) 
#type: ignore\n\n def compute(self):\n try:\n from pycocotools.cocoeval import COCOeval\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='coco',\n conda_channel='conda-forge',\n conda_package='pycocotools') from e\n cocoDt = self.cocogt.loadRes(np.array(self.predictions))\n E = COCOeval(self.cocogt, cocoDt, iouType='bbox')\n E.evaluate()\n E.accumulate()\n E.summarize()\n return E.stats[0]\n" ]
[ [ "torch.jit.script", "torch.onnx.export", "torch.Tensor", "torch.fx.symbolic_trace", "torch.rand" ], [ "torch.no_grad" ], [ "torch.utils.data.get_worker_info" ], [ "numpy.squeeze", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gglin001/popart
[ "3225214343f6d98550b6620e809a3544e8bcbfc6", "3225214343f6d98550b6620e809a3544e8bcbfc6", "3225214343f6d98550b6620e809a3544e8bcbfc6", "3225214343f6d98550b6620e809a3544e8bcbfc6", "3225214343f6d98550b6620e809a3544e8bcbfc6" ]
[ "tests/torch/cifar10/model_instancenorm.py", "tests/integration/operators_test/boolean_test.py", "tests/integration/optimizer_tests/adaptive_mixed_mode_test_py_0.py", "tests/integration/model_loading_test.py", "tests/integration/operators_test/constexpr_gather.py" ]
[ "# Copyright (c) 2018 Graphcore Ltd. All rights reserved.\nimport sys\nimport os\nimport c10driver\nimport popart\nimport cmdline\nfrom popart.torch import torchwriter\nimport torch\nimport numpy as np\n\nargs = cmdline.parse()\n\nnInChans = 3\nnOutChans = 8\nbatchSize = 2\nbatchesPerStep = 4\nanchors = {\n \"l1LossVal\": popart.AnchorReturnType(\"EveryN\", 2),\n \"out\": popart.AnchorReturnType(\"Final\"),\n \"im0\": popart.AnchorReturnType(\"All\")\n}\ndataFlow = popart.DataFlow(batchesPerStep, anchors)\ninputShapeInfo = popart.InputShapeInfo()\ninputShapeInfo.add(\"im0\",\n popart.TensorInfo(\"FLOAT\", [batchSize, nInChans, 32, 32]))\n\ninNames = [\"im0\"]\noutNames = [\"out\"]\ncifarInIndices = {\"im0\": 0}\nlosses = [popart.L1Loss(\"out\", \"l1LossVal\", 0.1)]\n\n\nclass Module0(torch.nn.Module):\n def __init__(self):\n torch.nn.Module.__init__(self)\n\n self.sin = torch.sin\n self.conv1 = torchwriter.conv3x3(nInChans, nOutChans)\n self.in2 = torch.nn.InstanceNorm2d(nOutChans,\n eps=0.1,\n affine=True,\n momentum=0)\n # Force random initialization\n np.random.seed(0)\n self.in2.weight.data = torch.tensor(\n np.random.rand(nOutChans).astype(np.float32))\n\n def forward(self, inputs):\n im0 = inputs[0]\n x = self.conv1(im0)\n x = self.in2(x)\n x = self.sin(x)\n return x\n\n\n# Set arbitrary seed so model weights are initialized to the\n# same values each time the test is run\ntorch.manual_seed(1)\n\ntorchWriter = torchwriter.PytorchNetWriter(\n inNames=inNames,\n outNames=outNames,\n losses=losses,\n optimizer=popart.ConstSGD(0.001),\n inputShapeInfo=inputShapeInfo,\n dataFlow=dataFlow,\n ### Torch specific:\n module=Module0(),\n samplesPerBatch=batchSize)\n\nc10driver.run(torchWriter,\n None,\n args.outputdir,\n cifarInIndices,\n args.device,\n args.hw_id,\n transformations=[\"prepareNodesForTraining\"],\n epochs=4)\n", "# Copyright (c) 2019 Graphcore Ltd. 
All rights reserved.\nimport numpy as np\nimport popart\nimport torch\nimport pytest\nfrom op_tester import op_tester\n\n\ndef test_and(op_tester):\n d1 = (np.random.randn(2) > 0).astype(np.bool_)\n d2 = (np.random.randn(2) > 0).astype(np.bool_)\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.logical_and([i1, i2])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1, dtype=torch.bool)\n t2 = torch.tensor(d2, dtype=torch.bool)\n out = t1 & t2\n return [out]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_broadcast_and(op_tester):\n d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)\n d2 = (np.random.randn(2) > 0).astype(np.bool_)\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.logical_and([i1, i2])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1, dtype=torch.bool)\n t2 = torch.tensor(d2, dtype=torch.bool)\n out = t1 & t2\n return [out]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_or(op_tester):\n d1 = (np.random.randn(2) > 0).astype(np.bool_)\n d2 = (np.random.randn(2) > 0).astype(np.bool_)\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.logical_or([i1, i2])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1, dtype=torch.bool)\n t2 = torch.tensor(d2, dtype=torch.bool)\n out = t1 | t2\n return [out]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_broadcast_or(op_tester):\n d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)\n d2 = (np.random.randn(2) > 0).astype(np.bool_)\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.logical_or([i1, i2])\n print(o)\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1, dtype=torch.bool)\n t2 = torch.tensor(d2, dtype=torch.bool)\n out = t1 | t2\n return [out]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_not(op_tester):\n d1 = (np.random.randn(2) > 0).astype(np.bool_)\n print(d1)\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n o = builder.aiOnnx.logical_not([i1])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n return [np.logical_not(d1)]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_equal(op_tester):\n d1 = (np.random.randn(2)).astype(np.float32)\n d2 = (np.random.randn(2)).astype(np.float32)\n d2[0] = d1[0]\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.equal([i1, i2])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1)\n t2 = torch.tensor(d2)\n out = torch.eq(t1, t2)\n\n return [out]\n\n op_tester.run(init_builder, reference, step_type='infer')\n\n\ndef test_broadcast_equal(op_tester):\n d1 = (np.random.randn(2, 2)).astype(np.float32)\n d2 = (np.random.randn(2)).astype(np.float32)\n\n # d2[0][0] = d1[0]\n\n def init_builder(builder):\n i1 = builder.addInputTensor(d1)\n i2 = builder.addInputTensor(d2)\n o = builder.aiOnnx.equal([i1, i2])\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n t1 = torch.tensor(d1)\n t2 = torch.tensor(d2)\n out = torch.eq(t1, t2)\n\n return [out]\n\n 
op_tester.run(init_builder, reference, step_type='infer')\n", "# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport numpy as np\nimport pytest\nimport popart\nimport onnx\nfrom onnx import numpy_helper\nfrom onnx import TensorProto\n\n# `import test_util` requires adding to sys.path\nimport sys\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).resolve().parent.parent))\nimport test_util as tu\n\n\ndef to_array(weight):\n if weight.data_type == TensorProto.FLOAT16:\n int_data = np.asarray(weight.int32_data, np.int32)\n np_weight = int_data.view(dtype=np.float16).reshape(weight.dims)\n else:\n np_weight = numpy_helper.to_array(weight)\n return np_weight\n\n\ndef run_adaptive_mixed_mode(steps,\n opt_dicts,\n enable_outlining,\n tmpdir,\n dtype=np.float32):\n def run(opt_dict, enable_outlining, model_file_name):\n np.random.seed(1878)\n dsize = 10\n builder = popart.Builder()\n ip = builder.addInputTensor(\n popart.TensorInfo(\"FLOAT\" if dtype == np.float32 else \"FLOAT16\",\n [dsize, dsize]))\n d__ip = popart.reservedGradientPrefix() + ip\n\n def add_layer(in_id, name):\n w = builder.addInitializedInputTensor(\n np.random.rand(dsize, dsize).astype(dtype), \"w_\" + name)\n b = builder.addInitializedInputTensor(\n np.random.rand(dsize).astype(dtype), \"b_\" + name)\n matmul_id = builder.aiOnnx.gemm([in_id, w, b], 1, 1, False, False)\n return matmul_id\n\n m1 = add_layer(ip, \"0\")\n m2 = add_layer(m1, \"1\")\n m3 = add_layer(m2, \"2\")\n m4 = add_layer(m3, \"3\")\n\n out = builder.aiGraphcore.identityloss([m4])\n builder.addOutputTensor(out)\n\n device = tu.create_test_device()\n\n anchors = {}\n\n opts = popart.SessionOptions()\n opts.enableOutliningCopyCostPruning = False\n opts.outlineThreshold = -np.inf\n opts.enableOutlining = enable_outlining\n\n proto = builder.getModelProto()\n\n session = popart.TrainingSession(fnModel=proto,\n dataFlow=popart.DataFlow(1, anchors),\n optimizer=opt_dict[0],\n loss=out,\n patterns=popart.Patterns(\n popart.PatternsLevel.All),\n userOptions=opts,\n deviceInfo=device)\n\n session.prepareDevice()\n session.weightsFromHost()\n\n for i in range(steps):\n if i in opt_dict:\n session.updateOptimizerFromHost(opt_dict[i])\n ip_data = np.ones((dsize, dsize), dtype=dtype)\n stepio = popart.PyStepIO({ip: ip_data}, anchors)\n session.run(stepio)\n\n session.modelToHost(str(tmpdir / model_file_name))\n\n for i, opt_dict in enumerate(opt_dicts):\n print(f\"Running adaptive_mixed_mode_{i}\")\n run(opt_dict, enable_outlining[i], f\"adaptive_mixed_mode_{i}.onnx\")\n\n gt_onnx = onnx.load(str(tmpdir / \"adaptive_mixed_mode_0.onnx\"))\n\n for i, opt_dict in enumerate(opt_dicts):\n print(f\"Testing run adaptive_mixed_mode_{i}\")\n val_onnx = onnx.load(str(tmpdir / f\"adaptive_mixed_mode_{i}.onnx\"))\n for j in range(len(gt_onnx.graph.initializer)):\n print(f\"Checking initializer {j}\")\n gt = gt_onnx.graph.initializer[j]\n gt = to_array(gt)\n val = val_onnx.graph.initializer[j]\n val = to_array(val)\n # print(gt, val)\n assert np.allclose(gt, val)\n\n\n# Test RMSProp with different parameters constant / non-constant\ndef test_adaptive_mixed_mode_0(tmpdir):\n\n #optimizer parameters\n defaultLearningRate = 0.005\n defaultAlpha = 0.8\n defaultMomentum = 0.5\n defaultWeightDecay = 0.1\n defaultEps = 1e-6\n lossScaling = 10.0\n\n optMaps = [{\n 0:\n popart.Adaptive(\n {\n \"defaultLearningRate\": (defaultLearningRate, True),\n \"defaultAlpha\": (defaultAlpha, True),\n \"defaultMomentum\": (defaultMomentum, True),\n \"defaultWeightDecay\": 
(defaultWeightDecay, True),\n \"defaultEps\": (defaultEps, True),\n \"lossScaling\": (lossScaling, True),\n },\n mode=popart.AdaptiveMode.CenteredRMSProp)\n }]\n outlining = [False]\n\n for i in range(6):\n optMap = {\n \"defaultLearningRate\": (defaultLearningRate, True),\n \"defaultAlpha\": (defaultAlpha, i != 1),\n \"defaultMomentum\": (defaultMomentum, i != 2),\n \"defaultWeightDecay\": (defaultWeightDecay, i != 3),\n \"defaultEps\": (defaultEps, i != 4),\n \"lossScaling\": (lossScaling, i != 5),\n }\n optMaps = optMaps + [{\n 0:\n popart.Adaptive(optMap, mode=popart.AdaptiveMode.CenteredRMSProp)\n }]\n outlining = outlining + [False]\n\n for i in range(6):\n optMap = {\n \"defaultLearningRate\": (defaultLearningRate, i != 0),\n \"defaultAlpha\": (defaultAlpha, i != 1),\n \"defaultMomentum\": (defaultMomentum, i != 2),\n \"defaultWeightDecay\": (defaultWeightDecay, i != 3),\n \"defaultEps\": (defaultEps, i != 4),\n \"lossScaling\": (lossScaling, i != 5),\n }\n optMaps = optMaps + [{\n 0:\n popart.Adaptive(optMap, mode=popart.AdaptiveMode.CenteredRMSProp)\n }]\n outlining = outlining + [True]\n\n run_adaptive_mixed_mode(10, optMaps, outlining, tmpdir, np.float32)\n run_adaptive_mixed_mode(10, optMaps, outlining, tmpdir, np.float16)\n\n\n# Test RMSProp with weight specific const and non-const parameters\ndef test_adaptive_mixed_mode_1(tmpdir):\n\n #optimizer parameters\n defaultLearningRate0 = 0.005\n defaultLearningRate5 = 0.0025\n\n defaultAlpha = 0.7\n defaultMomentum = 0.8\n defaultWeightDecay = 0.1\n defaultEps = 1e-6\n lossScaling = 10.0\n\n adaptive00 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate0, False),\n \"defaultAlpha\": (defaultAlpha, True),\n \"defaultMomentum\": (defaultMomentum, True),\n \"defaultWeightDecay\": (defaultWeightDecay, True),\n \"defaultEps\": (defaultEps, True),\n \"lossScaling\": (lossScaling, True),\n })\n\n adaptive00.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, True),\n \"momentum\": (0.8, True)\n })\n adaptive00.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, True),\n \"momentum\": (0.8, True)\n })\n\n adaptive05 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate5, False),\n \"defaultAlpha\": (defaultAlpha, True),\n \"defaultMomentum\": (defaultMomentum, True),\n \"defaultWeightDecay\": (defaultWeightDecay, True),\n \"defaultEps\": (defaultEps, True),\n \"lossScaling\": (lossScaling, True),\n })\n\n adaptive05.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, True),\n \"momentum\": (0.8, True)\n })\n adaptive05.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, True),\n \"momentum\": (0.8, True)\n })\n\n adaptive10 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate0, False),\n \"defaultAlpha\": (defaultAlpha, False),\n \"defaultMomentum\": (defaultMomentum, False),\n \"defaultWeightDecay\": (defaultWeightDecay, False),\n \"defaultEps\": (defaultEps, False),\n \"lossScaling\": (lossScaling, False),\n })\n\n adaptive10.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, False)\n })\n adaptive10.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, False)\n })\n\n adaptive15 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate5, False),\n \"defaultAlpha\": (defaultAlpha, False),\n \"defaultMomentum\": (defaultMomentum, False),\n \"defaultWeightDecay\": (defaultWeightDecay, False),\n \"defaultEps\": (defaultEps, False),\n \"lossScaling\": (lossScaling, False),\n })\n\n adaptive15.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, 
False)\n })\n adaptive15.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, False)\n })\n\n adaptive20 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate0, False),\n \"defaultAlpha\": (defaultAlpha, True),\n \"defaultMomentum\": (defaultMomentum, False),\n \"defaultWeightDecay\": (defaultWeightDecay, False),\n \"defaultEps\": (defaultEps, False),\n \"lossScaling\": (lossScaling, False),\n })\n\n adaptive20.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, True)\n })\n adaptive20.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, True)\n })\n\n adaptive25 = popart.Adaptive({\n \"defaultLearningRate\": (defaultLearningRate5, False),\n \"defaultAlpha\": (defaultAlpha, True),\n \"defaultMomentum\": (defaultMomentum, False),\n \"defaultWeightDecay\": (defaultWeightDecay, False),\n \"defaultEps\": (defaultEps, False),\n \"lossScaling\": (lossScaling, False),\n })\n\n adaptive25.insertSpecific(\"w_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, True)\n })\n adaptive25.insertSpecific(\"b_0\", {\n \"alpha\": (0.7, False),\n \"momentum\": (0.8, True)\n })\n\n # Change RMSProp optimizer after 0 and 5 steps\n optMaps = [{\n 0: adaptive00,\n 5: adaptive05\n }, {\n 0: adaptive10,\n 5: adaptive15\n }, {\n 0: adaptive20,\n 5: adaptive25\n }]\n\n outlining = [True, True, True]\n\n run_adaptive_mixed_mode(10, optMaps, outlining, tmpdir, np.float32)\n run_adaptive_mixed_mode(10, optMaps, outlining, tmpdir, np.float16)\n", "# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\nimport numpy as np\nimport popart\nimport pytest\n\n\ndef test_bad_model_proto():\n builder = popart.Builder()\n\n i = builder.addInputTensor(\"FLOAT\", [4, 4])\n c = builder.aiOnnx.constant(np.random.rand(4, 4).astype(np.float32))\n m = builder.aiOnnx.matmul([i, c])\n m = builder.aiOnnx.matmul([m, c])\n\n builder.addOutputTensor(m)\n\n proto = builder.getModelProto()\n\n # Invalidate the onnx model proto\n proto = [i for i in proto]\n i = 0\n while i < len(proto):\n x = proto[i]\n proto[i] = 10\n i += 10\n proto = bytes(proto)\n\n # Attempt to load the model proto\n with pytest.raises(popart.popart_exception) as e_info:\n builder = popart.Builder(proto)\n\n assert (e_info.value.args[0].startswith(\n 'Failed to load a ModelProto from the string '))\n assert (e_info.value.args[0].endswith(\n 'Check that it is either a valid path to an existing onnx'\n ' model file, or is a valid onnx ModelProto string.'))\n", "# Copyright (c) 2019 Graphcore Ltd. 
All rights reserved.\nimport itertools\nimport numpy as np\nfrom op_tester import op_tester\nimport popart\n\n\ndef test_gather_id_pattern(op_tester):\n d1 = np.array([[-1, -2, -3]]).astype(np.float32)\n d2 = np.array([0]).astype(np.int32)\n axis = 0\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out]\n\n op_tester.setPatterns(['OpToIdentity'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'infer')\n\n\ndef test_gather_rank2_1(op_tester):\n d1 = np.array([[-1, -2, -3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)\n d2 = np.array([0, 2]).astype(np.int32)\n d_d1 = np.array([[1.0, 1.0, 1.0], [0, 0, 0], [1.0, 1.0,\n 1.0]]).astype(np.float32)\n axis = 0\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o, popart.reservedGradientPrefix() + i1]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out, d_d1]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'train')\n\n\ndef test_gather_rank2_2(op_tester):\n d1 = np.array([[-1, -2, -3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)\n d2 = np.arange(2, dtype=np.int32).reshape(1, 2)\n d_d1 = np.array([[1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, 1.0,\n 0]]).astype(np.float32)\n axis = 1\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o, popart.reservedGradientPrefix() + i1]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out, d_d1]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'train')\n\n\ndef test_gather_rank3_1(op_tester):\n d1 = np.array([[[-1, -2, -3], [4, 5, 6], [7, 8, 9]]]).astype(np.float32)\n d2 = np.arange(2, dtype=np.int32)\n d_d1 = np.array([[[1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, 1.0,\n 0]]]).astype(np.float32)\n\n axis = 2\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o, popart.reservedGradientPrefix() + i1]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out, d_d1]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'train')\n\n\ndef test_gather_rank1_1(op_tester):\n d1 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]).astype(np.float32)\n d2 = np.arange(2, dtype=np.int32)\n d_d1 = np.array([1.0, 1.0, 0, 0, 0, 0, 0, 0, 0]).astype(np.float32)\n axis = 0\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o, popart.reservedGradientPrefix() + i1]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out, d_d1]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 
'train')\n\n\ndef test_gather_example1(op_tester):\n d1 = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]]).astype(np.float32)\n d2 = np.array([[[0, 1], [1, 2]]]).astype(np.int32)\n axis = 0\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'infer')\n\n\ndef test_gather_example2(op_tester):\n d1 = np.array([[1.0, 1.2, 1.9], [2.3, 3.4, 3.9], [4.5, 5.7,\n 5.9]]).astype(np.float32)\n d2 = np.array([[0, 2, 0]]).astype(np.int32)\n d_d1 = np.array([[2.0, 0, 1.0], [2.0, 0, 1.0], [2.0, 0,\n 1.0]]).astype(np.float32)\n axis = 1\n\n def init_builder(builder):\n i1 = builder.aiOnnx.constant(d1)\n i2 = builder.aiOnnx.constant(d2)\n o = builder.aiOnnx.gather([i1, i2], axis)\n builder.addOutputTensor(o)\n return [o, popart.reservedGradientPrefix() + i1]\n\n def reference(ref_data):\n out = np.take(d1, d2, axis=axis)\n return [out, d_d1]\n\n op_tester.lossReduction = popart.ReductionType.Sum\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'train')\n\n\ndef test_gather_complex(op_tester):\n axis = 2\n data = np.zeros((5, 6, 3, 2, 1), dtype=np.float32)\n\n for i in range(np.prod(data.shape)):\n data.reshape(np.prod(data.shape))[i] = i\n\n indices = np.zeros((2, 3, 1), dtype=np.int32)\n indices[0, 0, 0] = 0\n indices[0, 1, 0] = 2\n indices[0, 2, 0] = 2\n indices[1, 0, 0] = 0\n indices[1, 1, 0] = 2\n indices[1, 2, 0] = 1\n\n def init_builder(builder):\n constData = builder.aiOnnx.constant(data)\n constIndices = builder.aiOnnx.constant(indices)\n constOut = builder.aiOnnx.gather([constData, constIndices], axis)\n builder.addOutputTensor(constOut)\n return [constOut]\n\n def reference(ref_data):\n result = np.zeros((5, 6, 2, 3, 1, 2, 1), dtype=np.float32)\n for d0, d1, d2, d3, d4, d5, d6 in itertools.product(\n range(result.shape[0]), range(result.shape[1]),\n range(result.shape[2]), range(result.shape[3]),\n range(result.shape[4]), range(result.shape[5]),\n range(result.shape[6])):\n\n # Derive the expected value\n value = (d6 + data.shape[4] *\n (d5 + data.shape[3] *\n (indices[d2, d3, d4] + data.shape[2] *\n (d1 + data.shape[1] * d0))))\n\n result[d0, d1, d2, d3, d4, d5, d6] = value\n return [result]\n\n op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)\n op_tester.run(init_builder, reference, 'infer')\n" ]
[ [ "numpy.random.seed", "torch.nn.Module.__init__", "torch.manual_seed", "torch.nn.InstanceNorm2d", "numpy.random.rand" ], [ "torch.eq", "numpy.logical_not", "numpy.random.randn", "torch.tensor" ], [ "numpy.allclose", "numpy.random.seed", "numpy.asarray", "numpy.ones", "numpy.random.rand" ], [ "numpy.random.rand" ], [ "numpy.take", "numpy.arange", "numpy.prod", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pinkimondli/Qubit
[ "aa918e7614f97ec66c723eb57e8f577685e7ae85" ]
[ "qubitPlots.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom Qubit import Qubit\n\nsns.set(font_scale=1)\nsns.set_style(\"whitegrid\", {\"font.family\": \"serif\"})\nplt.rcParams[\"figure.dpi\"]=100\nplt.rcParams[\"savefig.bbox\"]='tight'\nplt.rcParams[\"savefig.transparent\"]=True\n\ndef frequency(qubit,detuning,Bfield):\n time = np.linspace(0,1e-6,10001) # in seconds\n\n freq = np.fft.fftfreq(time.shape[-1],d=max(time)/time.shape[-1])\n ps = np.sqrt(qubit.Rabi(time,detuning,Bfield,'S'))\n ps_fft = np.abs(np.fft.fft(ps))\n\n m = np.argmax(ps_fft[20:-time.shape[-1]//2])+20\n\n return freq[m]\n\ndef plotFieldFreq(qubit,detuning,BAbs,BUnit,so=0):\n qubit.so = so\n qubit.T2 = -1\n\n qFreq=np.array([])\n omega = np.array([])\n for Bi in BAbs:\n print(Bi)\n Bfield_i = np.dot(Bi,BUnit)\n fi = frequency(qubit,detuning,Bfield_i)\n de = qubit.eigen(detuning,Bfield_i)[0]\n de = de[1]-de[0]\n\n omega = np.append(omega,de/qubit.hbar/(2*np.pi))\n qFreq = np.append(qFreq,fi)\n\n fig,ax = plt.subplots(1,figsize=(5,4))\n ax.plot(BAbs,qFreq/1e6,'o',linestyle='-',color='C0',label=str(BUnit))\n ax.plot(-BAbs,qFreq/1e6,'o',linestyle='-',color='C0')\n ax.set_xlabel('B (mT)')\n ax.set_ylabel('qubit frequency (MHz)')\n ax.set_ylim(0,50)\n ax.legend()\n figname = './Plots/plotFieldFreq_'+str(BUnit)+'_SO'+str(so)+'.png'\n fig.savefig(figname)\n\n plt.show()\n return qFreq,omega\n\ndef plotRabiProj(qubit,time,detuning,BAbs,BUnit):\n fig,ax = plt.subplots(1,figsize=(10,4))\n for projection in ['S','T0','TM','TP']:\n xlabel,ylabel = qubit.RabiTimePlot(time,detuning,np.dot(BAbs,BUnit),ax,projection)\n ax.legend(ncol=4,loc='upper center')\n ax.set_ylim(0,1.5)\n ax.set_xlabel(xlabel)\n ax.set_ylabel('probability')\n\n figname = './Plots/plotRabiProj_det_'+str(detuning)+'_field_'+str(BAbs)+str(BUnit)+'.png'\n fig.savefig(figname)\n plt.show()\n\ndef plotFunnelProj(qubit,t,d,B):\n projection = ['S','T0','TM']\n theta = [0,90]\n fig,ax = plt.subplots(3,2,figsize=(8,15))\n for i in range(2):\n th = theta[i]\n for j in range(3):\n proj = projection[j]\n d,Bfield_sym,pS_fin_sym = qubit.Funnel(t,d,B,th,proj)\n ax[j,i].pcolormesh(d*1e6,Bfield_sym*1e3,pS_fin_sym,vmin=0,vmax=1)\n ax[j,i].title.set_text(str(proj))\n ax[j,i].set_xlabel('detuning (ueV)')\n ax[j,i].set_ylabel('Bfield (mT)')\n fig.subplots_adjust(hspace=0.3,wspace=0.3)\n figname = './Plots/plotFunnelProj_t'+str(t)+'_so_'+str(qubit.so)+'.png'\n fig.savefig(figname)\n plt.show()\n\ndef main():\n qubit = Qubit(g1_ll=0.4,g2_ll=0.385,g1_pp=6,g2_pp=5.1,tc=5e-6)\n qubit.so = 300e-3\n\n t = 200e-9\n detuning = np.linspace(-1e-3,10e-6,101)\n BAbs = np.linspace(0,5e-3,101)\n theta = 0\n# BUnit = [1,0,0]\n\n plotFunnelProj(qubit,t,detuning,BAbs)\n# plotRabiProj(qubit,time,detuning,BAbs,BUnit)\n# qFreq,omega = plotFieldFreq(qubit,detuning,BAbs,BUnit,300e-3)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.dot", "numpy.linspace", "numpy.fft.fft", "matplotlib.pyplot.subplots", "numpy.append", "numpy.argmax", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
micuat/bci-workshop
[ "2ce12f3b44e89f35bc1f04c00a184a372cddfe1e" ]
[ "python/exercise_01.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nExercise 1: A neurofeedback interface (single-channel)\n======================================================\n\nDescription:\nIn this exercise, we'll try and play around with a simple interface that\nreceives EEG from one electrode, computes standard frequency band powers\nand displays both the raw signals and the features.\n\n\"\"\"\n\nimport numpy as np # Module that simplifies computations on matrices\nimport matplotlib.pyplot as plt # Module used for plotting\nfrom pylsl import StreamInlet, resolve_byprop # Module to receive EEG data\n\nimport bci_workshop_tools as BCIw # Our own functions for the workshop\n\n\nif __name__ == \"__main__\":\n\n \"\"\" 1. CONNECT TO EEG STREAM \"\"\"\n\n # Search for active LSL stream\n print('Looking for an EEG stream...')\n streams = resolve_byprop('type', 'EEG', timeout=2)\n if len(streams) == 0:\n raise RuntimeError('Can\\'t find EEG stream.')\n\n # Set active EEG stream to inlet and apply time correction\n print(\"Start acquiring data\")\n inlet = StreamInlet(streams[0], max_chunklen=12)\n eeg_time_correction = inlet.time_correction()\n\n # Get the stream info and description\n info = inlet.info()\n description = info.desc()\n\n # Get the sampling frequency\n # This is an important value that represents how many EEG data points are\n # collected in a second. This influences our frequency band calculation.\n fs = int(info.nominal_srate())\n\n \"\"\" 2. SET EXPERIMENTAL PARAMETERS \"\"\"\n\n # Length of the EEG data buffer (in seconds)\n # This buffer will hold last n seconds of data and be used for calculations\n buffer_length = 15\n\n # Length of the epochs used to compute the FFT (in seconds)\n epoch_length = 1\n\n # Amount of overlap between two consecutive epochs (in seconds)\n overlap_length = 0.8\n\n # Amount to 'shift' the start of each next consecutive epoch\n shift_length = epoch_length - overlap_length\n\n # Index of the channel (electrode) to be used\n # 0 = left ear, 1 = left forehead, 2 = right forehead, 3 = right ear\n index_channel = [0]\n ch_names = ['ch1'] # Name of our channel for plotting purposes\n\n # Get names of features\n # ex. ['delta - CH1', 'pwr-theta - CH1', 'pwr-alpha - CH1',...]\n feature_names = BCIw.get_feature_names(ch_names)\n\n \"\"\" 3. INITIALIZE BUFFERS \"\"\"\n\n # Initialize raw EEG data buffer (for plotting)\n eeg_buffer = np.zeros((int(fs * buffer_length), 1))\n filter_state = None # for use with the notch filter\n\n # Compute the number of epochs in \"buffer_length\" (used for plotting)\n n_win_test = int(np.floor((buffer_length - epoch_length) /\n shift_length + 1))\n\n # Initialize the feature data buffer (for plotting)\n feat_buffer = np.zeros((n_win_test, len(feature_names)))\n\n # Initialize the plots\n plotter_eeg = BCIw.DataPlotter(fs * buffer_length, ch_names, fs)\n plotter_feat = BCIw.DataPlotter(n_win_test, feature_names,\n 1 / shift_length)\n\n \"\"\" 3. 
GET DATA \"\"\"\n\n # The try/except structure allows to quit the while loop by aborting the\n # script with <Ctrl-C>\n print('Press Ctrl-C in the console to break the while loop.')\n\n try:\n # The following loop does what we see in the diagram of Exercise 1:\n # acquire data, compute features, visualize raw EEG and the features\n while True:\n\n \"\"\" 3.1 ACQUIRE DATA \"\"\"\n # Obtain EEG data from the LSL stream\n eeg_data, timestamp = inlet.pull_chunk(\n timeout=1, max_samples=int(shift_length * fs))\n\n # Only keep the channel we're interested in\n ch_data = np.array(eeg_data)[:, index_channel]\n\n # Update EEG buffer\n eeg_buffer, filter_state = BCIw.update_buffer(\n eeg_buffer, ch_data, notch=True,\n filter_state=filter_state)\n\n \"\"\" 3.2 COMPUTE FEATURES \"\"\"\n # Get newest samples from the buffer\n data_epoch = BCIw.get_last_data(eeg_buffer,\n epoch_length * fs)\n\n # Compute features\n feat_vector = BCIw.compute_feature_vector(data_epoch, fs)\n feat_buffer, _ = BCIw.update_buffer(feat_buffer,\n np.asarray([feat_vector]))\n\n \"\"\" 3.3 VISUALIZE THE RAW EEG AND THE FEATURES \"\"\"\n plotter_eeg.update_plot(eeg_buffer)\n plotter_feat.update_plot(feat_buffer)\n plt.pause(0.00001)\n\n except KeyboardInterrupt:\n print('Closing!')\n" ]
[ [ "numpy.asarray", "numpy.array", "matplotlib.pyplot.pause", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CDU-LSP/NCSSK
[ "2a8b729933aa59967409a0202e0e9a65b0a23ec8" ]
[ "signal_source/gen_4xsin_signal.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n# 本代码用于产生四路sin信号,并对其中的三路信号进行了延迟\n\nfs = 1e6 # 正弦信号1MHz\nFs = 256e6 # 采样率256MHz\nadc_t = 1 / Fs\nA = 1 # 幅度\nN = 1024 # 序列长度\nt = np.arange(0, N)\n\nd1 = 20 # 延后点数\nd2 = 60\nd3 = 150\n\nx1 = A * np.cos(2 * np.pi * fs * t * adc_t) # 源信号\n\nx2 = A * np.cos(2 * np.pi * fs * (t + d1) * adc_t) # 延后信号\n\nx3 = A * np.cos(2 * np.pi * fs * (t + d2) * adc_t) # 延后信号\n\nx4 = A * np.cos(2 * np.pi * fs * (t + d3) * adc_t) # 延后信号\n\nplt.subplot(221)\nplt.plot(t, x1, 'r')\nplt.title('source')\nplt.xlabel('t')\nplt.ylabel('y')\nplt.grid(\"True\")\n\nplt.subplot(222)\nplt.plot(t, x2, 'r')\nplt.title('delay_sig1')\nplt.xlabel('t')\nplt.ylabel('y')\nplt.grid(\"True\")\n\nplt.subplot(223)\nplt.plot(t, x3, 'r')\nplt.title('delay_sig2')\nplt.xlabel('t')\nplt.ylabel('y')\nplt.grid(\"True\")\n\nplt.subplot(224)\nplt.plot(t, x4, 'r')\nplt.title('delay_sig3')\nplt.xlabel('t')\nplt.ylabel('y')\nplt.grid(\"True\")\n\nplt.show()\n\ncount = 0\nf = open('sin_signal_1024x4.bin', 'ab')\nfor i in range(0, 1024):\n count += 1\n h_xi = \"{:#06X}\".format(int(round(x1[i], 3) * 1000 + 1000))\n # print(\"{:#06X}\".format(int(round(x1[i],3)*1000 +1000)))\n f.write(bytes(h_xi[2:], encoding=\"utf8\"))\n f.write(bytes(\" \", encoding=\"utf8\"))\nf.close()\n\nprint(\"count\", count)\nf = open('sin_signal_1024x4.bin', 'ab')\nfor i in range(0, 1024):\n h_xi = \"{:#06X}\".format(int(round(x2[i], 3) * 1000 + 1000))\n # print(\"{:#06X}\".format(int(round(x1[i],3)*1000 +1000)))\n f.write(bytes(h_xi[2:], encoding=\"utf8\"))\n f.write(bytes(\" \", encoding=\"utf8\"))\n count += 1\nf.close()\n\nf = open('sin_signal_1024x4.bin', 'ab')\nfor i in range(0, 1024):\n h_xi = \"{:#06X}\".format(int(round(x3[i], 3) * 1000 + 1000))\n # print(\"{:#06X}\".format(int(round(x1[i],3)*1000 +1000)))\n f.write(bytes(h_xi[2:], encoding=\"utf8\"))\n f.write(bytes(\" \", encoding=\"utf8\"))\n count += 1\nf.close()\n\nf = open('sin_signal_1024x4.bin', 'ab')\nfor i in range(0, 1024):\n h_xi = \"{:#06X}\".format(int(round(x4[i], 3) * 1000 + 1000))\n # print(\"{:#06X}\".format(int(round(x1[i],3)*1000 +1000)))\n f.write(bytes(h_xi[2:], encoding=\"utf8\"))\n f.write(bytes(\" \", encoding=\"utf8\"))\n count += 1\nf.close()\n\nprint(\"success\")\nprint(\"count\", count)\n" ]
[ [ "matplotlib.pyplot.title", "numpy.arange", "numpy.cos", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rui-mo/arrow
[ "266993f33511760aafbf63e131136ee422e69ec0" ]
[ "python/pyarrow/tests/test_dataset.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\nimport os\nimport pathlib\nimport pickle\n\nimport numpy as np\nimport pytest\n\nimport pyarrow as pa\nimport pyarrow.csv\nimport pyarrow.fs as fs\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\ntry:\n import pyarrow.dataset as ds\nexcept ImportError:\n ds = None\n\n# Marks all of the tests in this module\n# Ignore these with pytest ... -m 'not dataset'\npytestmark = pytest.mark.dataset\n\n\[email protected]\ndef change_cwd(path):\n curdir = os.getcwd()\n os.chdir(str(path))\n try:\n yield\n finally:\n os.chdir(curdir)\n\n\ndef _generate_data(n):\n import datetime\n import itertools\n\n day = datetime.datetime(2000, 1, 1)\n interval = datetime.timedelta(days=5)\n colors = itertools.cycle(['green', 'blue', 'yellow', 'red', 'orange'])\n\n data = []\n for i in range(n):\n data.append((day, i, float(i), next(colors)))\n day += interval\n\n return pd.DataFrame(data, columns=['date', 'index', 'value', 'color'])\n\n\ndef _table_from_pandas(df):\n schema = pa.schema([\n pa.field('date', pa.date32()),\n pa.field('index', pa.int64()),\n pa.field('value', pa.float64()),\n pa.field('color', pa.string()),\n ])\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n return table.replace_schema_metadata()\n\n\ndef _filesystem_uri(path):\n # URIs on Windows must follow 'file:///C:...' or 'file:/C:...' 
patterns.\n if os.name == 'nt':\n uri = 'file:///{}'.format(path)\n else:\n uri = 'file://{}'.format(path)\n return uri\n\n\[email protected]\[email protected]\ndef mockfs():\n import pyarrow.parquet as pq\n\n mockfs = fs._MockFileSystem()\n\n directories = [\n 'subdir/1/xxx',\n 'subdir/2/yyy',\n ]\n\n for i, directory in enumerate(directories):\n path = '{}/file{}.parquet'.format(directory, i)\n mockfs.create_dir(directory)\n with mockfs.open_output_stream(path) as out:\n data = [\n list(range(5)),\n list(map(float, range(5))),\n list(map(str, range(5))),\n [i] * 5\n ]\n schema = pa.schema([\n pa.field('i64', pa.int64()),\n pa.field('f64', pa.float64()),\n pa.field('str', pa.string()),\n pa.field('const', pa.int64()),\n ])\n batch = pa.record_batch(data, schema=schema)\n table = pa.Table.from_batches([batch])\n\n pq.write_table(table, out)\n\n return mockfs\n\n\[email protected]\ndef open_logging_fs(monkeypatch):\n from pyarrow.fs import PyFileSystem, LocalFileSystem, _normalize_path\n from .test_fs import ProxyHandler\n\n localfs = LocalFileSystem()\n\n def normalized(paths):\n return {_normalize_path(localfs, str(p)) for p in paths}\n\n opened = set()\n\n def open_input_file(self, path):\n path = _normalize_path(localfs, str(path))\n opened.add(path)\n return self._fs.open_input_file(path)\n\n # patch proxyhandler to log calls to open_input_file\n monkeypatch.setattr(ProxyHandler, \"open_input_file\", open_input_file)\n fs = PyFileSystem(ProxyHandler(localfs))\n\n @contextlib.contextmanager\n def assert_opens(expected_opened):\n opened.clear()\n try:\n yield\n finally:\n assert normalized(opened) == normalized(expected_opened)\n\n return fs, assert_opens\n\n\[email protected](scope='module')\ndef multisourcefs(request):\n request.config.pyarrow.requires('pandas')\n request.config.pyarrow.requires('parquet')\n import pyarrow.parquet as pq\n\n df = _generate_data(1000)\n mockfs = fs._MockFileSystem()\n\n # simply split the dataframe into three chunks to construct a data source\n # from each chunk into its own directory\n df_a, df_b, df_c, df_d = np.array_split(df, 4)\n\n # create a directory containing a flat sequence of parquet files without\n # any partitioning involved\n mockfs.create_dir('plain')\n for i, chunk in enumerate(np.array_split(df_a, 10)):\n path = 'plain/chunk-{}.parquet'.format(i)\n with mockfs.open_output_stream(path) as out:\n pq.write_table(_table_from_pandas(chunk), out)\n\n # create one with schema partitioning by week and color\n mockfs.create_dir('schema')\n for part, chunk in df_b.groupby([df_b.date.dt.week, df_b.color]):\n folder = 'schema/{}/{}'.format(*part)\n path = '{}/chunk.parquet'.format(folder)\n mockfs.create_dir(folder)\n with mockfs.open_output_stream(path) as out:\n pq.write_table(_table_from_pandas(chunk), out)\n\n # create one with hive partitioning by year and month\n mockfs.create_dir('hive')\n for part, chunk in df_c.groupby([df_c.date.dt.year, df_c.date.dt.month]):\n folder = 'hive/year={}/month={}'.format(*part)\n path = '{}/chunk.parquet'.format(folder)\n mockfs.create_dir(folder)\n with mockfs.open_output_stream(path) as out:\n pq.write_table(_table_from_pandas(chunk), out)\n\n # create one with hive partitioning by color\n mockfs.create_dir('hive_color')\n for part, chunk in df_d.groupby([\"color\"]):\n folder = 'hive_color/color={}'.format(*part)\n path = '{}/chunk.parquet'.format(folder)\n mockfs.create_dir(folder)\n with mockfs.open_output_stream(path) as out:\n pq.write_table(_table_from_pandas(chunk), out)\n\n return mockfs\n\n\[email 
protected]\ndef dataset(mockfs):\n format = ds.ParquetFileFormat()\n selector = fs.FileSelector('subdir', recursive=True)\n options = ds.FileSystemFactoryOptions('subdir')\n options.partitioning = ds.DirectoryPartitioning(\n pa.schema([\n pa.field('group', pa.int32()),\n pa.field('key', pa.string())\n ])\n )\n factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)\n return factory.finish()\n\n\ndef test_filesystem_dataset(mockfs):\n schema = pa.schema([\n pa.field('const', pa.int64())\n ])\n file_format = ds.ParquetFileFormat()\n paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']\n partitions = [ds.field('part') == x for x in range(1, 3)]\n fragments = [file_format.make_fragment(path, mockfs, part)\n for path, part in zip(paths, partitions)]\n root_partition = ds.field('level') == ds.scalar(1337)\n\n dataset_from_fragments = ds.FileSystemDataset(\n fragments, schema=schema, format=file_format,\n root_partition=root_partition,\n )\n dataset_from_paths = ds.FileSystemDataset.from_paths(\n paths, schema=schema, format=file_format, filesystem=mockfs,\n partitions=partitions, root_partition=root_partition,\n )\n\n for dataset in [dataset_from_fragments, dataset_from_paths]:\n assert isinstance(dataset, ds.FileSystemDataset)\n assert isinstance(dataset.format, ds.ParquetFileFormat)\n assert dataset.partition_expression.equals(root_partition)\n assert set(dataset.files) == set(paths)\n\n fragments = list(dataset.get_fragments())\n for fragment, partition, path in zip(fragments, partitions, paths):\n assert fragment.partition_expression.equals(partition)\n assert fragment.path == path\n assert isinstance(fragment.format, ds.ParquetFileFormat)\n assert isinstance(fragment, ds.ParquetFileFragment)\n assert fragment.row_groups is None\n\n row_group_fragments = list(fragment.split_by_row_group())\n assert len(row_group_fragments) == 1\n assert isinstance(row_group_fragments[0], ds.ParquetFileFragment)\n assert row_group_fragments[0].path == path\n assert row_group_fragments[0].row_groups == [ds.RowGroupInfo(0)]\n\n fragments = list(dataset.get_fragments(filter=ds.field(\"const\") == 0))\n assert len(fragments) == 2\n\n # the root_partition keyword has a default\n dataset = ds.FileSystemDataset(\n fragments, schema=schema, format=file_format\n )\n assert dataset.partition_expression.equals(ds.scalar(True))\n\n # from_paths partitions have defaults\n dataset = ds.FileSystemDataset.from_paths(\n paths, schema=schema, format=file_format, filesystem=mockfs\n )\n assert dataset.partition_expression.equals(ds.scalar(True))\n for fragment in dataset.get_fragments():\n assert fragment.partition_expression.equals(ds.scalar(True))\n\n # validation of required arguments\n with pytest.raises(TypeError, match=\"incorrect type\"):\n ds.FileSystemDataset(fragments, file_format, schema)\n # validation of root_partition\n with pytest.raises(TypeError, match=\"incorrect type\"):\n ds.FileSystemDataset(fragments, schema=schema, format=file_format,\n root_partition=1)\n # missing required argument in from_paths\n with pytest.raises(TypeError, match=\"incorrect type\"):\n ds.FileSystemDataset.from_paths(fragments, format=file_format)\n\n\ndef test_filesystem_dataset_no_filesystem_interaction():\n # ARROW-8283\n schema = pa.schema([\n pa.field('f1', pa.int64())\n ])\n file_format = ds.IpcFileFormat()\n paths = ['nonexistingfile.arrow']\n\n # creating the dataset itself doesn't raise\n dataset = ds.FileSystemDataset.from_paths(\n paths, schema=schema, format=file_format,\n 
filesystem=fs.LocalFileSystem(),\n )\n\n # getting fragments also doesn't raise\n dataset.get_fragments()\n\n # scanning does raise\n with pytest.raises(FileNotFoundError):\n dataset.to_table()\n\n\ndef test_dataset(dataset):\n assert isinstance(dataset, ds.Dataset)\n assert isinstance(dataset.schema, pa.Schema)\n\n # TODO(kszucs): test non-boolean Exprs for filter do raise\n expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())\n expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())\n for task in dataset.scan():\n assert isinstance(task, ds.ScanTask)\n for batch in task.execute():\n assert batch.column(0).equals(expected_i64)\n assert batch.column(1).equals(expected_f64)\n\n batches = dataset.to_batches()\n assert all(isinstance(batch, pa.RecordBatch) for batch in batches)\n\n table = dataset.to_table()\n assert isinstance(table, pa.Table)\n assert len(table) == 10\n\n condition = ds.field('i64') == 1\n result = dataset.to_table(use_threads=True, filter=condition).to_pydict()\n\n # don't rely on the scanning order\n assert result['i64'] == [1, 1]\n assert result['f64'] == [1., 1.]\n assert sorted(result['group']) == [1, 2]\n assert sorted(result['key']) == ['xxx', 'yyy']\n\n\ndef test_scanner(dataset):\n scanner = ds.Scanner.from_dataset(dataset,\n memory_pool=pa.default_memory_pool())\n assert isinstance(scanner, ds.Scanner)\n assert len(list(scanner.scan())) == 2\n\n with pytest.raises(pa.ArrowInvalid):\n ds.Scanner.from_dataset(dataset, columns=['unknown'])\n\n scanner = ds.Scanner.from_dataset(dataset, columns=['i64'],\n memory_pool=pa.default_memory_pool())\n\n assert isinstance(scanner, ds.Scanner)\n assert len(list(scanner.scan())) == 2\n for task in scanner.scan():\n for batch in task.execute():\n assert batch.num_columns == 1\n\n\ndef test_abstract_classes():\n classes = [\n ds.FileFormat,\n ds.Scanner,\n ds.Partitioning,\n ]\n for klass in classes:\n with pytest.raises(TypeError):\n klass()\n\n\ndef test_partitioning():\n schema = pa.schema([\n pa.field('i64', pa.int64()),\n pa.field('f64', pa.float64())\n ])\n for klass in [ds.DirectoryPartitioning, ds.HivePartitioning]:\n partitioning = klass(schema)\n assert isinstance(partitioning, ds.Partitioning)\n\n partitioning = ds.DirectoryPartitioning(\n pa.schema([\n pa.field('group', pa.int64()),\n pa.field('key', pa.float64())\n ])\n )\n expr = partitioning.parse('/3/3.14')\n assert isinstance(expr, ds.Expression)\n\n expected = (ds.field('group') == 3) & (ds.field('key') == 3.14)\n assert expr.equals(expected)\n\n with pytest.raises(pa.ArrowInvalid):\n partitioning.parse('/prefix/3/aaa')\n\n partitioning = ds.HivePartitioning(\n pa.schema([\n pa.field('alpha', pa.int64()),\n pa.field('beta', pa.int64())\n ])\n )\n expr = partitioning.parse('/alpha=0/beta=3')\n expected = (\n (ds.field('alpha') == ds.scalar(0)) &\n (ds.field('beta') == ds.scalar(3))\n )\n assert expr.equals(expected)\n\n for shouldfail in ['/alpha=one/beta=2', '/alpha=one', '/beta=two']:\n with pytest.raises(pa.ArrowInvalid):\n partitioning.parse(shouldfail)\n\n\ndef test_expression_serialization():\n a = ds.scalar(1)\n b = ds.scalar(1.1)\n c = ds.scalar(True)\n d = ds.scalar(\"string\")\n e = ds.scalar(None)\n f = ds.scalar({'a': 1})\n g = ds.scalar(pa.scalar(1))\n\n condition = ds.field('i64') > 5\n schema = pa.schema([\n pa.field('i64', pa.int64()),\n pa.field('f64', pa.float64())\n ])\n assert condition.validate(schema) == pa.bool_()\n\n assert condition.assume(ds.field('i64') == 5).equals(\n ds.scalar(False))\n\n assert 
condition.assume(ds.field('i64') == 7).equals(\n ds.scalar(True))\n\n all_exprs = [a, b, c, d, e, f, g, a == b, a > b, a & b, a | b, ~c,\n d.is_valid(), a.cast(pa.int32(), safe=False),\n a.cast(pa.int32(), safe=False), a.isin([1, 2, 3]),\n ds.field('i64') > 5, ds.field('i64') == 5,\n ds.field('i64') == 7]\n for expr in all_exprs:\n assert isinstance(expr, ds.Expression)\n restored = pickle.loads(pickle.dumps(expr))\n assert expr.equals(restored)\n\n\ndef test_expression_construction():\n zero = ds.scalar(0)\n one = ds.scalar(1)\n true = ds.scalar(True)\n false = ds.scalar(False)\n string = ds.scalar(\"string\")\n field = ds.field(\"field\")\n\n zero | one == string\n ~true == false\n for typ in (\"bool\", pa.bool_()):\n field.cast(typ) == true\n\n field.isin([1, 2])\n\n with pytest.raises(TypeError):\n field.isin(1)\n\n with pytest.raises(pa.ArrowInvalid):\n field != {1}\n\n\ndef test_parquet_read_options():\n opts1 = ds.ParquetReadOptions()\n opts2 = ds.ParquetReadOptions(buffer_size=4096,\n dictionary_columns=['a', 'b'])\n opts3 = ds.ParquetReadOptions(buffer_size=2**13, use_buffered_stream=True,\n dictionary_columns={'a', 'b'})\n\n assert opts1.use_buffered_stream is False\n assert opts1.buffer_size == 2**13\n assert opts1.dictionary_columns == set()\n\n assert opts2.use_buffered_stream is False\n assert opts2.buffer_size == 2**12\n assert opts2.dictionary_columns == {'a', 'b'}\n\n assert opts3.use_buffered_stream is True\n assert opts3.buffer_size == 2**13\n assert opts3.dictionary_columns == {'a', 'b'}\n\n assert opts1 == opts1\n assert opts1 != opts2\n assert opts2 != opts3\n\n\ndef test_file_format_pickling():\n formats = [\n ds.IpcFileFormat(),\n ds.CsvFileFormat(),\n ds.CsvFileFormat(pa.csv.ParseOptions(delimiter='\\t',\n ignore_empty_lines=True)),\n ds.ParquetFileFormat(),\n ds.ParquetFileFormat(\n read_options=ds.ParquetReadOptions(use_buffered_stream=True)\n ),\n ds.ParquetFileFormat(\n read_options={\n 'use_buffered_stream': True,\n 'buffer_size': 4096,\n }\n )\n ]\n for file_format in formats:\n assert pickle.loads(pickle.dumps(file_format)) == file_format\n\n\[email protected]('paths_or_selector', [\n fs.FileSelector('subdir', recursive=True),\n [\n 'subdir/1/xxx/file0.parquet',\n 'subdir/2/yyy/file1.parquet',\n ]\n])\ndef test_filesystem_factory(mockfs, paths_or_selector):\n format = ds.ParquetFileFormat(\n read_options=ds.ParquetReadOptions(dictionary_columns={\"str\"})\n )\n\n options = ds.FileSystemFactoryOptions('subdir')\n options.partitioning = ds.DirectoryPartitioning(\n pa.schema([\n pa.field('group', pa.int32()),\n pa.field('key', pa.string())\n ])\n )\n assert options.partition_base_dir == 'subdir'\n assert options.selector_ignore_prefixes == ['.', '_']\n assert options.exclude_invalid_files is False\n\n factory = ds.FileSystemDatasetFactory(\n mockfs, paths_or_selector, format, options\n )\n inspected_schema = factory.inspect()\n\n assert factory.inspect().equals(pa.schema([\n pa.field('i64', pa.int64()),\n pa.field('f64', pa.float64()),\n pa.field('str', pa.dictionary(pa.int32(), pa.string())),\n pa.field('const', pa.int64()),\n pa.field('group', pa.int32()),\n pa.field('key', pa.string()),\n ]), check_metadata=False)\n\n assert isinstance(factory.inspect_schemas(), list)\n assert isinstance(factory.finish(inspected_schema),\n ds.FileSystemDataset)\n assert factory.root_partition.equals(ds.scalar(True))\n\n dataset = factory.finish()\n assert isinstance(dataset, ds.FileSystemDataset)\n assert len(list(dataset.scan())) == 2\n\n scanner = 
ds.Scanner.from_dataset(dataset)\n expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())\n expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())\n expected_str = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 2, 3, 4], type=pa.int32()),\n pa.array(\"0 1 2 3 4\".split(), type=pa.string())\n )\n for task, group, key in zip(scanner.scan(), [1, 2], ['xxx', 'yyy']):\n expected_group = pa.array([group] * 5, type=pa.int32())\n expected_key = pa.array([key] * 5, type=pa.string())\n expected_const = pa.array([group - 1] * 5, type=pa.int64())\n for batch in task.execute():\n assert batch.num_columns == 6\n assert batch[0].equals(expected_i64)\n assert batch[1].equals(expected_f64)\n assert batch[2].equals(expected_str)\n assert batch[3].equals(expected_const)\n assert batch[4].equals(expected_group)\n assert batch[5].equals(expected_key)\n\n table = dataset.to_table()\n assert isinstance(table, pa.Table)\n assert len(table) == 10\n assert table.num_columns == 6\n\n\ndef test_make_fragment(multisourcefs):\n parquet_format = ds.ParquetFileFormat()\n dataset = ds.dataset('/plain', filesystem=multisourcefs,\n format=parquet_format)\n\n for path in dataset.files:\n fragment = parquet_format.make_fragment(path, multisourcefs)\n row_group_fragment = parquet_format.make_fragment(path, multisourcefs,\n row_groups=[0])\n for f in [fragment, row_group_fragment]:\n assert isinstance(f, ds.ParquetFileFragment)\n assert f.path == path\n assert isinstance(f.filesystem, type(multisourcefs))\n assert fragment.row_groups is None\n assert row_group_fragment.row_groups == [ds.RowGroupInfo(0)]\n\n\ndef _create_dataset_for_fragments(tempdir, chunk_size=None):\n import pyarrow.parquet as pq\n\n table = pa.table(\n [range(8), [1] * 8, ['a'] * 4 + ['b'] * 4],\n names=['f1', 'f2', 'part']\n )\n\n path = str(tempdir / \"test_parquet_dataset\")\n\n # write_to_dataset currently requires pandas\n pq.write_to_dataset(table, path,\n partition_cols=[\"part\"], chunk_size=chunk_size)\n\n return table, ds.dataset(path, format=\"parquet\", partitioning=\"hive\")\n\n\[email protected]\[email protected]\ndef test_fragments(tempdir):\n table, dataset = _create_dataset_for_fragments(tempdir)\n\n # list fragments\n fragments = list(dataset.get_fragments())\n assert len(fragments) == 2\n f = fragments[0]\n\n physical_names = ['f1', 'f2']\n # file's schema does not include partition column\n assert f.physical_schema.names == physical_names\n assert f.format.inspect(f.path, f.filesystem) == f.physical_schema\n assert f.partition_expression.equals(ds.field('part') == 'a')\n\n # By default, the partition column is not part of the schema.\n result = f.to_table()\n assert result.column_names == physical_names\n assert result.equals(table.remove_column(2).slice(0, 4))\n\n # scanning fragment includes partition columns when given the proper\n # schema.\n result = f.to_table(schema=dataset.schema)\n assert result.column_names == ['f1', 'f2', 'part']\n assert result.equals(table.slice(0, 4))\n assert f.physical_schema == result.schema.remove(2)\n\n # scanning fragments follow filter predicate\n result = f.to_table(schema=dataset.schema, filter=ds.field('f1') < 2)\n assert result.column_names == ['f1', 'f2', 'part']\n\n\[email protected]\[email protected]\ndef test_fragments_implicit_cast(tempdir):\n # ARROW-8693\n import pyarrow.parquet as pq\n\n table = pa.table([range(8), [1] * 4 + [2] * 4], names=['col', 'part'])\n path = str(tempdir / \"test_parquet_dataset\")\n pq.write_to_dataset(table, path, partition_cols=[\"part\"])\n\n part = 
ds.partitioning(pa.schema([('part', 'int8')]), flavor=\"hive\")\n dataset = ds.dataset(path, format=\"parquet\", partitioning=part)\n fragments = dataset.get_fragments(filter=ds.field(\"part\") >= 2)\n assert len(list(fragments)) == 1\n\n\[email protected]\[email protected]\ndef test_fragments_reconstruct(tempdir):\n table, dataset = _create_dataset_for_fragments(tempdir)\n\n def assert_yields_projected(fragment, row_slice,\n columns=None, filter=None):\n actual = fragment.to_table(\n schema=table.schema, columns=columns, filter=filter)\n column_names = columns if columns else table.column_names\n assert actual.column_names == column_names\n\n expected = table.slice(*row_slice).to_pandas()[[*column_names]]\n assert actual.equals(pa.Table.from_pandas(expected))\n\n fragment = list(dataset.get_fragments())[0]\n parquet_format = fragment.format\n\n # manually re-construct a fragment, with explicit schema\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression)\n assert new_fragment.to_table().equals(fragment.to_table())\n assert_yields_projected(new_fragment, (0, 4))\n\n # filter / column projection, inspected schema\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression)\n assert_yields_projected(new_fragment, (0, 2), filter=ds.field('f1') < 2)\n\n # filter requiring cast / column projection, inspected schema\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression)\n assert_yields_projected(new_fragment, (0, 2),\n columns=['f1'], filter=ds.field('f1') < 2.0)\n\n # filter on the partition column\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression)\n assert_yields_projected(new_fragment, (0, 4),\n filter=ds.field('part') == 'a')\n\n # Fragments don't contain the partition's columns if not provided to the\n # `to_table(schema=...)` method.\n with pytest.raises(ValueError, match=\"Field named 'part' not found\"):\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression)\n new_fragment.to_table(filter=ds.field('part') == 'a')\n\n\[email protected]\[email protected]\ndef test_fragments_parquet_row_groups(tempdir):\n table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)\n\n fragment = list(dataset.get_fragments())[0]\n\n # list and scan row group fragments\n row_group_fragments = list(fragment.split_by_row_group())\n assert len(row_group_fragments) == 2\n result = row_group_fragments[0].to_table(schema=dataset.schema)\n assert result.column_names == ['f1', 'f2', 'part']\n assert len(result) == 2\n assert result.equals(table.slice(0, 2))\n\n assert row_group_fragments[0].row_groups is not None\n assert row_group_fragments[0].row_groups[0].statistics == {\n 'f1': {'min': 0, 'max': 1},\n 'f2': {'min': 1, 'max': 1},\n }\n\n fragment = list(dataset.get_fragments(filter=ds.field('f1') < 1))[0]\n row_group_fragments = list(fragment.split_by_row_group(ds.field('f1') < 1))\n assert len(row_group_fragments) == 1\n result = row_group_fragments[0].to_table(filter=ds.field('f1') < 1)\n assert len(result) == 1\n\n\[email protected]\[email protected]\ndef test_fragments_parquet_row_groups_predicate(tempdir):\n table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)\n\n fragment 
= list(dataset.get_fragments())[0]\n assert fragment.partition_expression.equals(ds.field('part') == 'a')\n\n # predicate may reference a partition field not present in the\n # physical_schema if an explicit schema is provided to split_by_row_group\n\n # filter matches partition_expression: all row groups\n row_group_fragments = list(\n fragment.split_by_row_group(ds.field('part') == 'a',\n schema=dataset.schema))\n assert len(row_group_fragments) == 2\n\n # filter contradicts partition_expression: no row groups\n row_group_fragments = list(\n fragment.split_by_row_group(ds.field('part') == 'b',\n schema=dataset.schema))\n assert len(row_group_fragments) == 0\n\n\[email protected]\[email protected]\ndef test_fragments_parquet_row_groups_reconstruct(tempdir):\n table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)\n\n fragment = list(dataset.get_fragments())[0]\n parquet_format = fragment.format\n row_group_fragments = list(fragment.split_by_row_group())\n\n # manually re-construct row group fragments\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression,\n row_groups=[0])\n result = new_fragment.to_table()\n assert result.equals(row_group_fragments[0].to_table())\n\n # manually re-construct a row group fragment with filter/column projection\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression,\n row_groups={1})\n result = new_fragment.to_table(schema=table.schema, columns=['f1', 'part'],\n filter=ds.field('f1') < 3, )\n assert result.column_names == ['f1', 'part']\n assert len(result) == 1\n\n # out of bounds row group index\n new_fragment = parquet_format.make_fragment(\n fragment.path, fragment.filesystem,\n partition_expression=fragment.partition_expression,\n row_groups={2})\n with pytest.raises(IndexError, match=\"Trying to scan row group 2\"):\n new_fragment.to_table()\n\n\ndef test_partitioning_factory(mockfs):\n paths_or_selector = fs.FileSelector('subdir', recursive=True)\n format = ds.ParquetFileFormat()\n\n options = ds.FileSystemFactoryOptions('subdir')\n partitioning_factory = ds.DirectoryPartitioning.discover(['group', 'key'])\n assert isinstance(partitioning_factory, ds.PartitioningFactory)\n options.partitioning_factory = partitioning_factory\n\n factory = ds.FileSystemDatasetFactory(\n mockfs, paths_or_selector, format, options\n )\n inspected_schema = factory.inspect()\n # i64/f64 from data, group/key from \"/1/xxx\" and \"/2/yyy\" paths\n expected_schema = pa.schema([\n (\"i64\", pa.int64()),\n (\"f64\", pa.float64()),\n (\"str\", pa.string()),\n (\"const\", pa.int64()),\n (\"group\", pa.int32()),\n (\"key\", pa.string()),\n ])\n assert inspected_schema.equals(expected_schema)\n\n hive_partitioning_factory = ds.HivePartitioning.discover()\n assert isinstance(hive_partitioning_factory, ds.PartitioningFactory)\n\n\ndef test_partitioning_factory_dictionary(mockfs):\n paths_or_selector = fs.FileSelector('subdir', recursive=True)\n format = ds.ParquetFileFormat()\n options = ds.FileSystemFactoryOptions('subdir')\n\n max_size_to_inferred_type = {\n 0: pa.string(),\n 1: pa.string(),\n 2: pa.dictionary(pa.int32(), pa.string()),\n 64: pa.dictionary(pa.int32(), pa.string()),\n None: pa.dictionary(pa.int32(), pa.string()),\n }\n\n for max_size, expected_type in max_size_to_inferred_type.items():\n options.partitioning_factory = ds.DirectoryPartitioning.discover(\n ['group', 'key'],\n 
max_partition_dictionary_size=max_size)\n\n factory = ds.FileSystemDatasetFactory(\n mockfs, paths_or_selector, format, options)\n\n inferred_schema = factory.inspect()\n assert inferred_schema.field('key').type == expected_type\n\n if expected_type == pa.string():\n continue\n\n table = factory.finish().to_table().combine_chunks()\n actual = table.column('key').chunk(0)\n expected = pa.array(['xxx'] * 5 + ['yyy'] * 5).dictionary_encode()\n assert actual.equals(expected)\n\n\ndef test_partitioning_function():\n schema = pa.schema([(\"year\", pa.int16()), (\"month\", pa.int8())])\n names = [\"year\", \"month\"]\n\n # default DirectoryPartitioning\n part = ds.partitioning(schema)\n assert isinstance(part, ds.DirectoryPartitioning)\n part = ds.partitioning(field_names=names)\n assert isinstance(part, ds.PartitioningFactory)\n # needs schema or list of names\n with pytest.raises(ValueError):\n ds.partitioning()\n with pytest.raises(ValueError, match=\"Expected list\"):\n ds.partitioning(field_names=schema)\n with pytest.raises(ValueError, match=\"Cannot specify both\"):\n ds.partitioning(schema, field_names=schema)\n\n # Hive partitioning\n part = ds.partitioning(schema, flavor=\"hive\")\n assert isinstance(part, ds.HivePartitioning)\n part = ds.partitioning(flavor=\"hive\")\n assert isinstance(part, ds.PartitioningFactory)\n # cannot pass list of names\n with pytest.raises(ValueError):\n ds.partitioning(names, flavor=\"hive\")\n with pytest.raises(ValueError, match=\"Cannot specify 'field_names'\"):\n ds.partitioning(field_names=names, flavor=\"hive\")\n\n # unsupported flavor\n with pytest.raises(ValueError):\n ds.partitioning(schema, flavor=\"unsupported\")\n\n\ndef _create_single_file(base_dir, table=None, row_group_size=None):\n import pyarrow.parquet as pq\n if table is None:\n table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})\n path = base_dir / \"test.parquet\"\n pq.write_table(table, path, row_group_size=row_group_size)\n return table, path\n\n\ndef _create_directory_of_files(base_dir):\n import pyarrow.parquet as pq\n table1 = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})\n path1 = base_dir / \"test1.parquet\"\n pq.write_table(table1, path1)\n table2 = pa.table({'a': range(9, 18), 'b': [0.] * 4 + [1.] * 5})\n path2 = base_dir / \"test2.parquet\"\n pq.write_table(table2, path2)\n return (table1, table2), (path1, path2)\n\n\ndef _check_dataset(dataset, table):\n assert dataset.schema.equals(table.schema)\n assert dataset.to_table().equals(table)\n\n\ndef _check_dataset_from_path(path, table, **kwargs):\n # pathlib object\n assert isinstance(path, pathlib.Path)\n\n # accept Path, str, List[Path], List[str]\n for p in [path, str(path), [path], [str(path)]]:\n dataset = ds.dataset(path, **kwargs)\n assert isinstance(dataset, ds.FileSystemDataset)\n _check_dataset(dataset, table)\n\n # relative string path\n with change_cwd(path.parent):\n dataset = ds.dataset(path.name, **kwargs)\n assert isinstance(dataset, ds.FileSystemDataset)\n _check_dataset(dataset, table)\n\n\[email protected]\ndef test_open_dataset_single_file(tempdir):\n table, path = _create_single_file(tempdir)\n _check_dataset_from_path(path, table)\n\n\[email protected]\ndef test_deterministic_row_order(tempdir):\n # ARROW-8447 Ensure that dataset.to_table (and Scanner::ToTable) returns a\n # deterministic row ordering. 
This is achieved by constructing a single\n # parquet file with one row per RowGroup.\n table, path = _create_single_file(tempdir, row_group_size=1)\n _check_dataset_from_path(path, table)\n\n\[email protected]\ndef test_open_dataset_directory(tempdir):\n tables, _ = _create_directory_of_files(tempdir)\n table = pa.concat_tables(tables)\n _check_dataset_from_path(tempdir, table)\n\n\[email protected]\ndef test_open_dataset_list_of_files(tempdir):\n tables, (path1, path2) = _create_directory_of_files(tempdir)\n table = pa.concat_tables(tables)\n\n datasets = [\n ds.dataset([path1, path2]),\n ds.dataset([str(path1), str(path2)])\n ]\n for dataset in datasets:\n assert dataset.schema.equals(table.schema)\n result = dataset.to_table()\n assert result.equals(table)\n\n\ndef test_construct_from_single_file(tempdir):\n directory = tempdir / 'single-file'\n directory.mkdir()\n table, path = _create_single_file(directory)\n relative_path = path.relative_to(directory)\n\n # instantiate from a single file\n d1 = ds.dataset(path)\n # instantiate from a single file with a filesystem object\n d2 = ds.dataset(path, filesystem=fs.LocalFileSystem())\n # instantiate from a single file with prefixed filesystem URI\n d3 = ds.dataset(relative_path, filesystem=_filesystem_uri(directory))\n assert d1.to_table() == d2.to_table() == d3.to_table()\n\n\ndef test_construct_from_single_directory(tempdir):\n directory = tempdir / 'single-directory'\n directory.mkdir()\n tables, paths = _create_directory_of_files(directory)\n\n d1 = ds.dataset(directory)\n d2 = ds.dataset(directory, filesystem=fs.LocalFileSystem())\n d3 = ds.dataset(directory.name, filesystem=_filesystem_uri(tempdir))\n t1 = d1.to_table()\n t2 = d2.to_table()\n t3 = d3.to_table()\n assert t1 == t2 == t3\n\n\ndef test_construct_from_list_of_files(tempdir):\n # instantiate from a list of files\n directory = tempdir / 'list-of-files'\n directory.mkdir()\n tables, paths = _create_directory_of_files(directory)\n\n relative_paths = [p.relative_to(tempdir) for p in paths]\n with change_cwd(tempdir):\n d1 = ds.dataset(relative_paths)\n t1 = d1.to_table()\n assert len(t1) == sum(map(len, tables))\n\n d2 = ds.dataset(relative_paths, filesystem=_filesystem_uri(tempdir))\n t2 = d2.to_table()\n d3 = ds.dataset(paths)\n t3 = d3.to_table()\n d4 = ds.dataset(paths, filesystem=fs.LocalFileSystem())\n t4 = d4.to_table()\n\n assert t1 == t2 == t3 == t4\n\n\ndef test_construct_from_list_of_mixed_paths_fails(mockfs):\n # isntantiate from a list of mixed paths\n files = [\n 'subdir/1/xxx/file0.parquet',\n 'subdir/1/xxx/doesnt-exist.parquet',\n ]\n with pytest.raises(FileNotFoundError, match='doesnt-exist'):\n ds.dataset(files, filesystem=mockfs)\n\n\ndef test_construct_from_mixed_child_datasets(mockfs):\n # isntantiate from a list of mixed paths\n dataset = ds.dataset([\n ds.dataset(['subdir/1/xxx/file0.parquet',\n 'subdir/2/yyy/file1.parquet'], filesystem=mockfs),\n ds.dataset('subdir', filesystem=mockfs)\n ])\n assert isinstance(dataset, ds.UnionDataset)\n assert len(list(dataset.get_fragments())) == 4\n\n table = dataset.to_table()\n assert len(table) == 20\n assert table.num_columns == 4\n\n\ndef test_construct_empty_dataset():\n empty = ds.dataset([])\n table = empty.to_table()\n assert table.num_rows == 0\n assert table.num_columns == 0\n\n empty = ds.dataset([], schema=pa.schema([\n ('a', pa.int64()),\n ('a', pa.string())\n ]))\n table = empty.to_table()\n assert table.num_rows == 0\n assert table.num_columns == 2\n\n\ndef 
test_construct_from_invalid_sources_raise(multisourcefs):\n child1 = ds.FileSystemDatasetFactory(\n multisourcefs,\n fs.FileSelector('/plain'),\n format=ds.ParquetFileFormat()\n )\n child2 = ds.FileSystemDatasetFactory(\n multisourcefs,\n fs.FileSelector('/schema'),\n format=ds.ParquetFileFormat()\n )\n\n with pytest.raises(TypeError, match='Expected.*FileSystemDatasetFactory'):\n ds.dataset([child1, child2])\n\n expected = (\n \"Expected a list of path-like or dataset objects. The given list \"\n \"contains the following types: int\"\n )\n with pytest.raises(TypeError, match=expected):\n ds.dataset([1, 2, 3])\n\n expected = (\n \"Expected a path-like, list of path-likes or a list of Datasets \"\n \"instead of the given type: NoneType\"\n )\n with pytest.raises(TypeError, match=expected):\n ds.dataset(None)\n\n\[email protected]\ndef test_open_dataset_partitioned_directory(tempdir):\n import pyarrow.parquet as pq\n table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})\n\n path = tempdir / \"dataset\"\n path.mkdir()\n\n for part in range(3):\n part = path / \"part={}\".format(part)\n part.mkdir()\n pq.write_table(table, part / \"test.parquet\")\n\n # no partitioning specified, just read all individual files\n full_table = pa.concat_tables([table] * 3)\n _check_dataset_from_path(path, full_table)\n\n # specify partition scheme with discovery\n dataset = ds.dataset(\n str(path), partitioning=ds.partitioning(flavor=\"hive\"))\n expected_schema = table.schema.append(pa.field(\"part\", pa.int32()))\n assert dataset.schema.equals(expected_schema)\n\n # specify partition scheme with discovery and relative path\n with change_cwd(tempdir):\n dataset = ds.dataset(\n \"dataset/\", partitioning=ds.partitioning(flavor=\"hive\"))\n expected_schema = table.schema.append(pa.field(\"part\", pa.int32()))\n assert dataset.schema.equals(expected_schema)\n\n # specify partition scheme with string short-cut\n dataset = ds.dataset(str(path), partitioning=\"hive\")\n assert dataset.schema.equals(expected_schema)\n\n # specify partition scheme with explicit scheme\n dataset = ds.dataset(\n str(path),\n partitioning=ds.partitioning(\n pa.schema([(\"part\", pa.int8())]), flavor=\"hive\"))\n expected_schema = table.schema.append(pa.field(\"part\", pa.int8()))\n assert dataset.schema.equals(expected_schema)\n\n result = dataset.to_table()\n expected = full_table.append_column(\n \"part\", pa.array(np.repeat([0, 1, 2], 9), type=pa.int8()))\n assert result.equals(expected)\n\n\[email protected]\ndef test_open_dataset_filesystem(tempdir):\n # single file\n table, path = _create_single_file(tempdir)\n\n # filesystem inferred from path\n dataset1 = ds.dataset(str(path))\n assert dataset1.schema.equals(table.schema)\n\n # filesystem specified\n dataset2 = ds.dataset(str(path), filesystem=fs.LocalFileSystem())\n assert dataset2.schema.equals(table.schema)\n\n # local filesystem specified with relative path\n with change_cwd(tempdir):\n dataset3 = ds.dataset(\"test.parquet\", filesystem=fs.LocalFileSystem())\n assert dataset3.schema.equals(table.schema)\n\n # passing different filesystem\n with pytest.raises(FileNotFoundError):\n ds.dataset(str(path), filesystem=fs._MockFileSystem())\n\n\[email protected]\ndef test_open_dataset_unsupported_format(tempdir):\n _, path = _create_single_file(tempdir)\n with pytest.raises(ValueError, match=\"format 'blabla' is not supported\"):\n ds.dataset([path], format=\"blabla\")\n\n\[email protected]\ndef test_open_union_dataset(tempdir):\n _, path = _create_single_file(tempdir)\n 
dataset = ds.dataset(path)\n\n union = ds.dataset([dataset, dataset])\n assert isinstance(union, ds.UnionDataset)\n\n\ndef test_open_union_dataset_with_additional_kwargs(multisourcefs):\n child = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')\n with pytest.raises(ValueError, match=\"cannot pass any additional\"):\n ds.dataset([child], format=\"parquet\")\n\n\ndef test_open_dataset_non_existing_file():\n # ARROW-8213: Opening a dataset with a local incorrect path gives confusing\n # error message\n with pytest.raises(FileNotFoundError):\n ds.dataset('i-am-not-existing.parquet', format='parquet')\n\n with pytest.raises(pa.ArrowInvalid, match='cannot be relative'):\n ds.dataset('file:i-am-not-existing.parquet', format='parquet')\n\n\[email protected]\ndef s3_example_simple(s3_connection, s3_server):\n from pyarrow.fs import FileSystem\n import pyarrow.parquet as pq\n\n host, port, access_key, secret_key = s3_connection\n uri = (\n \"s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}\"\n .format(access_key, secret_key, host, port)\n )\n\n fs, path = FileSystem.from_uri(uri)\n\n fs.create_dir(\"mybucket\")\n table = pa.table({'a': [1, 2, 3]})\n with fs.open_output_stream(\"mybucket/data.parquet\") as out:\n pq.write_table(table, out)\n\n return table, path, fs, uri, host, port, access_key, secret_key\n\n\[email protected]\[email protected]\ndef test_open_dataset_from_uri_s3(s3_example_simple):\n # open dataset from non-localfs string path\n table, path, fs, uri, _, _, _, _ = s3_example_simple\n\n # full string URI\n dataset = ds.dataset(uri, format=\"parquet\")\n assert dataset.to_table().equals(table)\n\n # passing filesystem object\n dataset = ds.dataset(path, format=\"parquet\", filesystem=fs)\n assert dataset.to_table().equals(table)\n\n\[email protected]\[email protected] # still needed to create the data\ndef test_open_dataset_from_uri_s3_fsspec(s3_example_simple):\n table, path, _, _, host, port, access_key, secret_key = s3_example_simple\n s3fs = pytest.importorskip(\"s3fs\")\n\n from pyarrow.fs import PyFileSystem, FSSpecHandler\n\n fs = s3fs.S3FileSystem(\n key=access_key,\n secret=secret_key,\n client_kwargs={\n 'endpoint_url': 'http://{}:{}'.format(host, port)\n }\n )\n fs = PyFileSystem(FSSpecHandler(fs))\n\n dataset = ds.dataset(path, format=\"parquet\", filesystem=fs)\n assert dataset.to_table().equals(table)\n\n\[email protected]\[email protected]\ndef test_open_dataset_from_s3_with_filesystem_uri(s3_connection, s3_server):\n from pyarrow.fs import FileSystem\n import pyarrow.parquet as pq\n\n host, port, access_key, secret_key = s3_connection\n bucket = 'theirbucket'\n path = 'nested/folder/data.parquet'\n uri = \"s3://{}:{}@{}/{}?scheme=http&endpoint_override={}:{}\".format(\n access_key, secret_key, bucket, path, host, port\n )\n\n fs, path = FileSystem.from_uri(uri)\n assert path == 'theirbucket/nested/folder/data.parquet'\n\n fs.create_dir(bucket)\n\n table = pa.table({'a': [1, 2, 3]})\n with fs.open_output_stream(path) as out:\n pq.write_table(table, out)\n\n # full string URI\n dataset = ds.dataset(uri, format=\"parquet\")\n assert dataset.to_table().equals(table)\n\n # passing filesystem as an uri\n template = (\n \"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}\".format(\n access_key, secret_key, host, port\n )\n )\n cases = [\n ('theirbucket/nested/folder/', '/data.parquet'),\n ('theirbucket/nested/folder', 'data.parquet'),\n ('theirbucket/nested/', 'folder/data.parquet'),\n ('theirbucket/nested', 'folder/data.parquet'),\n 
('theirbucket', '/nested/folder/data.parquet'),\n ('theirbucket', 'nested/folder/data.parquet'),\n ]\n for prefix, path in cases:\n uri = template.format(prefix)\n dataset = ds.dataset(path, filesystem=uri, format=\"parquet\")\n assert dataset.to_table().equals(table)\n\n with pytest.raises(pa.ArrowInvalid, match='Missing bucket name'):\n uri = template.format('/')\n ds.dataset('/theirbucket/nested/folder/data.parquet', filesystem=uri)\n\n error = (\n \"The path component of the filesystem URI must point to a directory \"\n \"but it has a type: `{}`. The path component is `{}` and the given \"\n \"filesystem URI is `{}`\"\n )\n\n path = 'theirbucket/doesnt/exist'\n uri = template.format(path)\n with pytest.raises(ValueError) as exc:\n ds.dataset('data.parquet', filesystem=uri)\n assert str(exc.value) == error.format('NotFound', path, uri)\n\n path = 'theirbucket/nested/folder/data.parquet'\n uri = template.format(path)\n with pytest.raises(ValueError) as exc:\n ds.dataset('data.parquet', filesystem=uri)\n assert str(exc.value) == error.format('File', path, uri)\n\n\[email protected]\ndef test_filter_implicit_cast(tempdir):\n # ARROW-7652\n table = pa.table({'a': pa.array([0, 1, 2, 3, 4, 5], type=pa.int8())})\n _, path = _create_single_file(tempdir, table)\n dataset = ds.dataset(str(path))\n\n filter_ = ds.field('a') > 2\n assert len(dataset.to_table(filter=filter_)) == 3\n\n\ndef test_dataset_union(multisourcefs):\n child = ds.FileSystemDatasetFactory(\n multisourcefs, fs.FileSelector('/plain'),\n format=ds.ParquetFileFormat()\n )\n factory = ds.UnionDatasetFactory([child])\n\n # TODO(bkietz) reintroduce factory.children property\n assert len(factory.inspect_schemas()) == 1\n assert all(isinstance(s, pa.Schema) for s in factory.inspect_schemas())\n assert factory.inspect_schemas()[0].equals(child.inspect())\n assert factory.inspect().equals(child.inspect())\n assert isinstance(factory.finish(), ds.Dataset)\n\n\ndef test_union_dataset_from_other_datasets(tempdir, multisourcefs):\n child1 = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')\n child2 = ds.dataset('/schema', filesystem=multisourcefs, format='parquet',\n partitioning=['week', 'color'])\n child3 = ds.dataset('/hive', filesystem=multisourcefs, format='parquet',\n partitioning='hive')\n\n assert child1.schema != child2.schema != child3.schema\n\n assembled = ds.dataset([child1, child2, child3])\n assert isinstance(assembled, ds.UnionDataset)\n\n msg = 'cannot pass any additional arguments'\n with pytest.raises(ValueError, match=msg):\n ds.dataset([child1, child2], filesystem=multisourcefs)\n\n expected_schema = pa.schema([\n ('date', pa.date32()),\n ('index', pa.int64()),\n ('value', pa.float64()),\n ('color', pa.string()),\n ('week', pa.int32()),\n ('year', pa.int32()),\n ('month', pa.int32()),\n ])\n assert assembled.schema.equals(expected_schema)\n assert assembled.to_table().schema.equals(expected_schema)\n\n assembled = ds.dataset([child1, child3])\n expected_schema = pa.schema([\n ('date', pa.date32()),\n ('index', pa.int64()),\n ('value', pa.float64()),\n ('color', pa.string()),\n ('year', pa.int32()),\n ('month', pa.int32()),\n ])\n assert assembled.schema.equals(expected_schema)\n assert assembled.to_table().schema.equals(expected_schema)\n\n expected_schema = pa.schema([\n ('month', pa.int32()),\n ('color', pa.string()),\n ('date', pa.date32()),\n ])\n assembled = ds.dataset([child1, child3], schema=expected_schema)\n assert assembled.to_table().schema.equals(expected_schema)\n\n expected_schema = 
pa.schema([\n ('month', pa.int32()),\n ('color', pa.string()),\n ('unknown', pa.string()) # fill with nulls\n ])\n assembled = ds.dataset([child1, child3], schema=expected_schema)\n assert assembled.to_table().schema.equals(expected_schema)\n\n # incompatible schemas, date and index columns have conflicting types\n table = pa.table([range(9), [0.] * 4 + [1.] * 5, 'abcdefghj'],\n names=['date', 'value', 'index'])\n _, path = _create_single_file(tempdir, table=table)\n child4 = ds.dataset(path)\n\n with pytest.raises(pa.ArrowInvalid, match='Unable to merge'):\n ds.dataset([child1, child4])\n\n\ndef test_dataset_from_a_list_of_local_directories_raises(multisourcefs):\n msg = 'points to a directory, but only file paths are supported'\n with pytest.raises(IsADirectoryError, match=msg):\n ds.dataset(['/plain', '/schema', '/hive'], filesystem=multisourcefs)\n\n\ndef test_union_dataset_filesystem_datasets(multisourcefs):\n # without partitioning\n dataset = ds.dataset([\n ds.dataset('/plain', filesystem=multisourcefs),\n ds.dataset('/schema', filesystem=multisourcefs),\n ds.dataset('/hive', filesystem=multisourcefs),\n ])\n expected_schema = pa.schema([\n ('date', pa.date32()),\n ('index', pa.int64()),\n ('value', pa.float64()),\n ('color', pa.string()),\n ])\n assert dataset.schema.equals(expected_schema)\n\n # with hive partitioning for two hive sources\n dataset = ds.dataset([\n ds.dataset('/plain', filesystem=multisourcefs),\n ds.dataset('/schema', filesystem=multisourcefs),\n ds.dataset('/hive', filesystem=multisourcefs, partitioning='hive')\n ])\n expected_schema = pa.schema([\n ('date', pa.date32()),\n ('index', pa.int64()),\n ('value', pa.float64()),\n ('color', pa.string()),\n ('year', pa.int32()),\n ('month', pa.int32()),\n ])\n assert dataset.schema.equals(expected_schema)\n\n\[email protected]\ndef test_specified_schema(tempdir):\n import pyarrow.parquet as pq\n\n table = pa.table({'a': [1, 2, 3], 'b': [.1, .2, .3]})\n pq.write_table(table, tempdir / \"data.parquet\")\n\n def _check_dataset(schema, expected, expected_schema=None):\n dataset = ds.dataset(str(tempdir / \"data.parquet\"), schema=schema)\n if expected_schema is not None:\n assert dataset.schema.equals(expected_schema)\n else:\n assert dataset.schema.equals(schema)\n result = dataset.to_table()\n assert result.equals(expected)\n\n # no schema specified\n schema = None\n expected = table\n _check_dataset(schema, expected, expected_schema=table.schema)\n\n # identical schema specified\n schema = table.schema\n expected = table\n _check_dataset(schema, expected)\n\n # Specifying schema with change column order\n schema = pa.schema([('b', 'float64'), ('a', 'int64')])\n expected = pa.table([[.1, .2, .3], [1, 2, 3]], names=['b', 'a'])\n _check_dataset(schema, expected)\n\n # Specifying schema with missing column\n schema = pa.schema([('a', 'int64')])\n expected = pa.table([[1, 2, 3]], names=['a'])\n _check_dataset(schema, expected)\n\n # Specifying schema with additional column\n schema = pa.schema([('a', 'int64'), ('c', 'int32')])\n expected = pa.table([[1, 2, 3],\n pa.array([None, None, None], type='int32')],\n names=['a', 'c'])\n _check_dataset(schema, expected)\n\n # Specifying with incompatible schema\n schema = pa.schema([('a', 'int32'), ('b', 'float64')])\n dataset = ds.dataset(str(tempdir / \"data.parquet\"), schema=schema)\n assert dataset.schema.equals(schema)\n with pytest.raises(TypeError):\n dataset.to_table()\n\n\ndef test_ipc_format(tempdir):\n table = pa.table({'a': pa.array([1, 2, 3], type=\"int8\"),\n 'b': 
pa.array([.1, .2, .3], type=\"float64\")})\n\n path = str(tempdir / 'test.arrow')\n with pa.output_stream(path) as sink:\n writer = pa.RecordBatchFileWriter(sink, table.schema)\n writer.write_batch(table.to_batches()[0])\n writer.close()\n\n dataset = ds.dataset(path, format=ds.IpcFileFormat())\n result = dataset.to_table()\n assert result.equals(table)\n\n for format_str in [\"ipc\", \"arrow\"]:\n dataset = ds.dataset(path, format=format_str)\n result = dataset.to_table()\n assert result.equals(table)\n\n\[email protected]\ndef test_csv_format(tempdir):\n table = pa.table({'a': pa.array([1, 2, 3], type=\"int64\"),\n 'b': pa.array([.1, .2, .3], type=\"float64\")})\n\n path = str(tempdir / 'test.csv')\n table.to_pandas().to_csv(path, index=False)\n\n dataset = ds.dataset(path, format=ds.CsvFileFormat())\n result = dataset.to_table()\n assert result.equals(table)\n\n dataset = ds.dataset(path, format='csv')\n result = dataset.to_table()\n assert result.equals(table)\n\n\ndef test_feather_format(tempdir):\n from pyarrow.feather import write_feather\n\n table = pa.table({'a': pa.array([1, 2, 3], type=\"int8\"),\n 'b': pa.array([.1, .2, .3], type=\"float64\")})\n\n basedir = tempdir / \"feather_dataset\"\n basedir.mkdir()\n write_feather(table, str(basedir / \"data.feather\"))\n\n dataset = ds.dataset(basedir, format=ds.IpcFileFormat())\n result = dataset.to_table()\n assert result.equals(table)\n\n dataset = ds.dataset(basedir, format=\"feather\")\n result = dataset.to_table()\n assert result.equals(table)\n\n # ARROW-8641 - column selection order\n result = dataset.to_table(columns=[\"b\", \"a\"])\n assert result.column_names == [\"b\", \"a\"]\n result = dataset.to_table(columns=[\"a\", \"a\"])\n assert result.column_names == [\"a\", \"a\"]\n\n # error with Feather v1 files\n write_feather(table, str(basedir / \"data1.feather\"), version=1)\n with pytest.raises(ValueError):\n ds.dataset(basedir, format=\"feather\").to_table()\n\n\ndef _create_parquet_dataset_simple(root_path):\n import pyarrow.parquet as pq\n\n metadata_collector = []\n\n for i in range(4):\n table = pa.table({'f1': [i] * 10, 'f2': np.random.randn(10)})\n pq.write_to_dataset(\n table, str(root_path), metadata_collector=metadata_collector\n )\n\n metadata_path = str(root_path / '_metadata')\n # write _metadata file\n pq.write_metadata(\n table.schema, metadata_path,\n metadata_collector=metadata_collector\n )\n return metadata_path, table\n\n\[email protected]\[email protected] # write_to_dataset currently requires pandas\ndef test_parquet_dataset_factory(tempdir):\n root_path = tempdir / \"test_parquet_dataset\"\n metadata_path, table = _create_parquet_dataset_simple(root_path)\n dataset = ds.parquet_dataset(metadata_path)\n assert dataset.schema.equals(table.schema)\n assert len(dataset.files) == 4\n result = dataset.to_table()\n assert result.num_rows == 40\n\n\[email protected]\[email protected]\ndef test_parquet_dataset_factory_invalid(tempdir):\n root_path = tempdir / \"test_parquet_dataset_invalid\"\n metadata_path, table = _create_parquet_dataset_simple(root_path)\n # remove one of the files\n list(root_path.glob(\"*.parquet\"))[0].unlink()\n dataset = ds.parquet_dataset(metadata_path)\n assert dataset.schema.equals(table.schema)\n assert len(dataset.files) == 4\n with pytest.raises(FileNotFoundError):\n dataset.to_table()\n\n\ndef _create_metadata_file(root_path):\n # create _metadata file from existing parquet dataset\n import pyarrow.parquet as pq\n\n parquet_paths = list(sorted(root_path.rglob(\"*.parquet\")))\n 
schema = pq.ParquetFile(parquet_paths[0]).schema.to_arrow_schema()\n\n metadata_collector = []\n for path in parquet_paths:\n metadata = pq.ParquetFile(path).metadata\n metadata.set_file_path(str(path.relative_to(root_path)))\n metadata_collector.append(metadata)\n\n metadata_path = root_path / \"_metadata\"\n pq.write_metadata(\n schema, metadata_path, metadata_collector=metadata_collector\n )\n return metadata_path\n\n\ndef _create_parquet_dataset_partitioned(root_path):\n import pyarrow.parquet as pq\n\n table = pa.table([\n pa.array(range(20)), pa.array(np.random.randn(20)),\n pa.array(np.repeat(['a', 'b'], 10))],\n names=[\"f1\", \"f2\", \"part\"]\n )\n pq.write_to_dataset(table, str(root_path), partition_cols=['part'])\n return _create_metadata_file(root_path), table\n\n\[email protected]\[email protected]\ndef test_parquet_dataset_factory_partitioned(tempdir):\n root_path = tempdir / \"test_parquet_dataset_factory_partitioned\"\n metadata_path, table = _create_parquet_dataset_partitioned(root_path)\n\n partitioning = ds.partitioning(flavor=\"hive\")\n dataset = ds.parquet_dataset(metadata_path, partitioning=partitioning)\n\n assert dataset.schema.equals(table.schema)\n assert len(dataset.files) == 2\n result = dataset.to_table()\n assert result.num_rows == 20\n\n # the partitioned dataset does not preserve order\n result = result.to_pandas().sort_values(\"f1\").reset_index(drop=True)\n expected = table.to_pandas()\n pd.testing.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected]\ndef test_parquet_dataset_lazy_filtering(tempdir, open_logging_fs):\n fs, assert_opens = open_logging_fs\n\n # Test to ensure that no IO happens when filtering a dataset\n # created with ParquetDatasetFactory from a _metadata file\n\n root_path = tempdir / \"test_parquet_dataset_lazy_filtering\"\n metadata_path, _ = _create_parquet_dataset_simple(root_path)\n\n # creating the dataset should only open the metadata file\n with assert_opens([metadata_path]):\n dataset = ds.parquet_dataset(\n metadata_path,\n partitioning=ds.partitioning(flavor=\"hive\"),\n filesystem=fs)\n\n # materializing fragments should not open any file\n with assert_opens([]):\n fragments = list(dataset.get_fragments())\n\n # filtering fragments should not open any file\n with assert_opens([]):\n list(dataset.get_fragments(ds.field(\"f1\") > 15))\n\n # splitting by row group should still not open any file\n with assert_opens([]):\n fragments[0].split_by_row_group(ds.field(\"f1\") > 15)\n\n # FIXME(bkietz) on Windows this results in FileNotFoundErrors.\n # but actually scanning does open files\n # with assert_opens([f.path for f in fragments]):\n # dataset.to_table()\n\n\[email protected]\[email protected]\ndef test_dataset_schema_metadata(tempdir):\n # ARROW-8802\n df = pd.DataFrame({'a': [1, 2, 3]})\n path = tempdir / \"test.parquet\"\n df.to_parquet(path)\n dataset = ds.dataset(path)\n\n schema = dataset.to_table().schema\n projected_schema = dataset.to_table(columns=[\"a\"]).schema\n\n # ensure the pandas metadata is included in the schema\n assert b\"pandas\" in schema.metadata\n # ensure it is still there in a projected schema (with column selection)\n assert schema.equals(projected_schema, check_metadata=True)\n\n\[email protected]\ndef test_filter_mismatching_schema(tempdir):\n # ARROW-9146\n import pyarrow.parquet as pq\n\n table = pa.table({\"col\": pa.array([1, 2, 3, 4], type='int32')})\n pq.write_table(table, str(tempdir / \"data.parquet\"))\n\n # specifying explicit schema, but that mismatches the 
schema of the data\n schema = pa.schema([(\"col\", pa.int64())])\n dataset = ds.dataset(\n tempdir / \"data.parquet\", format=\"parquet\", schema=schema)\n\n # filtering on a column with such type mismatch should give a proper error\n with pytest.raises(TypeError):\n dataset.to_table(filter=ds.field(\"col\") > 2)\n\n fragment = list(dataset.get_fragments())[0]\n with pytest.raises(TypeError):\n fragment.to_table(filter=ds.field(\"col\") > 2, schema=schema)\n" ]
[ [ "pandas.DataFrame", "numpy.random.randn", "pandas.testing.assert_frame_equal", "numpy.repeat", "numpy.array_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
FooBandit89/PyKot
[ "892831e757b6a51153d077e5faaab5a411f1ff3d" ]
[ "PyKot.py" ]
[ "\"\"\"\nCopyright (c) 2021, Russell Wallace Butler\nAll rights reserved.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport traceback as tb\nfrom dataclasses import dataclass\nimport re as re\nimport numpy as np\n\nit_function_dict = {}\n\n\nclass Accumulator:\n\n def __init__(self, accumulation_seed):\n self.accumulation_seed = accumulation_seed\n\n def append(self, element_or_substring):\n if isinstance(self.accumulation_seed, str):\n self.accumulation_seed += element_or_substring\n elif isinstance(self.accumulation_seed, list):\n self.accumulation_seed.append(element_or_substring)\n return Accumulator(self.accumulation_seed)\n\n\nclass StringBuilder:\n\n def __init__(self):\n self.string = ''\n\n def __new__(cls):\n return Accumulator('')\n\n\nclass PyKot:\n\n def __init__(self, variable, recall=False):\n self.variable = variable\n self.recall = recall\n self.var = variable\n\n def __repr__(self):\n return str(self.variable)\n\n def last_index(self): # lastIndex()\n raise_type_error_if_merited(\"last_index()\", self.variable, str, list, tuple, type(np.array([])))\n return PyKot(len(self.variable) - 1)\n\n def drop(self, drop_from_front: int): # drop(n)\n raise_type_error_if_merited(\"drop(Int)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n result = self.variable[drop_from_front:]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def drop_last(self, drop_from_back: int): # dropLast(n)\n raise_type_error_if_merited(\"drop_last(Int)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n result = self.variable[:(len(self.variable) - drop_from_back)]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def drop_while(self, it_expression): # dropWhile(it expression)\n raise_type_error_if_merited(\"drop_while(it expression)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n while it_expression.in_line_function(self.variable[0]):\n self.variable = self.variable[1:]\n result = post_type_work(self.variable, original_type)\n return PyKot(result, True)\n\n def drop_last_while(self, it_expression): # dropLastWhile(it expression)\n raise_type_error_if_merited(\"drop_last_while(it expression)\",\n self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n while it_expression.in_line_function(self.variable[-1]):\n self.variable = self.variable[:-1]\n result = post_type_work(self.variable, original_type)\n return PyKot(result, True)\n\n def take(self, take_from_front: int): # take(n)\n raise_type_error_if_merited(\"take(Int)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n result = self.variable[:take_from_front]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def take_last(self, take_from_back: int): # take_last(n)\n raise_type_error_if_merited(\"take_last(Int)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n result = self.variable[len(self.variable) - take_from_back:]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def take_while(self, it_expression): # take_while(it 
expression)\n raise_type_error_if_merited(\"take_while(it expression)\", self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n if type_compliance(self.variable, str):\n result = ''\n while it_expression.in_line_function(self.variable[0]):\n result += self.variable[0]\n self.variable = self.variable[1:]\n else:\n result = []\n while it_expression.in_line_function(self.variable[0]):\n result.append(self.variable[0])\n self.variable = self.variable[1:]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def take_last_while(self, it_expression): # take_last_while(it expression)\n raise_type_error_if_merited(\"take_last_while(it expression)\",\n self.variable, str, list, tuple, type(np.array([])))\n self.variable, original_type = pre_type_work(self.variable)\n if type_compliance(self.variable, str):\n result = ''\n while it_expression.in_line_function(self.variable[-1]):\n result += self.variable[-1]\n self.variable = self.variable[:-1]\n else:\n result = []\n while it_expression.in_line_function(self.variable[-1]):\n result.append(self.variable[-1])\n self.variable = self.variable[:-1]\n result = post_type_work(result, original_type)\n return PyKot(result, True)\n\n def length(self): # length()\n raise_type_error_if_merited(\"length()\", self.variable, str, list, tuple, type(np.array([])))\n return PyKot(len(self.variable))\n\n def first(self): # first()\n raise_type_error_if_merited(\"first()\", self.variable, str, list, tuple, type(np.array([])))\n return PyKot(self.variable[0], True)\n\n def last(self): # last()\n raise_type_error_if_merited(\"last()\", self.variable, str, list, tuple, type(np.array([])))\n return PyKot(self.variable[-1], True)\n\n def trim_margin(self, margin=\"|\"): # trimMargin(margin)\n raise_type_error_if_merited(\"trim_margin(margin='|')\", self.variable, str)\n return PyKot(self.variable[(self.variable.find(margin) + len(margin)):], True)\n\n def compare_to(self, comparison: str, ignorecase=False): # compareTo(String, ignorecase=False)\n self.variable, original_type = pre_type_work(self.variable)\n comparison, original_type_comparison = pre_type_work(comparison)\n if type_compliance(self.variable, dict):\n self.variable = tuple(self.variable)\n if type_compliance(comparison, dict):\n comparison = tuple(comparison)\n if ignorecase:\n self.variable = self.variable.lower()\n comparison = comparison.lower()\n original = [self.variable, comparison]\n sort_compare = [self.variable, comparison]\n sort_compare.sort()\n sort_compare = -1 if sort_compare == original else 1\n return PyKot(0 if self.variable == comparison else sort_compare)\n\n def sub_string(self, first_index, second_index): # subString(i, j)\n raise_type_error_if_merited(\"sub_string(Int, Int)\", self.variable, str)\n first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)\n if valid1:\n first_index = first_index(self.variable)\n if valid2:\n second_index = second_index(self.variable)\n return PyKot(self.variable[first_index: second_index], True)\n\n def split(self, delimiter=' ', *additional_delimiters, ignorecase=False): # split(delimiter) or\n # split(delimiter, ignorecase=True) or split(delimiter.toRegex()) or split(regex(delimiter))\n raise_type_error_if_merited(\"split(delimiter=' ', *additional_delimiters, ignorecase=False)\",\n self.variable, str)\n if ignorecase:\n string = self.variable.lower()\n delimiter_list = [delimiter.lower()] + [d.lower() for d in additional_delimiters]\n 
else:\n string = self.variable\n delimiter_list = [delimiter] + [d for d in additional_delimiters]\n\n if type_compliance(delimiter, type(re.compile(''))):\n result = re.split(delimiter, self.variable)\n else:\n delimiter_indexes = []\n found = 0\n for delimiter in delimiter_list:\n while found != -1 and (len(string) - found) >= len(delimiter):\n found = string.find(delimiter, found, len(string) - 1)\n if found == -1:\n continue\n delimiter_indexes.append(found)\n found += len(delimiter)\n delimiter_indexes.append(found)\n\n found = 0\n\n delimiter_indexes.append(0)\n delimiter_indexes.sort()\n delimiter_indexes.append(-1)\n di = iter(delimiter_indexes)\n delimiter_indexes = list(zip(di, di))\n result = [self.variable[i:] if j == -1 else self.variable[i: j] for i, j in delimiter_indexes]\n\n return PyKot(tuple(result), True)\n\n def sub_sequence(self, first_index: int, second_index: int): # subSequence(i, j)\n raise_type_error_if_merited(\"sub_string(Int, Int)\", self.variable, str)\n first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)\n if valid1:\n first_index = first_index(self.variable)\n if valid2:\n second_index = second_index(self.variable)\n return PyKot(self.variable[first_index: second_index], True)\n\n def lines(self): # lines()\n raise_type_error_if_merited(\"lines()\", self.variable, str)\n return PyKot(self.variable.splitlines(), True)\n\n def capitalize(self): # capitalize()\n raise_type_error_if_merited(\"capitalize()\", self.variable, str)\n return PyKot(self.variable.capitalize(), True)\n\n def to_regex(self): # toRegex()\n raise_type_error_if_merited(\"to_regex()\", self.variable, str)\n return re.compile(self.variable)\n\n def replace(self, old_value: str, new_value: str, ignorecase=False): # replace(old, new, ignorecase=False)\n raise_type_error_if_merited(\"replace(String, String, ignorecase=False)\", self.variable, str)\n if ignorecase:\n find_index = self.variable.lower().find(old_value.lower())\n if find_index == -1:\n return PyKot(self.variable, True)\n return PyKot(self.variable[:find_index] + new_value + self.variable[(find_index + len(old_value)):], True)\n return PyKot(self.variable.replace(old_value, new_value), True)\n\n def ends_with(self, substring): # endsWith(substring)\n raise_type_error_if_merited(\"ends_with(String)\", self.variable, str, list, tuple, type(np.array([])))\n if type_compliance(self.variable, str):\n result = True if self.variable[-len(substring):] == substring else False\n else:\n self.variable = unpack_array(self.variable)\n result = True\n for element in self.variable:\n if not type_compliance(element, str):\n raise TypeError(\"All elements in iterable must be a String to use ends_with()\")\n if result:\n result = True if element[-len(substring):] == substring else False\n return PyKot(result, True)\n\n def plus(self, string_or_int): # plus(String) or plus(Int)\n raise_type_error_if_merited(\"plus(String) or plus(Int)\", self.variable, str, int)\n if type_compliance(self.variable, str) and type_compliance(string_or_int, int):\n string_or_int = str(string_or_int)\n elif type_compliance(self.variable, int) and type_compliance(string_or_int, str):\n string_or_int = int(string_or_int)\n return PyKot(self.variable + string_or_int, True)\n\n def get(self, index): # get()\n raise_type_error_if_merited(\"get(Int)\", self.variable, str, list, tuple, type(np.array([])), dict)\n if isinstance(self.variable[index], type(np.array([1])[0])):\n result = int(self.variable[index])\n elif isinstance(self.variable[index], 
type(np.array([1.0])[0])):\n result = float(self.variable[index])\n elif isinstance(self.variable, dict):\n result = self.variable[index] if index in self.variable.keys() else None\n else:\n result = self.variable[index]\n return PyKot(result, True)\n\n def to_string(self): # toString()\n raise_type_error_if_merited(\"to_string()\", self.variable, str, int, list, tuple, range, dict)\n if isinstance(self.variable, str):\n result = self.variable\n else:\n result = str(self.variable)\n return PyKot(result, True)\n\n def content_to_string(self): # contentToString()\n raise_type_error_if_merited(\"content_to_string()\", self.variable, list, tuple, type(np.array([])))\n return PyKot(str([x for x in self.variable]), True)\n\n def any(self, predicate=None): # any(predicate)\n raise_type_error_if_merited(\"any(), any(value), or any(predicate)\",\n self.variable, list, tuple, dict, type(np.array([])))\n result = unpack_array(self.variable)\n if type_compliance(predicate, type(it())):\n predicate = predicate.in_line_function\n if type_compliance(self.variable, dict):\n if not type_compliance(predicate, str, int):\n result = True if len(list(filter(predicate, self.variable.items()))) > 0 else False\n else:\n if not type_compliance(predicate, str, int):\n result = True if len(list(filter(predicate, result))) > 0 else False\n if type_compliance(predicate, str, int):\n if type_compliance(self.variable, dict):\n result = True if predicate in self.variable.keys() else False\n else:\n result = True if predicate in self.variable else False\n if predicate is None:\n if self.variable:\n result = True\n else:\n result = False\n return PyKot(result, True)\n\n def none(self): # any(predicate)\n raise_type_error_if_merited(\"any(), any(value), or any(predicate)\",\n self.variable, list, tuple, dict, type(np.array([])))\n return PyKot(False if unpack_array(self.variable) else True, True)\n\n def to_list(self): # toList()\n raise_type_error_if_merited(\"to_list()\", self.variable, list, tuple, dict, type(np.array([])))\n if type_compliance(self.variable, tuple):\n result = self.variable\n elif type_compliance(self.variable, dict):\n result = tuple([(key, self.variable[key]) for key in self.variable.keys()])\n else:\n result = tuple(self.variable)\n return PyKot(result, True)\n\n def to_mutable_list(self): # toMutableList()\n raise_type_error_if_merited(\"to_mutable_list()\", self.variable, list, tuple, dict, type(np.array([])))\n if isinstance(self.variable, tuple):\n result = list(self.variable)\n elif type_compliance(self.variable, dict):\n result = [(key, self.variable[key]) for key in self.variable.keys()]\n elif type_compliance(self.variable, type(np.array([]))):\n result = [x for x in unpack_array(self.variable)]\n else:\n result = self.variable\n return PyKot(result, True)\n\n def contains(self, element): # contains(element)\n raise_type_error_if_merited(\"contains()\", self.variable, list, tuple, dict, type(np.array([])))\n if isinstance(self.variable, dict):\n return PyKot(element in self.variable.keys(), True)\n return PyKot(element in self.variable, True)\n\n def filter(self, predicate): # filter(predicate)\n raise_type_error_if_merited(\"filter(function)\", self.variable, list, tuple, dict, type(np.array([])))\n predicate = predicate.in_line_function\n if type_compliance(self.variable, dict):\n new_map = dict(tuple(filter(predicate, self.variable.items())))\n result = new_map\n else:\n result = list(filter(predicate, self.variable))\n return PyKot(result, True)\n\n def filter_not(self, predicate): # 
filterNot(predicate)\n raise_type_error_if_merited(\"filter_not(function)\", self.variable, list, tuple, dict, type(np.array([])))\n predicate = predicate.in_line_function\n if type_compliance(self.variable, dict):\n new_map = {}\n do_not_include = list(filter(predicate, self.variable.items()))\n do_not_include = [x for x, y in do_not_include]\n for key in self.variable.keys():\n if key not in do_not_include:\n new_map[key] = self.variable[key]\n result = new_map\n else:\n new_list = []\n do_not_include = list(filter(predicate, self.variable))\n for value in [unpack_array_element(x) for x in self.variable]:\n if value not in do_not_include:\n new_list.append(value)\n result = new_list\n return PyKot(result, True)\n\n def filter_indexed(self, predicate): # filter_indexed(predicate)\n raise_type_error_if_merited(\"filter_indexed(predicate)\", self.variable, list, tuple, type(np.array([])))\n raise_type_error_if_merited(\"filter_indexed()\", predicate, type(lambda x: x))\n return PyKot([y for x, y in enumerate(unpack_array(self.variable)) if predicate(x, y)], True)\n\n def filter_not_null(self): # filter_not_null()\n raise_type_error_if_merited(\"filter_not_null()\", self.variable, list, tuple, type(np.array([])))\n return PyKot([x for x in unpack_array(self.variable) if x is not None])\n\n def filter_is_instance(self, acceptable_type): # filter_is_instance(type)\n raise_type_error_if_merited(\"filter_is_instance(acceptable_type)\",\n self.variable, list, tuple, type(np.array([])))\n return PyKot([x for x in unpack_array(self.variable) if type(x) == acceptable_type])\n\n def partition(self, predicate): # partition(predicate)\n raise_type_error_if_merited(\"partition(predicate)\", self.variable, list, tuple, type(np.array([])))\n if type_compliance(predicate, type(it())):\n predicate = predicate.in_line_function\n match = []\n rest = []\n for element in unpack_array(self.variable):\n if predicate(element):\n match.append(element)\n else:\n rest.append(element)\n return PyKot((tuple(match), tuple(rest)), True)\n\n def for_each(self, *statements): # forEach( statements )\n raise_type_error_if_merited(\"for_each(*statements)\", self.variable, list, tuple, type(np.array([])), dict)\n if type_compliance(self.variable, dict):\n useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]\n for value in useful_list:\n for statement in statements:\n statement(value)\n else:\n useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]\n for value in useful_list:\n for statement in statements:\n statement(value)\n return PyKot(self.variable, True)\n\n def also(self, *statements): # also( statements )\n raise_type_error_if_merited(\"also(*statements)\", self.variable,\n str, int, range, list, tuple, type(np.array([])), dict)\n if type_compliance(self.variable, dict):\n useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]\n for value in useful_list:\n for statement in statements:\n statement(value)\n elif type_compliance(self.variable, range, list, tuple, type(np.array([]))):\n useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]\n for value in useful_list:\n for statement in statements:\n statement(value)\n elif type_compliance(self.variable, str, int):\n for statement in statements:\n statement(self.variable)\n return PyKot(self.variable, True)\n\n def let(self, *statements): # let( statements )\n raise_type_error_if_merited(\"let(*statements)\", self.variable,\n str, int, range, list, tuple, type(np.array([])), dict, type(None))\n if self.variable 
is None:\n return PyKot(self.variable, True)\n if type_compliance(self.variable, dict):\n useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]\n for value in useful_list:\n for statement in statements:\n statement(value)\n elif type_compliance(self.variable, range, list, tuple, type(np.array([]))):\n useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]\n for value in useful_list:\n for statement in statements:\n statement(value)\n elif type_compliance(self.variable, str, int):\n for statement in statements:\n statement(self.variable)\n return PyKot(self.variable, True)\n\n def find(self, predicate): # find(predicate)\n raise_type_error_if_merited(\"find(predicate)\", self.variable, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n found = list(filter(predicate, self.variable))\n if len(found) == 0:\n return PyKot(None, True)\n return PyKot(found[0], True)\n\n def find_last(self, predicate): # findLast(predicate)\n raise_type_error_if_merited(\"find_last(predicate)\", self.variable, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n found = list(filter(predicate, self.variable))\n if len(found) == 0:\n return PyKot(None, True)\n return PyKot(found[-1], True)\n\n def with_index(self): # withIndex()\n raise_type_error_if_merited(\"with_index()\", self.variable, list, tuple, type(np.array([])))\n new_variable = [(i, e) for i, e in enumerate(self.variable)]\n if type_compliance(self.variable, list):\n return PyKot(new_variable, True)\n return PyKot(tuple(new_variable), True)\n\n def grouping_by(self, predicate): # groupingBy(it expression)\n raise_type_error_if_merited(\"grouping_by(predicate)\", self.variable, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n output_map = {}\n for element in self.variable:\n if predicate(element) in output_map:\n output_map[predicate(element)] = output_map[predicate(element)] + [element]\n continue\n output_map[predicate(element)] = [element]\n return PyKot(output_map, True)\n\n def size(self): # .size()\n raise_type_error_if_merited(\"size()\", self.variable, list, tuple, type(np.array([])), dict)\n if type_compliance(self.variable, dict):\n return len(self.variable.items())\n return PyKot(len(self.variable), True)\n\n def min_or_null(self): # minOrNull()\n raise_type_error_if_merited(\"min_or_null()\", self.variable, list, tuple, type(np.array([])))\n if len(self.variable) == 0:\n return PyKot(None, True)\n useful_list = [unpack_array_element(x) for x in self.variable]\n useful_list.sort()\n return PyKot(useful_list[0], True)\n\n def min_by_or_null(self, predicate): # minByOrNull(predicate)\n raise_type_error_if_merited(\"min_by_or_null()\", self.variable, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n useful_list = list(filter(predicate, [unpack_array_element(x) for x in self.variable]))\n if len(useful_list) == 0:\n return PyKot(None, True)\n if len(useful_list) == len(self.variable):\n useful_list.sort(key=predicate)\n else:\n useful_list.sort()\n return PyKot(useful_list[0], True)\n\n def max_or_null(self): # maxOrNull()\n raise_type_error_if_merited(\"max_or_null()\", self.variable, list, tuple, type(np.array([])))\n if len(self.variable) == 0:\n return PyKot(None, True)\n useful_list = [unpack_array_element(x) for x in self.variable]\n useful_list.sort()\n return PyKot(useful_list[-1], True)\n\n def max_by_or_null(self, predicate): # maxByOrNull(predicate)\n raise_type_error_if_merited(\"max_by_or_null()\", 
self.variable, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n useful_list = list(filter(predicate, [unpack_array_element(x) for x in self.variable]))\n if len(useful_list) == 0:\n return PyKot(None, True)\n if len(useful_list) == len(self.variable):\n useful_list.sort(key=predicate)\n else:\n useful_list.sort()\n return PyKot(useful_list[-1], True)\n\n def average(self): # average()\n raise_type_error_if_merited(\"max_or_null()\", self.variable, list, tuple, type(np.array([])))\n useful_list = [x for x in self.variable]\n return PyKot(int(sum(useful_list) / len(useful_list)), True)\n\n def sum(self): # sum()\n raise_type_error_if_merited(\"max_or_null()\", self.variable, list, tuple, type(np.array([])))\n return PyKot(int(sum([x for x in self.variable])), True)\n\n def count(self): # count()\n raise_type_error_if_merited(\"max_or_null()\", self.variable, list, tuple, type(np.array([])))\n return PyKot(len([x for x in self.variable]), True)\n\n def add(self, element): # add(element)\n raise_type_error_if_merited(\"add()\", self.variable, list)\n self.variable.append(element)\n return PyKot(self.variable, True)\n\n def add_all(self, *args): # addAll(elements)\n raise_type_error_if_merited(\"add_all(element) or add_all(element, ..., element)\", self.variable, list)\n self.variable += [arg for arg in args]\n return PyKot(self.variable, True)\n\n def keys(self): # keys()\n raise_type_error_if_merited(\"keys()\", self.variable, dict)\n return PyKot(self.variable.keys(), True)\n\n def values(self): # values()\n raise_type_error_if_merited(\"values()\", self.variable, dict)\n return PyKot(self.variable.values(), True)\n\n def each_count(self): # eachCount()\n raise_type_error_if_merited(\"each_count()\", self.variable, dict)\n output_map = dict([(key, len(self.variable[key]))\n if type_compliance(self.variable[key], list, tuple, type(np.array([])))\n else (key, 1) for key in self.variable.keys()])\n return PyKot(output_map, True)\n\n def each_count_to(self, count_map): # eachCountTo(Map)\n raise_type_error_if_merited(\"each_count_to(Map)\", self.variable, dict)\n new_count_map = dict([(key, len(self.variable[key]))\n if type_compliance(self.variable[key], list, tuple, type(np.array([])))\n else (key, 1) for key in self.variable.keys()])\n for key in count_map.keys():\n if key in new_count_map:\n new_count_map[key] = count_map[key] + new_count_map[key]\n else:\n new_count_map[key] = count_map[key]\n return PyKot(new_count_map, True)\n\n def aggregate(self,\n four_variable_lambda=lambda map_key_default,\n accumulator_default=Accumulator([]), element_default=None, first_default=True:\n accumulator_default.append(map_key_default).append(\":\").append(element_default) if first_default\n else accumulator_default.append(element_default)): # aggregate(lambda)\n raise_type_error_if_merited(\"aggregate(lambda_expression)\", self.variable, dict)\n result = {}\n preliminary_result = []\n for map_key in self.variable.keys():\n first = True\n accumulator = Accumulator([])\n for value in self.variable[map_key]:\n\n temp = four_variable_lambda(map_key, accumulator, value, first)\n if first and isinstance(temp, type(Accumulator([]))):\n accumulator = temp\n first = False\n preliminary_result.append(accumulator.accumulation_seed)\n for accumulation in preliminary_result:\n if type_compliance(accumulation, str):\n split_string = accumulation.split(\":\")\n result[split_string[0]] = split_string[1]\n elif type_compliance(accumulation, list):\n result[accumulation[0]] = accumulation[2]\n 
if len(accumulation) > 3:\n for additional_element in accumulation[3:]:\n result[accumulation[0]] += additional_element\n return PyKot(result, True)\n\n def set(self, index, value): # set(index, value)\n raise_type_error_if_merited(\"set(index, value)\", self.variable, list, type(np.array([])))\n if isinstance(self.variable, type(np.array([]))):\n list_array = [x for x in self.variable]\n list_array[index] = value\n self.variable = np.array(list_array)\n else:\n self.variable[index] = value\n return PyKot(self.variable, True)\n\n def indices(self): # indices()\n raise_type_error_if_merited(\"indices()\", self.variable, list, tuple, type(np.array([])))\n return PyKot(range(len(self.variable)), True)\n\n def is_empty(self): # isEmpty()\n raise_type_error_if_merited(\"is_empty()\", self.variable, list, tuple, dict, type(np.array([])))\n if isinstance(self.variable, range):\n result = True if self.variable == range(0) else False\n else:\n result = True if self.variable.indices() == range(0) else False\n return PyKot(result, True)\n\n def all(self, predicate=''): # all()\n raise_type_error_if_merited(\"all() or all(predicate)\", self.variable, str, list, tuple, type(np.array([])))\n predicate = predicate.in_line_function\n if type_compliance(predicate, type(lambda y: y)):\n result = True if len(list(filter(predicate, self.variable))) == len(self.variable) else False\n else:\n result = False\n if isinstance(self.variable, str):\n result = True if predicate == self.variable else False\n if isinstance(self.variable, list) or isinstance(self.variable, tuple):\n for x in self.variable:\n if predicate != x:\n return PyKot(False, True)\n result = True\n return PyKot(result, True)\n\n def as_sequence(self): # asSequence()\n return PyKot(tuple(self.variable), True)\n\n def as_iterable(self): # asIterable()\n return PyKot(tuple(self.variable), True)\n\n def iterator(self): # iterator()\n return PyKot(tuple(self.variable), True)\n\n def sequence(self): # sequence()\n return PyKot(tuple(self.variable), True)\n\n def uppercase_char(self): # uppercaseChar()\n if not isinstance(self.variable, str) or not len(self.variable) == 1:\n raise TypeError(\"Can only use uppercase_char() on PyKot(String) with a length of 1.\")\n return PyKot(self.variable.upper(), True)\n\n def uppercase(self): # uppercase()\n raise_type_error_if_merited(\"uppercase()\", self.variable, str)\n return PyKot(self.variable.upper(), True)\n\n def copy_of(self): # copyOf()\n raise_type_error_if_merited(\"copy_of()\", self.variable, list, dict, tuple, type(np.array([])))\n return PyKot(self.variable.copy(), True)\n\n def equals(self, other): # equals(other)\n return PyKot(self.variable == other, True)\n\n def put(self, key, value): # put(key, value)\n raise_type_error_if_merited(\"put(key, value)\", self.variable, dict)\n self.variable[key] = value\n return PyKot(self.variable, True)\n\n def get_or_put(self, key, value): # getOrPut(key) {value}\n raise_type_error_if_merited(\"get_or_put(key, value)\", self.variable, dict)\n if key not in self.variable.keys():\n self.variable[key] = value\n return PyKot(self.variable, True)\n return PyKot(self.variable[key], True)\n\n def sub_list(self, from_index: int, to_index: int): # subList(fromIndex, toIndex)\n raise_type_error_if_merited(\"sub_list(index, index)\", self.variable, list, tuple)\n return PyKot(self.variable[from_index:to_index], True)\n\n def map(self, lambda_function):\n if type_compliance(lambda_function, str):\n it_functions_hex_ids = re.findall(r'at\\s(.*?)>', lambda_function)\n 
it_functions = []\n for hex_id in it_functions_hex_ids:\n if 'x0' in hex_id:\n hex_id = hex_id.replace('x0', 'x')\n hex_id = hex_id.lower()\n if hex_id in it_function_dict.keys():\n it_functions.append(it_function_dict[hex_id].in_line_function)\n results = [tuple(x(y) for x in it_functions) for y in self.variable]\n results = str(tuple(results))\n\n elif type_compliance(lambda_function, tuple):\n results = []\n for var in self.variable:\n sub_results = []\n for it_function in lambda_function:\n it_function = it_function.in_line_function\n sub_results.append(it_function(var))\n results.append(tuple(sub_results))\n results = tuple(results)\n\n else:\n lambda_function = lambda_function.in_line_function\n results = tuple([lambda_function(x) for x in self.variable])\n return PyKot(results, True)\n\n def assert_equals(self, other): # assertEquals(other)\n if type_compliance(self, type(other)):\n if self != other:\n raise AssertionFailedError(\"Equals assertion failed.\")\n if self.variable != other:\n raise AssertionFailedError(\"Equals assertion failed.\")\n\n def assert_false(self): # assertFalse()\n if self.variable:\n raise AssertionFailedError(\"False assertion failed.\")\n\n def assert_true(self): # assertTrue()\n if not self.variable:\n raise AssertionFailedError(\"True assertion failed.\")\n\n def assert_not_null(self): # assertNotNull()\n if self.variable is None:\n raise AssertionFailedError(\"Not Null assertion failed.\")\n\n def assert_null(self): # assertNull()\n if self.variable is not None:\n raise AssertionFailedError(\"Null assertion failed.\")\n\n def assert_not_same(self, other): # assertNotSame(other)\n if self == other:\n raise AssertionFailedError(\"Not Same assertion failed.\")\n\n def assert_same(self, other): # assertSame(other)\n if self != other:\n raise AssertionFailedError(\"Same assertion failed.\")\n\n def take_if(self, it_expression): # takeIf(it expression)\n if it_expression.in_line_function(self.variable):\n return PyKot(self.variable, True)\n return PyKot(None, True)\n\n def take_unless(self, it_expression): # takeUnless(it expression)\n if it_expression.in_line_function(self.variable):\n return PyKot(None, True)\n return PyKot(self.variable, True)\n\n def index_of(self, target): # indexOf(target)\n type_compliance(self.variable, str, list, tuple, type(np.array([])))\n if isinstance(self.variable, str):\n return PyKot(self.variable.find(target), True)\n if target in self.variable:\n return PyKot([unpack_array_element(x) for x in self.variable].index(target), True)\n return PyKot(-1, True)\n\n def entries(self): # entries()\n raise_type_error_if_merited(\"entries()\", self.variable, dict)\n return PyKot(tuple([(x, y) for x, y in self.variable.items()]), True)\n\n def clear(self): # clear()\n raise_type_error_if_merited(\"clear()\", self.variable, dict)\n return PyKot({}, True)\n\n def contains_key(self, key): # containsKey(key)\n aise_type_error_if_merited(\"contains_key(key)\", self.variable, dict)\n return PyKot(True if key in self.variable.keys() else False, True)\n\n def contains_value(self, value): # containsValue(value)\n aise_type_error_if_merited(\"contains_value(value)\", self.variable, dict)\n return PyKot(True if value in self.variable.values() else False, True)\n\n # class methods\n def apply(self, *assignments):\n for assignment in assignments:\n setattr(self.variable, assignment[0], assignment[1])\n return PyKot(self.variable, True)\n\n\ndef println(string): # println()\n if type_compliance(string, type(it())):\n string = string.in_line_function\n 
return lambda x: print(string(x))\n print(string)\n\n\ndef regex(regex_expression): # regex(regular_expression)\n return re.compile(regex_expression)\n\n\ndef list_of(*args): # listOf(elements)\n if isinstance(args, int) or isinstance(args, str):\n return PyKot(args, False)\n return PyKot(tuple([element for element in args]), False)\n\n\ndef empty_list(): # emptyList<Any>()\n return PyKot(tuple(), False)\n\n\ndef mutable_list_of(*args): # mutableListOf(elements)\n if isinstance(args, int) or isinstance(args, str):\n return PyKot([args], False)\n return PyKot([element for element in args], False)\n\n\ndef array_of(*args): # arrayOf(elements)\n if len(args) == 0:\n return PyKot(np.array([]), False)\n if isinstance(args, int) or isinstance(args, str):\n return PyKot(np.array([args]), False)\n return PyKot(np.array([element for element in args]), False)\n\n\ndef empty_array(): # emptyArray()\n return PyKot(np.array([]), False)\n\n\ndef int_array_of(*args): # intArrayOf(elements)\n int_array = []\n for arg in args:\n raise_type_error_if_merited(\"int_array_of(Int)\", self.variable, int)\n int_array.append(arg)\n return PyKot(np.array(int_array)), False\n\n\ndef array_of_nulls(size=0): # arrayOfNulls(int)\n null_array = []\n for i in range(size):\n null_array.append(None)\n return PyKot(np.array(null_array)), False\n\n\ndef map_of(*args): # mapOf()\n list_of_keys_and_values = list(args)\n if isinstance(args[0], tuple):\n return PyKot(dict(args), False)\n result_dict = {}\n if len(list_of_keys_and_values) % 2 == 1:\n result_dict[list_of_keys_and_values[-1]] = None\n list_of_keys_and_values = list_of_keys_and_values[:-1]\n index_lead = 0\n for i in range((len(list_of_keys_and_values) // 2)):\n dict_key = list_of_keys_and_values[index_lead]\n dict_value = list_of_keys_and_values[index_lead + 1]\n if dict_key in result_dict.keys():\n if not isinstance(result_dict[dict_key], list):\n result_dict[dict_key] = [result_dict[dict_key]]\n result_dict[dict_key].append(dict_value)\n else:\n result_dict[dict_key] = dict_value\n index_lead += 2\n return PyKot(result_dict, False)\n\n\ndef mutable_map_of(*args): # mutableMapOf()\n list_of_keys_and_values = list(args)\n if isinstance(args[0], tuple):\n return PyKot(dict(args), False)\n result_dict = {}\n if len(list_of_keys_and_values) % 2 == 1:\n result_dict[list_of_keys_and_values[-1]] = None\n list_of_keys_and_values = list_of_keys_and_values[:-1]\n index_lead = 0\n for i in range((len(list_of_keys_and_values) // 2)):\n dict_key = list_of_keys_and_values[index_lead]\n dict_value = list_of_keys_and_values[index_lead + 1]\n if dict_key in result_dict.keys():\n if not isinstance(result_dict[dict_key], list):\n result_dict[dict_key] = [result_dict[dict_key]]\n result_dict[dict_key].append(dict_value)\n else:\n result_dict[dict_key] = dict_value\n index_lead += 2\n return PyKot(result_dict, False)\n\n\ndef hash_map(): # hashMap()\n return PyKot({}, False)\n\n\ndef elvis_operator(not_null_return, alternative_return): # not_null_return ?: alternative_return\n if type_compliance(not_null_return, type(PyKot(''))):\n not_null_return = not_null_return.variable\n if type_compliance(alternative_return, type(PyKot(''))):\n alternative_return = alternative_return.variable\n if not_null_return:\n return_value = not_null_return\n else:\n return_value = alternative_return\n return PyKot(return_value, True)\n\n\ndef it(): # it\n return It(lambda x: x)\n\n\nclass It:\n\n def __init__(self, in_line_function):\n self.in_line_function = in_line_function\n\n def __add__(self, 
other):\n return return_function(It(lambda x: self.in_line_function(x) + other))\n\n def __sub__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) - other))\n\n def __mul__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) * other))\n\n def __truediv__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) / other))\n\n def __pow__(self, power):\n return return_function(It(lambda x: self.in_line_function(x) ** power))\n\n def __lt__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) < comparison))\n\n def __le__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) <= comparison))\n\n def __eq__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) == comparison))\n\n def __ne__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) != comparison))\n\n def __gt__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) > comparison))\n\n def __ge__(self, comparison):\n return return_function(It(lambda x: self.in_line_function(x) >= comparison))\n\n def __mod__(self, modulo):\n return return_function(It(lambda x: self.in_line_function(x) % modulo))\n\n def __and__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) & other))\n\n def __or__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) | other))\n\n def __xor__(self, other):\n return return_function(It(lambda x: self.in_line_function(x) ^ other))\n\n def __invert__(self):\n return return_function(It(lambda x: ~self.in_line_function(x)))\n\n def contains(self, string):\n return return_function(It(lambda x: string in self.in_line_function(x.keys()) if isinstance(x, dict)\n else string in self.in_line_function(x)))\n\n def starts_with(self, string, start_index=0, ignorecase=False):\n if ignorecase:\n function = It(lambda x: True if string.lower() == self.in_line_function(x)[start_index:len(string)].lower()\n else False)\n else:\n function = It(lambda x: True if string == self.in_line_function(x)[start_index:len(string)]\n else False)\n return return_function(function)\n\n def ends_with(self, string, ignorecase=False):\n if ignorecase:\n function = It(lambda x:\n True if string.lower() == self.in_line_function(x)[-len(string):].lower()\n else False)\n else:\n function = It(lambda x: True if string == self.in_line_function(x)[-len(string):]\n else False)\n return return_function(function)\n\n def length(self):\n return return_function(It(lambda x: len(self.in_line_function(x))))\n\n def uppercase_char(self):\n return return_function(It(lambda x: self.in_line_function(x).upper()))\n\n def uppercase(self):\n return return_function(It(lambda x: self.in_line_function(x).upper()))\n\n def first(self):\n return return_function(It(lambda x: self.in_line_function(x)[0]))\n\n def code(self):\n return return_function(It(lambda x: ord(self.in_line_function(x))))\n\n def is_not_empty(self):\n return return_function(It(lambda x: True if not self.in_line_function(x)\n else False))\n\n def value(self):\n return return_function(It(lambda x: self.in_line_function(x)[1] if isinstance(x, tuple)\n else self.in_line_function(x)))\n\n def key(self):\n return return_function(It(lambda x: self.in_line_function(x)[0] if isinstance(x, tuple)\n else self.in_line_function(x)))\n\n def to_string(self):\n return return_function(It(lambda x: str(self.in_line_function(x))))\n\n def sum(self):\n return 
return_function(It(lambda x: sum(self.in_line_function(x))))\n\n\ndef type_compliance(variable, *args):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n return_list = []\n for arg in args:\n if isinstance(variable, arg):\n return_list.append(True)\n else:\n return_list.append(False)\n return True if True in return_list else False\n\n\ndef raise_type_error_if_merited(method: str, variable, *args, type_error_message=''):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n pykot_exchange = {\n str: \"PyKot(String)\",\n int: \"PyKot(Int)\",\n list: \"PyKot(MutableList)\",\n tuple: \"PyKot(List)\",\n dict: \"PyKot(Map)\",\n range: \"PyKot(Range)\",\n type(np.array([])): \"PyKot(Array)\",\n type(None): \"PyKot(None)\",\n type(lambda x: x): \"lambda (->) functions\"\n }\n\n if not type_compliance(variable, args):\n\n if type_error_message != '':\n raise TypeError(type_error_message)\n\n pykot_types = []\n for i in range(len(args)):\n pykot_types.append(pykot_exchange[args[i]])\n\n type_error_message = f\"Can only use {method} with \"\n if len(pykot_types) == 1:\n type_error_message += f\"{pykot_exchange[args[0]]}.\"\n elif len(pykot_types) == 2:\n type_error_message += f\"{pykot_exchange[args[0]]} or {pykot_exchange[args[1]]}.\"\n else:\n for arg in args[:-2]:\n type_error_message += f\"{pykot_exchange[arg]}, \"\n type_error_message += f\"{pykot_exchange[args[-2]]} or {pykot_exchange[args[-1]]}.\"\n\n raise TypeError(type_error_message)\n\n\ndef unpack_array(array):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n return [unpack_array_element(x) for x in array]\n\n\ndef unpack_array_element(element):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n if type_compliance(element, type([x for x in np.array([1])][0])):\n return int(element)\n elif type_compliance(element, type([x for x in np.array([1.0])][0])):\n return float(element)\n return element\n\n\ndef return_function(it_function):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n if hex(id(it_function)) not in it_function_dict:\n it_function_dict[hex(id(it_function))] = it_function\n return it_function\n\n\ndef pre_type_work(variable):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n original_type = type(variable)\n array = type(np.array([]))\n if original_type == array:\n variable = unpack_array(variable)\n if original_type == tuple:\n variable = list(variable)\n return variable, original_type\n\n\ndef post_type_work(result, original_type):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n array = type(np.array([]))\n if original_type == array:\n result = np.array(result)\n if original_type == tuple:\n result = tuple(result)\n return result\n\n\ndef unwrap_it(*it_expressions):\n \"\"\" Private: internal library function, not intended for public use. \"\"\"\n results = []\n for it_expression in it_expressions:\n if isinstance(it_expression, type(it())):\n it_expression = it_expression.in_line_function\n results.append(it_expression)\n results.append(isinstance(it_expression, type(lambda x: x)))\n results = tuple(results)\n return results\n\n\nclass Error(Exception):\n pass\n\n\nclass AssertionFailedError(Error):\n\n def __init__(self, message):\n self.message = message\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RasmusSemmle/scipy
[ "4ffeafe269597e6d41b3335549102cd5611b12cb" ]
[ "scipy/integrate/_ivp/common.py" ]
[ "from __future__ import division, print_function, absolute_import\nfrom itertools import groupby\nfrom warnings import warn\nimport numpy as np\nfrom scipy.sparse import find, coo_matrix\n\n\nEPS = np.finfo(float).eps\n\n\ndef validate_max_step(max_step):\n \"\"\"Assert that max_Step is valid and return it.\"\"\"\n if max_step <= 0:\n raise ValueError(\"`max_step` must be positive.\")\n return max_step\n\n\ndef warn_extraneous(extraneous):\n \"\"\"Display a warning for extraneous keyword arguments.\n\n The initializer of each solver class is expected to collect keyword\n arguments that it doesn't understand and warn about them. This function\n prints a warning for each key in the supplied dictionary.\n\n Parameters\n ----------\n extraneous : dict\n Extraneous keyword arguments\n \"\"\"\n if extraneous:\n warn(\"The following arguments have no effect for a chosen solver: {}.\"\n .format(\", \".join(\"`{}`\".format(x) for x in extraneous)))\n\n\ndef validate_tol(rtol, atol, n):\n \"\"\"Validate tolerance values.\"\"\"\n if rtol < 100 * EPS:\n warn(\"`rtol` is too low, setting to {}\".format(100 * EPS))\n rtol = 100 * EPS\n\n atol = np.asarray(atol)\n if atol.ndim > 0 and atol.shape != (n,):\n raise ValueError(\"`atol` has wrong shape.\")\n\n if np.any(atol < 0):\n raise ValueError(\"`atol` must be positive.\")\n\n return rtol, atol\n\n\ndef norm(x):\n \"\"\"Compute RMS norm.\"\"\"\n return np.linalg.norm(x) / x.size ** 0.5\n\n\ndef select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):\n \"\"\"Empirically select a good initial step.\n\n The algorithm is described in [1]_.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system.\n t0 : float\n Initial value of the independent variable.\n y0 : ndarray, shape (n,)\n Initial value of the dependent variable.\n f0 : ndarray, shape (n,)\n Initial value of the derivative, i. e. ``fun(t0, y0)``.\n direction : float\n Integration direction.\n order : float\n Method order.\n rtol : float\n Desired relative tolerance.\n atol : float\n Desired absolute tolerance.\n\n Returns\n -------\n h_abs : float\n Absolute value of the suggested initial step.\n\n References\n ----------\n .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential\n Equations I: Nonstiff Problems\", Sec. II.4.\n \"\"\"\n if y0.size == 0:\n return np.inf\n\n scale = atol + np.abs(y0) * rtol\n d0 = norm(y0 / scale)\n d1 = norm(f0 / scale)\n if d0 < 1e-5 or d1 < 1e-5:\n h0 = 1e-6\n else:\n h0 = 0.01 * d0 / d1\n\n y1 = y0 + h0 * direction * f0\n f1 = fun(t0 + h0 * direction, y1)\n d2 = norm((f1 - f0) / scale) / h0\n\n if d1 <= 1e-15 and d2 <= 1e-15:\n h1 = max(1e-6, h0 * 1e-3)\n else:\n h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))\n\n return min(100 * h0, h1)\n\n\nclass OdeSolution(object):\n \"\"\"Continuous ODE solution.\n\n It is organized as a collection of `DenseOutput` objects which represent\n local interpolants. It provides an algorithm to select a right interpolant\n for each given point.\n\n The interpolants cover the range between `t_min` and `t_max` (see\n Attributes below). Evaluation outside this interval is not forbidden, but\n the accuracy is not guaranteed.\n\n When evaluating at a breakpoint (one of the values in `ts`) a segment with\n the lower index is selected.\n\n Parameters\n ----------\n ts : array_like, shape (n_segments + 1,)\n Time instants between which local interpolants are defined. 
Must\n be strictly increasing or decreasing (zero segment with two points is\n also allowed).\n interpolants : list of DenseOutput with n_segments elements\n Local interpolants. An i-th interpolant is assumed to be defined\n between ``ts[i]`` and ``ts[i + 1]``.\n\n Attributes\n ----------\n t_min, t_max : float\n Time range of the interpolation.\n \"\"\"\n def __init__(self, ts, interpolants):\n ts = np.asarray(ts)\n d = np.diff(ts)\n # The first case covers integration on zero segment.\n if not ((ts.size == 2 and ts[0] == ts[-1])\n or np.all(d > 0) or np.all(d < 0)):\n raise ValueError(\"`ts` must be strictly increasing or decreasing.\")\n\n self.n_segments = len(interpolants)\n if ts.shape != (self.n_segments + 1,):\n raise ValueError(\"Numbers of time stamps and interpolants \"\n \"don't match.\")\n\n self.ts = ts\n self.interpolants = interpolants\n if ts[-1] >= ts[0]:\n self.t_min = ts[0]\n self.t_max = ts[-1]\n self.ascending = True\n self.ts_sorted = ts\n else:\n self.t_min = ts[-1]\n self.t_max = ts[0]\n self.ascending = False\n self.ts_sorted = ts[::-1]\n\n def _call_single(self, t):\n # Here we preserve a certain symmetry that when t is in self.ts,\n # then we prioritize a segment with a lower index.\n if self.ascending:\n ind = np.searchsorted(self.ts_sorted, t, side='left')\n else:\n ind = np.searchsorted(self.ts_sorted, t, side='right')\n\n segment = min(max(ind - 1, 0), self.n_segments - 1)\n if not self.ascending:\n segment = self.n_segments - 1 - segment\n\n return self.interpolants[segment](t)\n\n def __call__(self, t):\n \"\"\"Evaluate the solution.\n\n Parameters\n ----------\n t : float or array_like with shape (n_points,)\n Points to evaluate at.\n\n Returns\n -------\n y : ndarray, shape (n_states,) or (n_states, n_points)\n Computed values. Shape depends on whether `t` is a scalar or a\n 1-d array.\n \"\"\"\n t = np.asarray(t)\n\n if t.ndim == 0:\n return self._call_single(t)\n\n order = np.argsort(t)\n reverse = np.empty_like(order)\n reverse[order] = np.arange(order.shape[0])\n t_sorted = t[order]\n\n # See comment in self._call_single.\n if self.ascending:\n segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')\n else:\n segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')\n segments -= 1\n segments[segments < 0] = 0\n segments[segments > self.n_segments - 1] = self.n_segments - 1\n if not self.ascending:\n segments = self.n_segments - 1 - segments\n\n ys = []\n group_start = 0\n for segment, group in groupby(segments):\n group_end = group_start + len(list(group))\n y = self.interpolants[segment](t_sorted[group_start:group_end])\n ys.append(y)\n group_start = group_end\n\n ys = np.hstack(ys)\n ys = ys[:, reverse]\n\n return ys\n\n\nNUM_JAC_DIFF_REJECT = EPS ** 0.875\nNUM_JAC_DIFF_SMALL = EPS ** 0.75\nNUM_JAC_DIFF_BIG = EPS ** 0.25\nNUM_JAC_MIN_FACTOR = 1e3 * EPS\nNUM_JAC_FACTOR_INCREASE = 10\nNUM_JAC_FACTOR_DECREASE = 0.1\n\n\ndef num_jac(fun, t, y, f, threshold, factor, sparsity=None):\n \"\"\"Finite differences Jacobian approximation tailored for ODE solvers.\n\n This function computes finite difference approximation to the Jacobian\n matrix of `fun` with respect to `y` using forward differences.\n The Jacobian matrix has shape (n, n) and its element (i, j) is equal to\n ``d f_i / d y_j``.\n\n A special feature of this function is the ability to correct the step\n size from iteration to iteration. 
The main idea is to keep the finite\n difference significantly separated from its round-off error which\n approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a\n huge error and assures that the estimated derivative are reasonably close\n to the true values (i.e. the finite difference approximation is at least\n qualitatively reflects the structure of the true Jacobian).\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system implemented in a vectorized fashion.\n t : float\n Current time.\n y : ndarray, shape (n,)\n Current state.\n f : ndarray, shape (n,)\n Value of the right hand side at (t, y).\n threshold : float\n Threshold for `y` value used for computing the step size as\n ``factor * np.maximum(np.abs(y), threshold)``. Typically the value of\n absolute tolerance (atol) for a solver should be passed as `threshold`.\n factor : ndarray with shape (n,) or None\n Factor to use for computing the step size. Pass None for the very\n evaluation, then use the value returned from this function.\n sparsity : tuple (structure, groups) or None\n Sparsity structure of the Jacobian, `structure` must be csc_matrix.\n\n Returns\n -------\n J : ndarray or csc_matrix, shape (n, n)\n Jacobian matrix.\n factor : ndarray, shape (n,)\n Suggested `factor` for the next evaluation.\n \"\"\"\n y = np.asarray(y)\n n = y.shape[0]\n if n == 0:\n return np.empty((0, 0)), factor\n\n if factor is None:\n factor = np.ones(n) * EPS ** 0.5\n else:\n factor = factor.copy()\n\n # Direct the step as ODE dictates, hoping that such a step won't lead to\n # a problematic region. For complex ODEs it makes sense to use the real\n # part of f as we use steps along real axis.\n f_sign = 2 * (np.real(f) >= 0).astype(float) - 1\n y_scale = f_sign * np.maximum(threshold, np.abs(y))\n h = (y + factor * y_scale) - y\n\n # Make sure that the step is not 0 to start with. 
Not likely it will be\n # executed often.\n for i in np.nonzero(h == 0)[0]:\n while h[i] == 0:\n factor[i] *= 10\n h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]\n\n if sparsity is None:\n return _dense_num_jac(fun, t, y, f, h, factor, y_scale)\n else:\n structure, groups = sparsity\n return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,\n structure, groups)\n\n\ndef _dense_num_jac(fun, t, y, f, h, factor, y_scale):\n n = y.shape[0]\n h_vecs = np.diag(h)\n f_new = fun(t, y[:, None] + h_vecs)\n diff = f_new - f[:, None]\n max_ind = np.argmax(np.abs(diff), axis=0)\n r = np.arange(n)\n max_diff = np.abs(diff[max_ind, r])\n scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))\n\n diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale\n if np.any(diff_too_small):\n ind, = np.nonzero(diff_too_small)\n new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]\n h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]\n h_vecs[ind, ind] = h_new\n f_new = fun(t, y[:, None] + h_vecs[:, ind])\n diff_new = f_new - f[:, None]\n max_ind = np.argmax(np.abs(diff_new), axis=0)\n r = np.arange(ind.shape[0])\n max_diff_new = np.abs(diff_new[max_ind, r])\n scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))\n\n update = max_diff[ind] * scale_new < max_diff_new * scale[ind]\n if np.any(update):\n update, = np.nonzero(update)\n update_ind = ind[update]\n factor[update_ind] = new_factor[update]\n h[update_ind] = h_new[update]\n diff[:, update_ind] = diff_new[:, update]\n scale[update_ind] = scale_new[update]\n max_diff[update_ind] = max_diff_new[update]\n\n diff /= h\n\n factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE\n factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE\n factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)\n\n return diff, factor\n\n\ndef _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):\n n = y.shape[0]\n n_groups = np.max(groups) + 1\n h_vecs = np.empty((n_groups, n))\n for group in range(n_groups):\n e = np.equal(group, groups)\n h_vecs[group] = h * e\n h_vecs = h_vecs.T\n\n f_new = fun(t, y[:, None] + h_vecs)\n df = f_new - f[:, None]\n\n i, j, _ = find(structure)\n diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()\n max_ind = np.array(abs(diff).argmax(axis=0)).ravel()\n r = np.arange(n)\n max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()\n scale = np.maximum(np.abs(f[max_ind]),\n np.abs(f_new[max_ind, groups[r]]))\n\n diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale\n if np.any(diff_too_small):\n ind, = np.nonzero(diff_too_small)\n new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]\n h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]\n h_new_all = np.zeros(n)\n h_new_all[ind] = h_new\n\n groups_unique = np.unique(groups[ind])\n groups_map = np.empty(n_groups, dtype=int)\n h_vecs = np.empty((groups_unique.shape[0], n))\n for k, group in enumerate(groups_unique):\n e = np.equal(group, groups)\n h_vecs[k] = h_new_all * e\n groups_map[group] = k\n h_vecs = h_vecs.T\n\n f_new = fun(t, y[:, None] + h_vecs)\n df = f_new - f[:, None]\n i, j, _ = find(structure[:, ind])\n diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],\n (i, j)), shape=(n, ind.shape[0])).tocsc()\n\n max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()\n r = np.arange(ind.shape[0])\n max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()\n scale_new = np.maximum(\n np.abs(f[max_ind_new]),\n np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))\n\n update = max_diff[ind] * scale_new < 
max_diff_new * scale[ind]\n if np.any(update):\n update, = np.nonzero(update)\n update_ind = ind[update]\n factor[update_ind] = new_factor[update]\n h[update_ind] = h_new[update]\n diff[:, update_ind] = diff_new[:, update]\n scale[update_ind] = scale_new[update]\n max_diff[update_ind] = max_diff_new[update]\n\n diff.data /= np.repeat(h, np.diff(diff.indptr))\n\n factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE\n factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE\n factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)\n\n return diff, factor\n" ]
[ [ "numpy.diag", "numpy.asarray", "numpy.all", "numpy.max", "numpy.any", "numpy.searchsorted", "numpy.hstack", "scipy.sparse.coo_matrix", "scipy.sparse.find", "numpy.unique", "numpy.arange", "numpy.empty_like", "numpy.finfo", "numpy.real", "numpy.diff", "numpy.zeros", "numpy.nonzero", "numpy.equal", "numpy.argsort", "numpy.maximum", "numpy.abs", "numpy.linalg.norm", "numpy.ones", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tomsal/endtoenddecisiontrees
[ "be4cf69f530cbbd3cd1a2443c9cc513482a9a2c3" ]
[ "e2edt/gate.py" ]
[ "\"\"\"\nThis file implements the smooth gating/split function including the linear\ncombination of features. If given, the features are sent through a non linear\nmodule first, which may also be optimized thanks to autograd..\n\"\"\"\nimport torch.nn as nn\n\nclass Gate(nn.Module):\n def __init__(self, input_size, initial_steepness, non_linear_module=None):\n super(Gate, self).__init__()\n self.steepness = initial_steepness\n self.input_size = input_size\n\n # --- setup non-linear split feature module\n self.non_linear = None\n if non_linear_module is not None:\n self.non_linear = non_linear_module()\n self.input_size = self.non_linear.output_size\n\n # --- setup linear combination of features and sigmoid\n self.linear = nn.Linear(self.input_size, 1)\n norm = self.linear.weight.data.norm()\n self.linear.weight.data /= norm\n self.linear.bias.data /= norm\n norm = self.linear.weight.data.norm()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, X, debug=False):\n if self.non_linear is not None:\n X = self.non_linear(X)\n gating_logits = self.linear(X.contiguous().view(-1,self.input_size))\n gating_weight = self.sigmoid(gating_logits * self.steepness)\n return gating_weight\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohamedelmesawy/MTAF
[ "b0200a3a38843a6c95d270da63aae64ad7950113" ]
[ "mtaf/utils/loss.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\ndef cross_entropy_2d(predict, target):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n \"\"\"\n assert not target.requires_grad\n assert predict.dim() == 4\n assert target.dim() == 3\n assert predict.size(0) == target.size(0), f\"{predict.size(0)} vs {target.size(0)}\"\n assert predict.size(2) == target.size(1), f\"{predict.size(2)} vs {target.size(1)}\"\n assert predict.size(3) == target.size(2), f\"{predict.size(3)} vs {target.size(3)}\"\n n, c, h, w = predict.size()\n target_mask = (target >= 0) * (target < 200)\n target = target[target_mask]\n if not target.data.dim():\n return Variable(torch.zeros(1))\n predict = predict.transpose(1, 2).transpose(2, 3).contiguous()\n predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)\n loss = F.cross_entropy(predict, target, size_average=True)\n return loss\n\ndef entropy_loss(v):\n \"\"\"\n Entropy loss for probabilistic prediction vectors\n input: batch_size x channels x h x w\n output: batch_size x 1 x h x w\n \"\"\"\n assert v.dim() == 4\n n, c, h, w = v.size()\n return -torch.sum(torch.mul(v, torch.log2(v + 1e-30))) / (n * h * w * np.log2(c))\n\ndef kl_divergence(predict_0, predict_1):\n \"\"\"\n Args:\n predict_0:(n, c, h, w)\n predict_1:(n, c, h, w)\n \"\"\"\n assert predict_0.dim() == 4\n assert predict_1.dim() == 4\n assert predict_0.size(0) == predict_1.size(0), f\"{predict_0.size(0)} vs {predict_1.size(0)}\"\n assert predict_0.size(1) == predict_1.size(1), f\"{predict_0.size(1)} vs {predict_1.size(1)}\"\n assert predict_0.size(2) == predict_1.size(2), f\"{predict_0.size(2)} vs {predict_1.size(2)}\"\n assert predict_0.size(3) == predict_1.size(3), f\"{predict_0.size(3)} vs {predict_1.size(3)}\"\n n, c, h, w = predict_0.size()\n predict_0 = predict_0.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n predict_1 = predict_1.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n softmax_predict_0 = F.softmax(predict_0)\n softmax_predict_1 = F.softmax(predict_1)\n log_softmax_predict_0 = F.log_softmax(predict_0)\n loss = F.kl_div(log_softmax_predict_0,softmax_predict_1,size_average=True)\n return loss\n\ndef mse_loss(predict_0, predict_1):\n \"\"\"\n Args:\n predict_0:(n, c, h, w)\n predict_1:(n, c, h, w)\n \"\"\"\n assert predict_0.dim() == 4\n assert predict_1.dim() == 4\n assert predict_0.size(0) == predict_1.size(0), f\"{predict_0.size(0)} vs {predict_1.size(0)}\"\n assert predict_0.size(1) == predict_1.size(1), f\"{predict_0.size(1)} vs {predict_1.size(1)}\"\n assert predict_0.size(2) == predict_1.size(2), f\"{predict_0.size(2)} vs {predict_1.size(2)}\"\n assert predict_0.size(3) == predict_1.size(3), f\"{predict_0.size(3)} vs {predict_1.size(3)}\"\n n, c, h, w = predict_0.size()\n predict_0 = predict_0.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n predict_1 = predict_1.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n softmax_predict_0 = F.softmax(predict_0)\n softmax_predict_1 = F.softmax(predict_1)\n softmax_mask = ((torch.max(softmax_predict_0, dim=1, keepdim=True)[0].expand(-1, c)) > threshold)\n softmax_predict_0 = softmax_predict_0[softmax_mask]\n softmax_predict_1 = softmax_predict_1[softmax_mask]\n loss = F.mse_loss(softmax_predict_0,softmax_predict_1,size_average=True)\n return loss\n" ]
[ [ "torch.nn.functional.kl_div", "torch.nn.functional.softmax", "numpy.log2", "torch.max", "torch.nn.functional.log_softmax", "torch.zeros", "torch.nn.functional.cross_entropy", "torch.nn.functional.mse_loss", "torch.log2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chdhr-harshal/uber-driver-strategy
[ "f21f968e7aa04d8105bf42e046ab120f813aa12f" ]
[ "src/python/strategies.py" ]
[ "#!/usr/local/bin/python\n\n\"\"\"\nNaive strategy methods\n\"\"\"\nimport numpy as np\nfrom driver_utils import *\nfrom uncertainty_utils import *\n\ndef build_naive_strategy(self, city_attributes):\n for b in reversed(xrange(self.B)):\n for i in xrange(len(self.city_zones)):\n action_cumulative_earnings = {}\n for action in self.actions.get_available_actions(i, b, b, city_attributes[b]['travel_time_matrix']):\n if action[0] != 'a0':\n raise ValueError(\"Received wrong action for naive strategy\")\n parameters = get_passenger_action_parameters(self, i, b, b, city_attributes)\n empirical_transition_vector = parameters[0]\n rewards_vector = parameters[1]\n induced_earnings_vector = parameters[2]\n if parameters[3] is not None:\n uncertainty_level = parameters[3]\n else:\n uncertainty_level = self.uncertainty_level\n num_trips_vector = parameters[4]\n\n if not self.robust:\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(empirical_transition_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=False)\n else:\n n = len(self.city_zones)\n row_beta = calculate_beta(num_trips_vector, uncertainty_level, df=len(num_trips_vector)-1)\n\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(num_trips_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=True,\n beta_max=None,\n beta=row_beta,\n delta=None)\n action_cumulative_earnings[action] = cumulative_earnings\n\n best_action = max(action_cumulative_earnings, key=lambda x: action_cumulative_earnings[x])\n best_earning = action_cumulative_earnings[best_action]\n\n self.earnings_matrix.earnings_matrix[b][b][i] = best_earning\n self.actions_matrix.actions_matrix[b][b][i] = best_action\n\ndef build_relocation_strategy(self, city_attributes):\n for b in reversed(xrange(self.B)):\n for i in xrange(len(self.city_zones)):\n action_cumulative_earnings = {}\n for action in self.actions.get_available_actions(i, b, b, city_attributes[b]['travel_time_matrix']):\n if action[0] == 'a0':\n parameters = get_passenger_action_parameters(self, i, b, b, city_attributes)\n empirical_transition_vector = parameters[0]\n rewards_vector = parameters[1]\n induced_earnings_vector = parameters[2]\n if parameters[3] is not None:\n uncertainty_level = parameters[3]\n else:\n uncertainty_level = self.uncertainty_level\n num_trips_vector = parameters[4]\n\n if not self.robust:\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(empirical_transition_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=False)\n else:\n n = len(self.city_zones)\n row_beta = calculate_beta(num_trips_vector, uncertainty_level, df=len(num_trips_vector)-1)\n\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(num_trips_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=True,\n beta_max=None,\n beta=row_beta,\n delta=None)\n action_cumulative_earnings[action] = cumulative_earnings\n else:\n # action[0] == 'a2':\n if action[0] != 'a2':\n raise ValueError(\"Received wrong action for relocation strategy\")\n\n target_zone = action[1]\n parameters = get_relocate_action_parameters(self, i, target_zone, b, b, city_attributes)\n action_earnings = parameters[0]\n induced_earnings = parameters[1]\n\n cumulative_earnings = self.actions.relocate_cumulative_earnings(action_earnings,\n induced_earnings)\n action_cumulative_earnings[action] = cumulative_earnings\n\n best_action = max(action_cumulative_earnings, key=lambda x: action_cumulative_earnings[x])\n best_earning = 
action_cumulative_earnings[best_action]\n self.earnings_matrix.earnings_matrix[b][b][i] = best_earning\n self.actions_matrix.actions_matrix[b][b][i] = best_action\n\ndef build_flexible_strategy(self, city_attributes):\n for t in reversed(xrange(self.N)):\n for b in reversed(xrange(self.B)):\n for i in xrange(len(self.city_zones)):\n action_cumulative_earnings = {}\n for action in self.actions.get_available_actions(i, t, b, city_attributes[t]['travel_time_matrix']):\n # Get passenger action\n if action[0] == 'a0':\n parameters = get_passenger_action_parameters(self, i, t, b, city_attributes)\n empirical_transition_vector = parameters[0]\n rewards_vector = parameters[1]\n induced_earnings_vector = parameters[2]\n if parameters[3] is not None:\n uncertainty_level = parameters[3]\n else:\n uncertainty_level = self.uncertainty_level\n num_trips_vector = parameters[4]\n\n if not self.robust:\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(empirical_transition_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=False)\n else:\n n = len(self.city_zones)\n row_beta = calculate_beta(num_trips_vector, uncertainty_level, df=len(num_trips_vector)-1)\n\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(num_trips_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=True,\n beta_max=None,\n beta=row_beta,\n delta=None)\n action_cumulative_earnings[action] = cumulative_earnings\n else:\n # action[0] == 'a1'\n if action[0] != 'a1':\n raise ValueError(\"Received wrong action for flexible strategy\")\n parameters = go_home_action_parameters(self, i, t, b, city_attributes)\n action_earnings = parameters[0]\n induced_earnings = parameters[1]\n\n cumulative_earnings = self.actions.go_home_cumulative_earnings(action_earnings,\n induced_earnings)\n action_cumulative_earnings[action] = cumulative_earnings\n\n best_action = max(action_cumulative_earnings, key=lambda x: action_cumulative_earnings[x])\n best_earning = action_cumulative_earnings[best_action]\n self.earnings_matrix.earnings_matrix[t][b][i] = best_earning\n self.actions_matrix.actions_matrix[t][b][i] = best_action\n\ndef build_relocation_flexible_strategy(self, city_attributes):\n for t in reversed(xrange(self.N)):\n for b in reversed(xrange(self.B)):\n for i in xrange(len(self.city_zones)):\n action_cumulative_earnings = {}\n for action in self.actions.get_available_actions(i, t, b, city_attributes[t]['travel_time_matrix']):\n # Get passenger action\n if action[0] == 'a0':\n parameters = get_passenger_action_parameters(self, i, t, b, city_attributes)\n empirical_transition_vector = parameters[0]\n rewards_vector = parameters[1]\n induced_earnings_vector = parameters[2]\n if parameters[3] is not None:\n uncertainty_level = parameters[3]\n else:\n uncertainty_level = self.uncertainty_level\n num_trips_vector = parameters[4]\n\n if not self.robust:\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(empirical_transition_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=False)\n else:\n n = len(self.city_zones)\n row_beta = calculate_beta(num_trips_vector, uncertainty_level, df=len(num_trips_vector)-1)\n\n\n cumulative_earnings = self.actions.get_passenger_cumulative_earnings(num_trips_vector,\n rewards_vector,\n induced_earnings_vector,\n robust=True,\n beta_max=None,\n beta=row_beta,\n delta=None)\n action_cumulative_earnings[action] = cumulative_earnings\n # Go home action\n if action[0] == 'a1':\n # print \"Reached here\"\n parameters = 
go_home_action_parameters(self, i, t, b, city_attributes)\n action_earnings = parameters[0]\n induced_earnings = parameters[1]\n\n cumulative_earnings = self.actions.go_home_cumulative_earnings(action_earnings,\n induced_earnings)\n action_cumulative_earnings[action] = cumulative_earnings\n # Relocate action\n if action[0] == 'a2':\n target_zone = action[1]\n parameters = get_relocate_action_parameters(self, i, target_zone, t, b, city_attributes)\n action_earnings = parameters[0]\n induced_earnings = parameters[1]\n\n cumulative_earnings = self.actions.relocate_cumulative_earnings(action_earnings,\n induced_earnings)\n action_cumulative_earnings[action] = cumulative_earnings\n\n best_action = max(action_cumulative_earnings, key=lambda x: action_cumulative_earnings[x])\n best_earning = action_cumulative_earnings[best_action]\n self.earnings_matrix.earnings_matrix[t][b][i] = best_earning\n self.actions_matrix.actions_matrix[t][b][i] = best_action\n\ndef get_passenger_action_parameters(self, zone, t, b, city_attributes):\n uncertainty_level = None\n empirical_transition_vector = city_attributes[t]['transition_matrix'][zone]\n count_vector = city_attributes[t]['count_matrix'][zone]\n N = np.sum(count_vector)\n if N == 0:\n N = 100\n num_trips_vector = N * empirical_transition_vector\n\n driver_earnings_vector = city_attributes[t]['driver_earnings_matrix'][zone]\n driver_costs_vector = city_attributes[t]['driver_costs_matrix'][zone]\n surge_multiplier = city_attributes[t]['surge_vector'][zone]\n\n # If surge is to be included, multiply earnings by surge multiplier\n if self.surge == 'Passive' or self.surge == 'Active':\n driver_earnings_vector = surge_multiplier * driver_earnings_vector\n\n # rewards_vector = city_attributes[t]['driver_earnings_matrix'][zone] - city_attributes[t]['driver_costs_matrix'][zone]\n rewards_vector = driver_earnings_vector - driver_costs_vector\n travel_time_vector = city_attributes[t]['travel_time_matrix'][zone]\n\n t_dash_vector = t + travel_time_vector\n b_dash_vector = b + travel_time_vector\n\n induced_earnings_vector = []\n for j in xrange(len(self.city_zones)):\n t_dash = int(t_dash_vector[j])\n b_dash = int(b_dash_vector[j])\n v = self.earnings_matrix.get_earnings_matrix(t_dash, b_dash, j)\n induced_earnings_vector.append(v)\n if v == 0:\n home_zone = self.city_zones.index(self.home_zone)\n induced_earnings_vector[-1] = city_attributes[t]['driver_earnings_matrix'][zone][home_zone] - city_attributes[t]['driver_costs_matrix'][zone][home_zone]\n if j == home_zone and zone == home_zone:\n uncertainty_level = 0.0\n empirical_transition_vector = np.delete(empirical_transition_vector, j)\n num_trips_vector = np.delete(num_trips_vector, j)\n empirical_transition_vector = empirical_transition_vector/np.sum(empirical_transition_vector)\n rewards_vector = np.delete(rewards_vector, j)\n induced_earnings_vector = induced_earnings_vector[:-1]\n\n # for j in xrange(len(self.city_zones)):\n # t_dash = int(t_dash_vector[j])\n # b_dash = int(b_dash_vector[j])\n # v = self.earnings_matrix.get_earnings_matrix(t_dash, b_dash, j)\n # induced_earnings_vector.append(v)\n # if v == 0:\n # home_zone = self.city_zones.index(self.home_zone)\n # induced_earnings_vector[-1] = city_attributes[t]['driver_earnings_matrix'][zone][home_zone] - city_attributes[t]['driver_costs_matrix'][zone][home_zone]\n # if j == home_zone and zone == home_zone:\n # uncertainty_level = 0.0\n # empirical_transition_vector = np.delete(empirical_transition_vector, j)\n # num_trips_vector = 
np.delete(num_trips_vector, j)\n # empirical_transition_vector = empirical_transition_vector/np.sum(empirical_transition_vector)\n # rewards_vector = np.delete(rewards_vector, j)\n # induced_earnings_vector = induced_earnings_vector[:-1]\n induced_earnings_vector = np.array(induced_earnings_vector)\n\n return (empirical_transition_vector, rewards_vector, induced_earnings_vector, uncertainty_level, num_trips_vector)\n\ndef get_relocate_action_parameters(self, zone, target_zone, t, b, city_attributes):\n # target_zone = self.city_zones.index(target_zone)\n action_earnings = -1 * city_attributes[t]['driver_costs_matrix'][zone][target_zone]\n travel_time = city_attributes[t]['travel_time_matrix'][zone][target_zone]\n\n t_dash = t + travel_time\n b_dash = b + travel_time\n\n induced_earnings = self.earnings_matrix.get_earnings_matrix(t_dash, b_dash, target_zone)\n\n return (action_earnings, induced_earnings)\n\ndef go_home_action_parameters(self, zone, t, b, city_attributes):\n home_zone = self.city_zones.index(self.home_zone)\n action_earnings = -1 * city_attributes[t]['driver_costs_matrix'][zone][home_zone]\n travel_time = city_attributes[t]['travel_time_matrix'][zone][home_zone]\n t_dash = t + travel_time\n induced_earnings = self.earnings_matrix.get_earnings_matrix(t_dash, b, home_zone)\n\n return (action_earnings, induced_earnings)\n\ndef chase_surge_parameters(self, zone, t, b, city_attributes):\n surge_vector = city_attributes[t]['surge_vector']\n if surge_vector[zone] > 1:\n return None\n\n target_zone = np.argmax(surge_vector)\n surge_multiplier = surge_vector[target_zone]\n if surge_multiplier == 1:\n return None\n\n action_earnings = -1 * city_attributes[t]['driver_costs_matrix'][zone][target_zone]\n travel_time = city_attributes[t]['travel_time_matrix'][zone][target_zone]\n\n t_dash = t + travel_time\n b_dash = b + travel_time\n\n induced_earnings = self.earnings_matrix.get_earnings_matrix(t_dash, b_dash, target_zone)\n\n return (action_earnings, induced_earnings, target_zone)\n\n" ]
[ [ "numpy.delete", "numpy.array", "numpy.argmax", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
P0lyFish/noise2-series
[ "a21ad1b7cb20e44161393156efd7dcdab729b4a3", "a21ad1b7cb20e44161393156efd7dcdab729b4a3" ]
[ "models/loss.py", "data/bsd_dataset.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass CharbonnierLoss(nn.Module):\n \"\"\"Charbonnier Loss (L1)\"\"\"\n\n def __init__(self, eps=1e-6):\n super(CharbonnierLoss, self).__init__()\n self.eps = eps\n\n def forward(self, x, y):\n diff = x - y\n loss = torch.sum(torch.sqrt(diff * diff + self.eps))\n return loss\n\n\n# Define GAN loss: [vanilla | lsgan | wgan-gp]\nclass GANLoss(nn.Module):\n def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n super(GANLoss, self).__init__()\n self.gan_type = gan_type.lower()\n self.real_label_val = real_label_val\n self.fake_label_val = fake_label_val\n\n if self.gan_type == 'gan' or self.gan_type == 'ragan':\n self.loss = nn.BCEWithLogitsLoss()\n elif self.gan_type == 'lsgan':\n self.loss = nn.MSELoss()\n elif self.gan_type == 'wgan-gp':\n\n def wgan_loss(input, target):\n # target is boolean\n return -1 * input.mean() if target else input.mean()\n\n self.loss = wgan_loss\n else:\n raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))\n\n def get_target_label(self, input, target_is_real):\n if self.gan_type == 'wgan-gp':\n return target_is_real\n if target_is_real:\n return torch.empty_like(input).fill_(self.real_label_val)\n else:\n return torch.empty_like(input).fill_(self.fake_label_val)\n\n def forward(self, input, target_is_real):\n target_label = self.get_target_label(input, target_is_real)\n loss = self.loss(input, target_label)\n return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n def __init__(self, device=torch.device('cpu')):\n super(GradientPenaltyLoss, self).__init__()\n self.register_buffer('grad_outputs', torch.Tensor())\n self.grad_outputs = self.grad_outputs.to(device)\n\n def get_grad_outputs(self, input):\n if self.grad_outputs.size() != input.size():\n self.grad_outputs.resize_(input.size()).fill_(1.0)\n return self.grad_outputs\n\n def forward(self, interp, interp_crit):\n grad_outputs = self.get_grad_outputs(interp_crit)\n grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp,\n grad_outputs=grad_outputs, create_graph=True,\n retain_graph=True, only_inputs=True)[0]\n grad_interp = grad_interp.view(grad_interp.size(0), -1)\n grad_interp_norm = grad_interp.norm(2, dim=1)\n\n loss = ((grad_interp_norm - 1)**2).mean()\n return loss\n\n\nclass SpatialGradientLoss(nn.Module):\n \"\"\"Super sharp Loss\"\"\"\n\n def __init__(self):\n super(SpatialGradientLoss, self).__init__()\n\n def diffMap(self, A, alongX):\n B, N, C, H = A.shape\n if alongX:\n return A[:, :, 1:C, :] - A[:, :, 0:C-1, :]\n return A[:, :, :, 1:H] - A[:, :, :, 0:H-1]\n\n def forward(self, A, B):\n Amap = self.diffMap(A, alongX=True)\n Bmap = self.diffMap(B, alongX=True)\n loss = torch.sum((Amap - Bmap) ** 2)\n\n Amap = self.diffMap(A, alongX=False)\n Bmap = self.diffMap(B, alongX=False)\n loss += torch.sum((Amap - Bmap) ** 2)\n loss = torch.sqrt(loss)\n\n return loss\n\n\nclass KLDivergence(nn.Module):\n \"\"\"KL loss for VAE regularization\"\"\"\n def __init__(self):\n super(KLDivergence, self).__init__()\n\n def forward(self, X):\n B, N = X.shape\n\n mean = X.mean(dim=0).to(X.device)\n\n var = torch.zeros((N, N)).to(X.device)\n for i in range(B):\n y = X[i, :] - mean\n var += torch.mm(y.resize(N, 1), y.resize(1, N))\n for i in range(N):\n if var[i, i] <= 0:\n print(var[i][i])\n var = var.clamp(min=1e-18) / N\n\n kl = 0.5 * (-(var.log().trace()) + torch.trace(var)\n - N + mean.pow(2).sum())\n\n return kl\n\n\nclass FrobeniousNorm(nn.Module):\n def __init__(self, eps=1e-6):\n super(FrobeniousNorm, 
self).__init__()\n\n self.eps = eps\n\n def forward(self, X):\n B, C, H, W = X.shape\n return torch.sqrt(torch.sum(X ** 2, dim=(1, 2, 3)) + self.eps)\n", "'''\nBSD68 dataset\nsupport reading images from lmdb\n'''\nimport logging\nimport numpy as np\nimport random\nimport torch\nimport torch.utils.data as data\n\nimport data.util as util\n\nlogger = logging.getLogger('base')\n\n\nclass BSD68Dataset(data.Dataset):\n '''\n Reading the training BSD68 dataset\n '''\n\n def __init__(self, opt):\n super(BSD68Dataset, self).__init__()\n self.opt = opt\n # temporal augmentation\n\n self.LQ_data = util.load_data_storage(opt['LQ_data'])\n self.HQ_data = util.load_data_storage(opt['HQ_data'])\n\n if self.HQ_data is not None:\n self.need_GT = True\n else:\n self.need_GT = False\n\n assert len(self.LQ_data), 'Error: LQ data is empty.'\n\n def __getitem__(self, index):\n img_LQ = self.LQ_data[index]\n if self.need_GT:\n img_HQ = self.HQ_data[index]\n else:\n img_HQ = None\n\n img_LQ = img_LQ[:, :, np.newaxis] / 255.\n if self.need_GT:\n img_HQ = img_HQ[:, :, np.newaxis] / 255.\n\n if self.opt['phase'] == 'train':\n H, W, _ = img_LQ.shape\n HQ_size = self.opt['HQ_size']\n rnd_h = random.randint(0, max(0, H - HQ_size))\n rnd_w = random.randint(0, max(0, W - HQ_size))\n img_LQ = img_LQ[rnd_h:rnd_h + HQ_size, rnd_w:rnd_w + HQ_size, :]\n if self.need_GT:\n img_HQ = img_HQ[rnd_h:rnd_h + HQ_size,\n rnd_w:rnd_w + HQ_size, :]\n\n rlt = util.augment([img_LQ, img_HQ], self.opt['use_flip'],\n self.opt['use_rot'])\n img_LQ = rlt[0]\n img_HQ = rlt[1]\n\n img_LQ = np.transpose(img_LQ, (2, 0, 1))\n img_LQ = torch.from_numpy(np.ascontiguousarray(img_LQ)).float()\n\n if img_HQ is not None:\n img_HQ = np.transpose(img_HQ, (2, 0, 1))\n img_HQ = torch.from_numpy(np.ascontiguousarray(img_HQ)).float()\n\n if img_HQ is not None:\n return {'LQ': img_LQ, 'HQ': img_HQ}\n return {'LQ': img_LQ}\n\n def __len__(self):\n return len(self.LQ_data)\n" ]
[ [ "torch.empty_like", "torch.Tensor", "torch.zeros", "torch.sqrt", "torch.trace", "torch.sum", "torch.nn.BCEWithLogitsLoss", "torch.device", "torch.autograd.grad", "torch.nn.MSELoss" ], [ "numpy.ascontiguousarray", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
outlk/read-ICESat-2
[ "4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f" ]
[ "scripts/interp_sea_level_ICESat2_ATL11.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\ninterp_sea_level_ICESat2_ATL11.py\nWritten by Tyler Sutterley (10/2021)\nInterpolates sea level anomalies (sla), absolute dynamic topography (adt) and\n mean dynamic topography (mdt) to times and locations of ICESat-2 ATL11 data\n This data will be extrapolated onto land points\n (masking will be needed for accurate assessments)\n\nhttps://www.aviso.altimetry.fr/en/data/products/sea-surface-height-products/\n global/msla-h.html\nftp://ftp.sltac.cls.fr/Core/SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047/\n dataset-duacs-rep-global-merged-allsat-phy-l4-v3\n\nNote that the AVISO sea level data are gzip compressed netCDF4 files\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: Working data directory\n -C, --crossovers: Run ATL11 Crossovers\n -V, --verbose: Output information about each created file\n -M X, --mode X: Permission mode of directories and files created\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n scikit-learn: Machine Learning in Python\n https://scikit-learn.org/stable/index.html\n https://github.com/scikit-learn/scikit-learn\n h5py: Python interface for Hierarchal Data Format 5 (HDF5)\n https://h5py.org\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n\nPROGRAM DEPENDENCIES:\n read_ICESat2_ATL11.py: reads ICESat-2 annual land ice height data files\n time.py: utilities for calculating time operations\n utilities.py: download and management utilities for syncing files\n\nUPDATE HISTORY:\n Updated 10/2021: using python logging for handling verbose output\n added parsing for converting file lines to arguments\n Updated 05/2021: print full path of output filename\n Updated 02/2021: replaced numpy bool/int to prevent deprecation warnings\n Written 02/2021\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport re\nimport gzip\nimport h5py\nimport pyproj\nimport logging\nimport netCDF4\nimport argparse\nimport datetime\nimport numpy as np\nimport collections\nimport sklearn.neighbors\nimport icesat2_toolkit.time\nimport icesat2_toolkit.utilities\nfrom icesat2_toolkit.read_ICESat2_ATL11 import read_HDF5_ATL11\n\n#-- PURPOSE: set the hemisphere of interest based on the granule\ndef set_hemisphere(GRANULE):\n if GRANULE in ('10','11','12'):\n projection_flag = 'S'\n elif GRANULE in ('03','04','05'):\n projection_flag = 'N'\n return projection_flag\n\n#-- PURPOSE: interpolates to coordinates with inverse distance weighting\ndef inverse_distance(x, y, z, xi, yi, SEARCH='BallTree', N=10, POWER=2.0):\n #-- number of output points\n npts = len(xi)\n #-- create neighbors object for coordinates\n if (SEARCH == 'BallTree'):\n tree = sklearn.neighbors.BallTree(np.c_[x,y])\n elif (SEARCH == 'KDTree'):\n tree = sklearn.neighbors.KDTree(np.c_[x,y])\n #-- query the search tree to find the N closest points\n dist,indices = tree.query(np.c_[xi,yi], k=N, return_distance=True)\n #-- normalized weights if POWER > 0 (typically between 1 and 3)\n #-- in the inverse distance weighting\n power_inverse_distance = dist**(-POWER)\n s = np.sum(power_inverse_distance, axis=1)\n w = power_inverse_distance/np.broadcast_to(s[:,None],(npts,N))\n #-- calculate interpolated fields by inverse distance weighting\n return np.sum(w*z[indices],axis=1)\n\n#-- PURPOSE interpolate sea level anomalies to lat/lon and then to time\ndef 
interpolate_sea_level(base_dir, xi, yi, CJD, HEM):\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(EPSG[HEM]))\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- interpolate mean dynamic topography\n input_file = 'mdt_cnes_cls2013_global.nc.gz'\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(base_dir,input_file),'rb')\n #-- dictionary with input fields\n dinput = {}\n #-- read netCDF file for mean dynamic topography\n with netCDF4.Dataset('mdt', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['mdt'] = np.ma.array(fileID['mdt'][0,:,:].copy(),\n fill_value=fileID['mdt']._FillValue)\n dinput['mdt'].mask = (dinput['mdt'].data == dinput['mdt'].fill_value)\n #-- close the compressed file objects\n fd.close()\n #-- create 2-D grid coordinates from longitude and latitude vectors\n gridlon,gridlat = np.meshgrid(dinput['lon'],dinput['lat'])\n #-- convert from latitude/longitude into polar stereographic\n xg,yg = transformer.transform(gridlon,gridlat)\n\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput['mdt'].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate mean dynamic topography by inverse distance weighting\n MDT = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput['mdt'].data[indy,indx], xi, yi)\n\n #-- CNES Julian Days before and after measurement\n CJD1 = np.floor(CJD)\n #-- scale for linearly interpolating to date\n dt = (CJD - CJD1[0])\n #-- output sea level anomaly and absolute dynamic topography\n SLA = np.zeros_like(CJD)\n ADT = np.zeros_like(CJD)\n #-- for the range of dates\n for day in range(2):\n #-- convert from CNES Julians Days to calendar dates for time\n JD1 = CJD1 + day + 2433282.5\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(JD1[0],\n FORMAT='tuple', ASTYPE=int)\n #-- sea level directory\n ddir = os.path.join(base_dir, '{0:0.0f}'.format(YY))\n #-- input file for day before the measurement\n regex = re.compile(('dt_global_allsat_phy_l4_{0:4d}{1:02d}{2:02d}_'\n '(\\d{{4}})(\\d{{2}})(\\d{{2}}).nc.gz').format(YY,MM,DD))\n input_file, = [fi for fi in os.listdir(ddir) if regex.match(fi)]\n #-- dictionary with input fields\n dinput = {}\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(ddir,input_file),'rb')\n #-- read netCDF file for time\n with netCDF4.Dataset('sla', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['sla'] = np.ma.array(fileID['sla'][0,:,:].copy(),\n fill_value=fileID['sla']._FillValue)\n dinput['adt'] = np.ma.array(fileID['adt'][0,:,:].copy(),\n fill_value=fileID['adt']._FillValue)\n #-- close the compressed file objects\n fd.close()\n #-- for each variable to interpolate\n out = {}\n for var in ['sla','adt']:\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput[var].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate variable by inverse distance 
weighting\n out[var] = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput[var].data[indy,indx], xi, yi)\n #-- linearly interpolate to date for iteration\n SLA += out['sla']*(2.0*dt*day - dt - day + 1.0)\n ADT += out['adt']*(2.0*dt*day - dt - day + 1.0)\n #-- return interpolated values\n return (MDT,SLA,ADT)\n\n#-- PURPOSE: read ICESat-2 annual land ice height data (ATL11) from NSIDC\n#-- interpolate AVISO sea level at points and times\ndef interp_sea_level_ICESat2(base_dir, FILE, CROSSOVERS=False, VERBOSE=False,\n MODE=0o775):\n\n #-- create logger\n loglevel = logging.INFO if VERBOSE else logging.CRITICAL\n logging.basicConfig(level=loglevel)\n\n #-- read data from input file\n logging.info('{0} -->'.format(os.path.basename(FILE)))\n IS2_atl11_mds,IS2_atl11_attrs,IS2_atl11_pairs = read_HDF5_ATL11(FILE,\n ATTRIBUTES=True, CROSSOVERS=CROSSOVERS)\n DIRECTORY = os.path.dirname(FILE)\n #-- extract parameters from ICESat-2 ATLAS HDF5 file name\n rx = re.compile(r'(processed_)?(ATL\\d{2})_(\\d{4})(\\d{2})_(\\d{2})(\\d{2})_'\n r'(\\d{3})_(\\d{2})(.*?).h5$')\n SUB,PRD,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX = rx.findall(FILE).pop()\n #-- set the hemisphere flag based on ICESat-2 granule\n HEM = set_hemisphere(GRAN)\n\n #-- HDF5 file attributes\n attrib = {}\n #-- mean dynamic topography\n attrib['mdt'] = {}\n attrib['mdt']['long_name'] = 'Mean Dynamic Topography'\n attrib['mdt']['description'] = 'Sea surface height above geoid'\n attrib['mdt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- sea level anomalies\n attrib['sla'] = {}\n attrib['sla']['long_name'] = 'Sea Level Anomaly'\n attrib['sla']['description'] = 'Sea surface anomalies'\n attrib['sla']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- absolute dynamic topography\n attrib['adt'] = {}\n attrib['adt']['long_name'] = 'Absolute Dynamic Topography'\n attrib['adt']['description'] = ('Sea surface height above geoid calculated '\n 'by adding the mean dynamic topography to the sea level anomalies')\n attrib['adt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string('epsg:4326')\n crs2 = pyproj.CRS.from_string(EPSG[HEM])\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- number of GPS seconds between the GPS epoch\n #-- and ATLAS Standard Data Product (SDP) epoch\n atlas_sdp_gps_epoch = IS2_atl11_mds['ancillary_data']['atlas_sdp_gps_epoch']\n\n #-- copy variables for outputting to HDF5 file\n IS2_atl11_corr = {}\n IS2_atl11_fill = {}\n IS2_atl11_dims = {}\n IS2_atl11_corr_attrs = {}\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n #-- Add this value to delta time parameters to compute full gps_seconds\n IS2_atl11_corr['ancillary_data'] = {}\n IS2_atl11_corr_attrs['ancillary_data'] = {}\n for key in ['atlas_sdp_gps_epoch']:\n #-- get each HDF5 variable\n IS2_atl11_corr['ancillary_data'][key] = IS2_atl11_mds['ancillary_data'][key]\n #-- Getting attributes of group and included variables\n IS2_atl11_corr_attrs['ancillary_data'][key] = {}\n for att_name,att_val in 
IS2_atl11_attrs['ancillary_data'][key].items():\n IS2_atl11_corr_attrs['ancillary_data'][key][att_name] = att_val\n #-- HDF5 group name for across-track data\n XT = 'crossing_track_data'\n\n #-- for each input beam pair within the file\n for ptx in sorted(IS2_atl11_pairs):\n #-- output data dictionaries for beam pair\n IS2_atl11_corr[ptx] = dict(cycle_stats=collections.OrderedDict(),\n crossing_track_data=collections.OrderedDict())\n IS2_atl11_fill[ptx] = dict(cycle_stats={},crossing_track_data={})\n IS2_atl11_dims[ptx] = dict(cycle_stats={},crossing_track_data={})\n IS2_atl11_corr_attrs[ptx] = dict(cycle_stats={},crossing_track_data={})\n\n #-- extract along-track and across-track variables\n ref_pt = {}\n latitude = {}\n longitude = {}\n delta_time = {}\n groups = ['AT']\n #-- dictionary with output sea level variables\n MDT,SLA,ADT = ({},{},{})\n #-- number of average segments and number of included cycles\n #-- fill_value for invalid heights and corrections\n fv = IS2_atl11_attrs[ptx]['h_corr']['_FillValue']\n #-- shape of along-track data\n n_points,n_cycles = IS2_atl11_mds[ptx]['delta_time'].shape\n #-- along-track (AT) reference point, latitude, longitude and time\n ref_pt['AT'] = IS2_atl11_mds[ptx]['ref_pt'].copy()\n latitude['AT'] = np.ma.array(IS2_atl11_mds[ptx]['latitude'],\n fill_value=IS2_atl11_attrs[ptx]['latitude']['_FillValue'])\n latitude['AT'].mask = (latitude['AT'] == latitude['AT'].fill_value)\n longitude['AT'] = np.ma.array(IS2_atl11_mds[ptx]['longitude'],\n fill_value=IS2_atl11_attrs[ptx]['longitude']['_FillValue'])\n longitude['AT'].mask = (longitude['AT'] == longitude['AT'].fill_value)\n delta_time['AT'] = np.ma.array(IS2_atl11_mds[ptx]['delta_time'],\n fill_value=IS2_atl11_attrs[ptx]['delta_time']['_FillValue'])\n delta_time['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)\n #-- along-track (AT) sea level corrections\n MDT['AT'] = np.ma.empty((n_points,n_cycles),fill_value=fv)\n MDT['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)\n SLA['AT'] = np.ma.empty((n_points,n_cycles),fill_value=fv)\n SLA['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)\n ADT['AT'] = np.ma.empty((n_points,n_cycles),fill_value=fv)\n ADT['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)\n #-- if running ATL11 crossovers\n if CROSSOVERS:\n #-- add to group\n groups.append('XT')\n #-- shape of across-track data\n n_cross, = IS2_atl11_mds[ptx][XT]['delta_time'].shape\n #-- across-track (XT) reference point, latitude, longitude and time\n ref_pt['XT'] = IS2_atl11_mds[ptx][XT]['ref_pt'].copy()\n latitude['XT'] = np.ma.array(IS2_atl11_mds[ptx][XT]['latitude'],\n fill_value=IS2_atl11_attrs[ptx][XT]['latitude']['_FillValue'])\n latitude['XT'].mask = (latitude['XT'] == latitude['XT'].fill_value)\n longitude['XT'] = np.ma.array(IS2_atl11_mds[ptx][XT]['longitude'],\n fill_value=IS2_atl11_attrs[ptx][XT]['longitude']['_FillValue'])\n latitude['XT'].mask = (latitude['XT'] == longitude['XT'].fill_value)\n delta_time['XT'] = np.ma.array(IS2_atl11_mds[ptx][XT]['delta_time'],\n fill_value=IS2_atl11_attrs[ptx][XT]['delta_time']['_FillValue'])\n delta_time['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)\n #-- across-track (XT) sea level corrections\n MDT['XT'] = np.ma.empty((n_cross),fill_value=fv)\n MDT['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)\n SLA['XT'] = np.ma.empty((n_cross),fill_value=fv)\n SLA['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)\n ADT['XT'] = np.ma.empty((n_cross),fill_value=fv)\n 
ADT['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)\n\n #-- calculate corrections for along-track and across-track data\n for track in groups:\n #-- convert time from ATLAS SDP to CNES Julian Days\n #-- days relative to 1950-01-01T00:00:00\n gps_seconds = atlas_sdp_gps_epoch + delta_time[track]\n leap_seconds = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n cnes_time = icesat2_toolkit.time.convert_delta_time(gps_seconds-leap_seconds,\n epoch1=(1980,1,6,0,0,0), epoch2=(1950,1,1,0,0,0), scale=1.0/86400.0)\n\n #-- extract lat/lon and convert to polar stereographic\n X,Y = transformer.transform(longitude[track],longitude[track])\n\n #-- calculate sea level corrections for track type\n if (track == 'AT'):\n #-- calculate for each cycle if along-track\n for cycle in range(n_cycles):\n #-- interpolate sea level anomalies and dynamic topographies\n MDT[track][:,cycle],SLA[track][:,cycle],ADT[track][:,cycle] = \\\n interpolate_sea_level(base_dir,X,Y,cnes_time[:,cycle],HEM)\n elif (track == 'XT'):\n #-- for each unique CNES day to interpolate in the crossovers\n CJD,inverse = np.unique(np.floor(cnes_time),return_inverse=True)\n for indice,_ in enumerate(CJD):\n #-- indices in original arrays for the CNES day\n i, = np.nonzero(inverse == indice)\n #-- interpolate sea level anomalies and dynamic topographies\n MDT[track][i],SLA[track][i],ADT[track][i] = \\\n interpolate_sea_level(base_dir,X[i],Y[i],cnes_time[i],HEM)\n\n #-- group attributes for beam\n IS2_atl11_corr_attrs[ptx]['description'] = ('Contains the primary science parameters '\n 'for this data set')\n IS2_atl11_corr_attrs[ptx]['beam_pair'] = IS2_atl11_attrs[ptx]['beam_pair']\n IS2_atl11_corr_attrs[ptx]['ReferenceGroundTrack'] = IS2_atl11_attrs[ptx]['ReferenceGroundTrack']\n IS2_atl11_corr_attrs[ptx]['first_cycle'] = IS2_atl11_attrs[ptx]['first_cycle']\n IS2_atl11_corr_attrs[ptx]['last_cycle'] = IS2_atl11_attrs[ptx]['last_cycle']\n IS2_atl11_corr_attrs[ptx]['equatorial_radius'] = IS2_atl11_attrs[ptx]['equatorial_radius']\n IS2_atl11_corr_attrs[ptx]['polar_radius'] = IS2_atl11_attrs[ptx]['polar_radius']\n\n #-- geolocation, time and reference point\n #-- reference point\n IS2_atl11_corr[ptx]['ref_pt'] = ref_pt['AT'].copy()\n IS2_atl11_fill[ptx]['ref_pt'] = None\n IS2_atl11_dims[ptx]['ref_pt'] = None\n IS2_atl11_corr_attrs[ptx]['ref_pt'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['ref_pt']['units'] = \"1\"\n IS2_atl11_corr_attrs[ptx]['ref_pt']['contentType'] = \"referenceInformation\"\n IS2_atl11_corr_attrs[ptx]['ref_pt']['long_name'] = \"Reference point number\"\n IS2_atl11_corr_attrs[ptx]['ref_pt']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx]['ref_pt']['description'] = (\"The reference point is the \"\n \"7 digit segment_id number corresponding to the center of the ATL06 data used \"\n \"for each ATL11 point. 
These are sequential, starting with 1 for the first \"\n \"segment after an ascending equatorial crossing node.\")\n IS2_atl11_corr_attrs[ptx]['ref_pt']['coordinates'] = \\\n \"delta_time latitude longitude\"\n #-- cycle_number\n IS2_atl11_corr[ptx]['cycle_number'] = IS2_atl11_mds[ptx]['cycle_number'].copy()\n IS2_atl11_fill[ptx]['cycle_number'] = None\n IS2_atl11_dims[ptx]['cycle_number'] = None\n IS2_atl11_corr_attrs[ptx]['cycle_number'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['cycle_number']['units'] = \"1\"\n IS2_atl11_corr_attrs[ptx]['cycle_number']['long_name'] = \"Orbital cycle number\"\n IS2_atl11_corr_attrs[ptx]['cycle_number']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx]['cycle_number']['description'] = (\"Number of 91-day periods \"\n \"that have elapsed since ICESat-2 entered the science orbit. Each of the 1,387 \"\n \"reference ground track (RGTs) is targeted in the polar regions once \"\n \"every 91 days.\")\n #-- delta time\n IS2_atl11_corr[ptx]['delta_time'] = delta_time['AT'].copy()\n IS2_atl11_fill[ptx]['delta_time'] = delta_time['AT'].fill_value\n IS2_atl11_dims[ptx]['delta_time'] = ['ref_pt','cycle_number']\n IS2_atl11_corr_attrs[ptx]['delta_time'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['delta_time']['units'] = \"seconds since 2018-01-01\"\n IS2_atl11_corr_attrs[ptx]['delta_time']['long_name'] = \"Elapsed GPS seconds\"\n IS2_atl11_corr_attrs[ptx]['delta_time']['standard_name'] = \"time\"\n IS2_atl11_corr_attrs[ptx]['delta_time']['calendar'] = \"standard\"\n IS2_atl11_corr_attrs[ptx]['delta_time']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx]['delta_time']['description'] = (\"Number of GPS \"\n \"seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset \"\n \"is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds \"\n \"between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. 
By \"\n \"adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the \"\n \"time in gps_seconds relative to the GPS epoch can be computed.\")\n IS2_atl11_corr_attrs[ptx]['delta_time']['coordinates'] = \\\n \"ref_pt cycle_number latitude longitude\"\n #-- latitude\n IS2_atl11_corr[ptx]['latitude'] = latitude['AT'].copy()\n IS2_atl11_fill[ptx]['latitude'] = latitude['AT'].fill_value\n IS2_atl11_dims[ptx]['latitude'] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx]['latitude'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['latitude']['units'] = \"degrees_north\"\n IS2_atl11_corr_attrs[ptx]['latitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl11_corr_attrs[ptx]['latitude']['long_name'] = \"Latitude\"\n IS2_atl11_corr_attrs[ptx]['latitude']['standard_name'] = \"latitude\"\n IS2_atl11_corr_attrs[ptx]['latitude']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx]['latitude']['description'] = (\"Center latitude of \"\n \"selected segments\")\n IS2_atl11_corr_attrs[ptx]['latitude']['valid_min'] = -90.0\n IS2_atl11_corr_attrs[ptx]['latitude']['valid_max'] = 90.0\n IS2_atl11_corr_attrs[ptx]['latitude']['coordinates'] = \\\n \"ref_pt delta_time longitude\"\n #-- longitude\n IS2_atl11_corr[ptx]['longitude'] = longitude['AT'].copy()\n IS2_atl11_fill[ptx]['longitude'] = longitude['AT'].fill_value\n IS2_atl11_dims[ptx]['longitude'] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx]['longitude'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['longitude']['units'] = \"degrees_east\"\n IS2_atl11_corr_attrs[ptx]['longitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl11_corr_attrs[ptx]['longitude']['long_name'] = \"Longitude\"\n IS2_atl11_corr_attrs[ptx]['longitude']['standard_name'] = \"longitude\"\n IS2_atl11_corr_attrs[ptx]['longitude']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx]['longitude']['description'] = (\"Center longitude of \"\n \"selected segments\")\n IS2_atl11_corr_attrs[ptx]['longitude']['valid_min'] = -180.0\n IS2_atl11_corr_attrs[ptx]['longitude']['valid_max'] = 180.0\n IS2_atl11_corr_attrs[ptx]['longitude']['coordinates'] = \\\n \"ref_pt delta_time latitude\"\n\n #-- cycle statistics variables\n IS2_atl11_corr_attrs[ptx]['cycle_stats']['Description'] = (\"The cycle_stats subgroup \"\n \"contains summary information about segments for each reference point, including \"\n \"the uncorrected mean heights for reference surfaces, blowing snow and cloud \"\n \"indicators, and geolocation and height misfit statistics.\")\n IS2_atl11_corr_attrs[ptx]['cycle_stats']['data_rate'] = (\"Data within this group \"\n \"are stored at the average segment rate.\")\n\n #-- interpolated sea level products\n sea_level = dict(mdt=MDT['AT'],sla=SLA['AT'],adt=ADT['AT'])\n for key,val in sea_level.items():\n #-- add to output\n IS2_atl11_corr[ptx]['cycle_stats'][key] = val.copy()\n IS2_atl11_fill[ptx]['cycle_stats'][key] = val.fill_value\n IS2_atl11_dims[ptx]['cycle_stats'][key] = ['ref_pt','cycle_number']\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['units'] = \"meters\"\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['contentType'] = \"referenceInformation\"\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['long_name'] = attrib[key]['long_name']\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['description'] = attrib[key]['description']\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['source'] = 'AVISO/Copernicus'\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['reference'] = 
attrib[key]['reference']\n IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['coordinates'] = \\\n \"../ref_pt ../cycle_number ../delta_time ../latitude ../longitude\"\n\n #-- if crossover measurements were calculated\n if CROSSOVERS:\n #-- crossing track variables\n IS2_atl11_corr_attrs[ptx][XT]['Description'] = (\"The crossing_track_data \"\n \"subgroup contains elevation data at crossover locations. These are \"\n \"locations where two ICESat-2 pair tracks cross, so data are available \"\n \"from both the datum track, for which the granule was generated, and \"\n \"from the crossing track.\")\n IS2_atl11_corr_attrs[ptx][XT]['data_rate'] = (\"Data within this group are \"\n \"stored at the average segment rate.\")\n\n #-- reference point\n IS2_atl11_corr[ptx][XT]['ref_pt'] = ref_pt['XT'].copy()\n IS2_atl11_fill[ptx][XT]['ref_pt'] = None\n IS2_atl11_dims[ptx][XT]['ref_pt'] = None\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['units'] = \"1\"\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['contentType'] = \"referenceInformation\"\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['long_name'] = (\"fit center reference point number, \"\n \"segment_id\")\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['source'] = \"derived, ATL11 algorithm\"\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['description'] = (\"The reference-point number of the \"\n \"fit center for the datum track. The reference point is the 7 digit segment_id number \"\n \"corresponding to the center of the ATL06 data used for each ATL11 point. These are \"\n \"sequential, starting with 1 for the first segment after an ascending equatorial \"\n \"crossing node.\")\n IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['coordinates'] = \\\n \"delta_time latitude longitude\"\n\n #-- reference ground track of the crossing track\n IS2_atl11_corr[ptx][XT]['rgt'] = IS2_atl11_mds[ptx][XT]['rgt'].copy()\n IS2_atl11_fill[ptx][XT]['rgt'] = IS2_atl11_attrs[ptx][XT]['rgt']['_FillValue']\n IS2_atl11_dims[ptx][XT]['rgt'] = None\n IS2_atl11_corr_attrs[ptx][XT]['rgt'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['units'] = \"1\"\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['contentType'] = \"referenceInformation\"\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['long_name'] = \"crossover reference ground track\"\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['description'] = \"The RGT number for the crossing data.\"\n IS2_atl11_corr_attrs[ptx][XT]['rgt']['coordinates'] = \\\n \"ref_pt delta_time latitude longitude\"\n #-- cycle_number of the crossing track\n IS2_atl11_corr[ptx][XT]['cycle_number'] = IS2_atl11_mds[ptx][XT]['cycle_number'].copy()\n IS2_atl11_fill[ptx][XT]['cycle_number'] = IS2_atl11_attrs[ptx][XT]['cycle_number']['_FillValue']\n IS2_atl11_dims[ptx][XT]['cycle_number'] = None\n IS2_atl11_corr_attrs[ptx][XT]['cycle_number'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['units'] = \"1\"\n IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['long_name'] = \"crossover cycle number\"\n IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['description'] = (\"Cycle number for the \"\n \"crossing data. Number of 91-day periods that have elapsed since ICESat-2 entered \"\n \"the science orbit. 
Each of the 1,387 reference ground track (RGTs) is targeted \"\n \"in the polar regions once every 91 days.\")\n #-- delta time of the crossing track\n IS2_atl11_corr[ptx][XT]['delta_time'] = delta_time['XT'].copy()\n IS2_atl11_fill[ptx][XT]['delta_time'] = delta_time['XT'].fill_value\n IS2_atl11_dims[ptx][XT]['delta_time'] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx][XT]['delta_time'] = {}\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['units'] = \"seconds since 2018-01-01\"\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['long_name'] = \"Elapsed GPS seconds\"\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['standard_name'] = \"time\"\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['calendar'] = \"standard\"\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx][XT]['delta_time']['description'] = (\"Number of GPS \"\n \"seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset \"\n \"is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds \"\n \"between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By \"\n \"adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the \"\n \"time in gps_seconds relative to the GPS epoch can be computed.\")\n IS2_atl11_corr_attrs[ptx]['delta_time']['coordinates'] = \\\n \"ref_pt latitude longitude\"\n #-- latitude of the crossover measurement\n IS2_atl11_corr[ptx][XT]['latitude'] = latitude['XT'].copy()\n IS2_atl11_fill[ptx][XT]['latitude'] = latitude['XT'].fill_value\n IS2_atl11_dims[ptx][XT]['latitude'] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx][XT]['latitude'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['units'] = \"degrees_north\"\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['long_name'] = \"crossover latitude\"\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['standard_name'] = \"latitude\"\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['description'] = (\"Center latitude of \"\n \"selected segments\")\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['valid_min'] = -90.0\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['valid_max'] = 90.0\n IS2_atl11_corr_attrs[ptx][XT]['latitude']['coordinates'] = \\\n \"ref_pt delta_time longitude\"\n #-- longitude of the crossover measurement\n IS2_atl11_corr[ptx][XT]['longitude'] = longitude['XT'].copy()\n IS2_atl11_fill[ptx][XT]['longitude'] = longitude['XT'].fill_value\n IS2_atl11_dims[ptx][XT]['longitude'] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx][XT]['longitude'] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['units'] = \"degrees_east\"\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['long_name'] = \"crossover longitude\"\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['standard_name'] = \"longitude\"\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['source'] = \"ATL06\"\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['description'] = (\"Center longitude of \"\n \"selected segments\")\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['valid_min'] = -180.0\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['valid_max'] = 180.0\n IS2_atl11_corr_attrs[ptx][XT]['longitude']['coordinates'] = \\\n \"ref_pt delta_time latitude\"\n\n #-- interpolated sea level at the crossover measurement\n sea_level = 
dict(mdt=MDT['XT'],sla=SLA['XT'],adt=ADT['XT'])\n for key,val in sea_level.items():\n #-- add to output\n IS2_atl11_corr[ptx][XT][key] = val.copy()\n IS2_atl11_fill[ptx][XT][key] = val.fill_value\n IS2_atl11_dims[ptx][XT][key] = ['ref_pt']\n IS2_atl11_corr_attrs[ptx][XT][key] = collections.OrderedDict()\n IS2_atl11_corr_attrs[ptx][XT][key]['units'] = \"meters\"\n IS2_atl11_corr_attrs[ptx][XT][key]['contentType'] = \"referenceInformation\"\n IS2_atl11_corr_attrs[ptx][XT][key]['long_name'] = attrib[key]['long_name']\n IS2_atl11_corr_attrs[ptx][XT][key]['description'] = attrib[key]['description']\n IS2_atl11_corr_attrs[ptx][XT][key]['source'] = 'AVISO/Copernicus'\n IS2_atl11_corr_attrs[ptx][XT][key]['reference'] = attrib[key]['reference']\n IS2_atl11_corr_attrs[ptx][XT][key]['coordinates'] = \\\n \"ref_pt delta_time latitude longitude\"\n\n #-- output HDF5 files with interpolated sea level data\n fargs = (PRD,'AVISO_SEA_LEVEL',TRK,GRAN,SCYC,ECYC,RL,VERS,AUX)\n file_format = '{0}_{1}_{2}{3}_{4}{5}_{6}_{7}{8}.h5'\n output_file = os.path.join(DIRECTORY,file_format.format(*fargs))\n #-- print file information\n logging.info('\\t{0}'.format(output_file))\n HDF5_ATL11_corr_write(IS2_atl11_corr, IS2_atl11_corr_attrs,\n CLOBBER=True, INPUT=os.path.basename(FILE), CROSSOVERS=CROSSOVERS,\n FILL_VALUE=IS2_atl11_fill, DIMENSIONS=IS2_atl11_dims,\n FILENAME=output_file)\n #-- change the permissions mode\n os.chmod(output_file, MODE)\n\n#-- PURPOSE: outputting the correction values for ICESat-2 data to HDF5\ndef HDF5_ATL11_corr_write(IS2_atl11_corr, IS2_atl11_attrs, INPUT=None,\n FILENAME='', FILL_VALUE=None, DIMENSIONS=None, CROSSOVERS=False,\n CLOBBER=False):\n #-- setting HDF5 clobber attribute\n if CLOBBER:\n clobber = 'w'\n else:\n clobber = 'w-'\n\n #-- open output HDF5 file\n fileID = h5py.File(os.path.expanduser(FILENAME), clobber)\n\n #-- create HDF5 records\n h5 = {}\n\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n h5['ancillary_data'] = {}\n for k,v in IS2_atl11_corr['ancillary_data'].items():\n #-- Defining the HDF5 dataset variables\n val = 'ancillary_data/{0}'.format(k)\n h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- add HDF5 variable attributes\n for att_name,att_val in IS2_atl11_attrs['ancillary_data'][k].items():\n h5['ancillary_data'][k].attrs[att_name] = att_val\n\n #-- write each output beam pair\n pairs = [k for k in IS2_atl11_corr.keys() if bool(re.match(r'pt\\d',k))]\n for ptx in pairs:\n fileID.create_group(ptx)\n h5[ptx] = {}\n #-- add HDF5 group attributes for beam\n for att_name in ['description','beam_pair','ReferenceGroundTrack',\n 'first_cycle','last_cycle','equatorial_radius','polar_radius']:\n fileID[ptx].attrs[att_name] = IS2_atl11_attrs[ptx][att_name]\n\n #-- ref_pt, cycle number, geolocation and delta_time variables\n for k in ['ref_pt','cycle_number','delta_time','latitude','longitude']:\n #-- values and attributes\n v = IS2_atl11_corr[ptx][k]\n attrs = IS2_atl11_attrs[ptx][k]\n fillvalue = FILL_VALUE[ptx][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}'.format(ptx,k)\n if fillvalue:\n h5[ptx][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, fillvalue=fillvalue, compression='gzip')\n else:\n h5[ptx][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- create or attach dimensions for HDF5 variable\n if 
DIMENSIONS[ptx][k]:\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[ptx][k]):\n h5[ptx][k].dims[i].attach_scale(h5[ptx][dim])\n else:\n #-- make dimension\n h5[ptx][k].make_scale(k)\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[ptx][k].attrs[att_name] = att_val\n\n #-- add to cycle_stats variables\n groups = ['cycle_stats']\n #-- if running crossovers: add to crossing_track_data variables\n if CROSSOVERS:\n groups.append('crossing_track_data')\n for key in groups:\n fileID[ptx].create_group(key)\n h5[ptx][key] = {}\n for att_name in ['Description','data_rate']:\n att_val=IS2_atl11_attrs[ptx][key][att_name]\n fileID[ptx][key].attrs[att_name] = att_val\n for k,v in IS2_atl11_corr[ptx][key].items():\n #-- attributes\n attrs = IS2_atl11_attrs[ptx][key][k]\n fillvalue = FILL_VALUE[ptx][key][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}/{2}'.format(ptx,key,k)\n if fillvalue:\n h5[ptx][key][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, fillvalue=fillvalue, compression='gzip')\n else:\n h5[ptx][key][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- create or attach dimensions for HDF5 variable\n if DIMENSIONS[ptx][key][k]:\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[ptx][key][k]):\n if (key == 'cycle_stats'):\n h5[ptx][key][k].dims[i].attach_scale(h5[ptx][dim])\n else:\n h5[ptx][key][k].dims[i].attach_scale(h5[ptx][key][dim])\n else:\n #-- make dimension\n h5[ptx][key][k].make_scale(k)\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[ptx][key][k].attrs[att_name] = att_val\n\n #-- HDF5 file title\n fileID.attrs['featureType'] = 'trajectory'\n fileID.attrs['title'] = 'ATLAS/ICESat-2 Annual Land Ice Height'\n fileID.attrs['summary'] = ('The purpose of ATL11 is to provide an ICESat-2 '\n 'satellite cycle summary of heights and height changes of land-based '\n 'ice and will be provided as input to ATL15 and ATL16, gridded '\n 'estimates of heights and height-changes.')\n fileID.attrs['description'] = ('Land ice parameters for each beam pair. 
'\n 'All parameters are calculated for the same along-track increments '\n 'for each beam pair and repeat.')\n date_created = datetime.datetime.today()\n fileID.attrs['date_created'] = date_created.isoformat()\n project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'\n fileID.attrs['project'] = project\n platform = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'\n fileID.attrs['project'] = platform\n #-- add attribute for elevation instrument and designated processing level\n instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'\n fileID.attrs['instrument'] = instrument\n fileID.attrs['source'] = 'Spacecraft'\n fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'\n fileID.attrs['processing_level'] = '4'\n #-- add attributes for input ATL11 files\n fileID.attrs['input_files'] = os.path.basename(INPUT)\n #-- find geospatial and temporal ranges\n lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)\n for ptx in pairs:\n lon = IS2_atl11_corr[ptx]['longitude']\n lat = IS2_atl11_corr[ptx]['latitude']\n delta_time = IS2_atl11_corr[ptx]['delta_time']\n valid = np.nonzero(delta_time != FILL_VALUE[ptx]['delta_time'])\n #-- setting the geospatial and temporal ranges\n lnmn = lon.min() if (lon.min() < lnmn) else lnmn\n lnmx = lon.max() if (lon.max() > lnmx) else lnmx\n ltmn = lat.min() if (lat.min() < ltmn) else ltmn\n ltmx = lat.max() if (lat.max() > ltmx) else ltmx\n tmn = delta_time[valid].min() if (delta_time[valid].min() < tmn) else tmn\n tmx = delta_time[valid].max() if (delta_time[valid].max() > tmx) else tmx\n #-- add geospatial and temporal attributes\n fileID.attrs['geospatial_lat_min'] = ltmn\n fileID.attrs['geospatial_lat_max'] = ltmx\n fileID.attrs['geospatial_lon_min'] = lnmn\n fileID.attrs['geospatial_lon_max'] = lnmx\n fileID.attrs['geospatial_lat_units'] = \"degrees_north\"\n fileID.attrs['geospatial_lon_units'] = \"degrees_east\"\n fileID.attrs['geospatial_ellipsoid'] = \"WGS84\"\n fileID.attrs['date_type'] = 'UTC'\n fileID.attrs['time_type'] = 'CCSDS UTC-A'\n #-- convert start and end time from ATLAS SDP seconds into GPS seconds\n atlas_sdp_gps_epoch=IS2_atl11_corr['ancillary_data']['atlas_sdp_gps_epoch']\n gps_seconds = atlas_sdp_gps_epoch + np.array([tmn,tmx])\n #-- calculate leap seconds\n leaps = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n #-- convert from seconds since 1980-01-06T00:00:00 to Julian days\n MJD = icesat2_toolkit.time.convert_delta_time(gps_seconds - leaps,\n epoch1=(1980,1,6,0,0,0), epoch2=(1858,11,17,0,0,0), scale=1.0/86400.0)\n #-- convert to calendar date\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(MJD + 2400000.5,\n FORMAT='tuple')\n #-- add attributes with measurement date start, end and duration\n tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),\n int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))\n fileID.attrs['time_coverage_start'] = tcs.isoformat()\n tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),\n int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))\n fileID.attrs['time_coverage_end'] = tce.isoformat()\n fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)\n #-- Closing the HDF5 file\n fileID.close()\n\n#-- Main program that calls interp_sea_level_ICESat2()\ndef main():\n #-- Read the system arguments listed after the program\n parser = argparse.ArgumentParser(\n description=\"\"\"Interpolates AVISO sea level anomalies, absolute\n dynamic topography and mean dynamic topography to ICESat-2\n ATL11 annual land ice 
height data\n \"\"\",\n fromfile_prefix_chars=\"@\"\n )\n parser.convert_arg_line_to_args = \\\n icesat2_toolkit.utilities.convert_arg_line_to_args\n #-- command line parameters\n parser.add_argument('infile',\n type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='+',\n help='ICESat-2 ATL11 file to run')\n #-- directory with sea level data\n parser.add_argument('--directory','-D',\n type=lambda p: os.path.abspath(os.path.expanduser(p)),\n default=os.getcwd(),\n help='Working data directory')\n #-- run with ATL11 crossovers\n parser.add_argument('--crossovers','-C',\n default=False, action='store_true',\n help='Run ATL11 Crossovers')\n #-- verbosity settings\n #-- verbose will output information about each output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Output information about each created file')\n #-- permissions mode of the local files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of directories and files created')\n args,_ = parser.parse_known_args()\n\n #-- run for each input ATL11 file\n for FILE in args.infile:\n interp_sea_level_ICESat2(args.directory, FILE,\n CROSSOVERS=args.crossovers, VERBOSE=args.verbose,\n MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.logical_not", "numpy.nonzero", "numpy.ma.empty", "numpy.zeros_like", "numpy.broadcast_to", "numpy.floor", "numpy.shape", "numpy.ma.array", "numpy.array", "numpy.meshgrid", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ipattarapong/dbnd
[ "7bd65621c46c73e078eb628f994127ad4c7dbd1a" ]
[ "modules/dbnd/test_dbnd/targets_tests/conftest.py" ]
[ "from __future__ import absolute_import\n\nimport os\n\nimport pandas as pd\n\nfrom pytest import fixture\n\n\n@fixture\ndef simple_df():\n return pd.DataFrame(data=[[1, 1], [2, 2]], columns=[\"c1\", \"c2\"])\n\n\n@fixture\ndef pandas_data_frame():\n names = [\"Bob\", \"Jessica\", \"Mary\", \"John\", \"Mel\"]\n births = [968, 155, 77, 578, 973]\n df = pd.DataFrame(data=list(zip(names, births)), columns=[\"Names\", \"Births\"])\n return df\n\n\n@fixture\ndef pandas_data_frame_histograms(pandas_data_frame):\n return {\n \"Births\": ([2, 0, 1, 2], [77.0, 301.0, 525.0, 749.0, 973.0],),\n \"Names\": ([1, 1, 1, 1, 1], [\"Bob\", \"Mel\", \"John\", \"Mary\", \"Jessica\"]),\n }\n\n\n@fixture\ndef pandas_data_frame_stats(pandas_data_frame):\n return {\n \"Births\": {\n \"count\": 5.0,\n \"mean\": 550.2,\n \"std\": 428.42,\n \"min\": 77.0,\n \"25%\": 155.0,\n \"50%\": 578.0,\n \"75%\": 968.0,\n \"max\": 973.0,\n \"non-null\": 5,\n \"null-count\": 0,\n \"distinct\": 5,\n \"type\": \"int64\",\n },\n \"Names\": {\n \"count\": 5,\n \"distinct\": 5,\n \"freq\": 1,\n \"non-null\": 5,\n \"null-count\": 0,\n \"type\": \"object\",\n \"unique\": 5,\n },\n }\n\n\n@fixture\ndef s1_root_dir(tmpdir):\n dir_path = str(tmpdir.join(\"dir.csv/\"))\n os.makedirs(dir_path)\n return dir_path + \"/\"\n\n\n@fixture\ndef s1_file_1_csv(s1_root_dir, simple_df):\n path = os.path.join(s1_root_dir, \"1.csv\")\n simple_df.head(1).to_csv(path, index=False)\n return path\n\n\n@fixture\ndef s1_file_2_csv(s1_root_dir, simple_df):\n path = os.path.join(s1_root_dir, \"2.csv\")\n simple_df.tail(1).to_csv(path, index=False)\n return path\n\n\n@fixture\ndef s1_dir_with_csv(s1_root_dir, s1_file_1_csv, s1_file_2_csv):\n return s1_root_dir, s1_file_1_csv, s1_file_2_csv\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
albertvillanova/audio
[ "0cd25093626d067e008e1f81ad76e072bd4a1edd", "0cd25093626d067e008e1f81ad76e072bd4a1edd", "0cd25093626d067e008e1f81ad76e072bd4a1edd" ]
[ "examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py", "examples/source_separation/utils/metrics.py", "examples/pipeline_wavernn/inference.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport os\nfrom typing import Tuple\n\nimport torch\nimport torchaudio\nfrom torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model\nfrom greedy_decoder import Decoder\n\nTORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(\".\")[:2])\nif TORCH_VERSION >= (1, 10):\n import torch.ao.quantization as tq\nelse:\n import torch.quantization as tq\n\n_LG = logging.getLogger(__name__)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__,\n )\n parser.add_argument(\n '--model',\n required=True,\n help='Path to the input pretrained weight file.'\n )\n parser.add_argument(\n '--output-path',\n help='Path to the directory, where the Torchscript-ed pipelines are saved.',\n )\n parser.add_argument(\n '--test-file',\n help='Path to a test audio file.',\n )\n parser.add_argument(\n '--quantize',\n action='store_true',\n help='Quantize the model.',\n )\n parser.add_argument(\n '--debug',\n action='store_true',\n help=(\n 'When enabled, individual components are separately tested '\n 'for the numerical compatibility and TorchScript compatibility.'\n )\n )\n return parser.parse_args()\n\n\nclass Loader(torch.nn.Module):\n def forward(self, audio_path: str) -> torch.Tensor:\n waveform, sample_rate = torchaudio.load(audio_path)\n if sample_rate != 16000:\n waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)\n return waveform\n\n\nclass Encoder(torch.nn.Module):\n def __init__(self, encoder: torch.nn.Module):\n super().__init__()\n self.encoder = encoder\n\n def forward(self, waveform: torch.Tensor) -> torch.Tensor:\n result, _ = self.encoder(waveform)\n return result[0]\n\n\ndef _get_model(model_id):\n from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer\n labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]\n original = Wav2Vec2ForCTC.from_pretrained(model_id)\n model = import_huggingface_model(original)\n return model.eval(), labels\n\n\ndef _get_decoder(labels):\n return Decoder(labels)\n\n\ndef _main():\n args = _parse_args()\n _init_logging(args.debug)\n _LG.info('Loading model: %s', args.model)\n model, labels = _get_model(args.model)\n _LG.info('Labels: %s', labels)\n _LG.info('Building pipeline')\n loader = Loader()\n encoder = Encoder(model)\n decoder = _get_decoder(labels)\n _LG.info(encoder)\n\n if args.quantize:\n _LG.info('Quantizing the model')\n model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()\n encoder = tq.quantize_dynamic(\n encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)\n _LG.info(encoder)\n\n # test\n if args.test_file:\n _LG.info('Testing with %s', args.test_file)\n waveform = loader(args.test_file)\n emission = encoder(waveform)\n transcript = decoder(emission)\n _LG.info(transcript)\n\n torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))\n torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))\n torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))\n\n\ndef _init_logging(debug=False):\n level = logging.DEBUG if debug else logging.INFO\n format_ = (\n '%(message)s' if not debug else\n '%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'\n )\n logging.basicConfig(level=level, format=format_)\n\n\nif __name__ == '__main__':\n _main()\n", "import math\nfrom typing import Optional\nfrom itertools import 
permutations\n\nimport torch\n\n\ndef sdr(\n estimate: torch.Tensor,\n reference: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n epsilon: float = 1e-8\n) -> torch.Tensor:\n \"\"\"Computes source-to-distortion ratio.\n\n 1. scale the reference signal with power(s_est * s_ref) / powr(s_ref * s_ref)\n 2. compute SNR between adjusted estimate and reference.\n\n Args:\n estimate (torch.Tensor): Estimtaed signal.\n Shape: [batch, speakers (can be 1), time frame]\n reference (torch.Tensor): Reference signal.\n Shape: [batch, speakers, time frame]\n mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).\n Shape: [batch, 1, time frame]\n epsilon (float, optional): constant value used to stabilize division.\n\n Returns:\n torch.Tensor: scale-invariant source-to-distortion ratio.\n Shape: [batch, speaker]\n\n References:\n - Single-channel multi-speaker separation using deep clustering\n Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,\n - Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation\n Luo, Yi and Mesgarani, Nima\n https://arxiv.org/abs/1809.07454\n\n Notes:\n This function is tested to produce the exact same result as\n https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56\n \"\"\"\n reference_pow = reference.pow(2).mean(axis=2, keepdim=True)\n mix_pow = (estimate * reference).mean(axis=2, keepdim=True)\n scale = mix_pow / (reference_pow + epsilon)\n\n reference = scale * reference\n error = estimate - reference\n\n reference_pow = reference.pow(2)\n error_pow = error.pow(2)\n\n if mask is None:\n reference_pow = reference_pow.mean(axis=2)\n error_pow = error_pow.mean(axis=2)\n else:\n denom = mask.sum(axis=2)\n reference_pow = (mask * reference_pow).sum(axis=2) / denom\n error_pow = (mask * error_pow).sum(axis=2) / denom\n\n return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)\n\n\nclass PIT(torch.nn.Module):\n \"\"\"Applies utterance-level speaker permutation\n\n Computes the maxium possible value of the given utility function\n over the permutations of the speakers.\n\n Args:\n utility_func (function):\n Function that computes the utility (opposite of loss) with signature of\n (extimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor\n where input Tensors are shape of [batch, speakers, frame] and\n the output Tensor is shape of [batch, speakers].\n\n References:\n - Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of\n Deep Recurrent Neural Networks\n Morten Kolbæk, Dong Yu, Zheng-Hua Tan and Jesper Jensen\n https://arxiv.org/abs/1703.06284\n \"\"\"\n\n def __init__(self, utility_func):\n super().__init__()\n self.utility_func = utility_func\n\n def forward(\n self,\n estimate: torch.Tensor,\n reference: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n epsilon: float = 1e-8\n ) -> torch.Tensor:\n \"\"\"Compute utterance-level PIT Loss\n\n Args:\n estimate (torch.Tensor): Estimated source signals.\n Shape: [bacth, speakers, time frame]\n reference (torch.Tensor): Reference (original) source signals.\n Shape: [batch, speakers, time frame]\n mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).\n Shape: [batch, 1, time frame]\n epsilon (float, optional): constant value used to stabilize division.\n\n Returns:\n torch.Tensor: Maximum criterion over the speaker permutation.\n Shape: [batch, ]\n \"\"\"\n assert estimate.shape == 
reference.shape\n\n batch_size, num_speakers = reference.shape[:2]\n num_permute = math.factorial(num_speakers)\n\n util_mat = torch.zeros(\n batch_size, num_permute, dtype=estimate.dtype, device=estimate.device\n )\n for i, idx in enumerate(permutations(range(num_speakers))):\n util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)\n util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension\n return util_mat.max(dim=1).values\n\n\n_sdr_pit = PIT(utility_func=sdr)\n\n\ndef sdr_pit(\n estimate: torch.Tensor,\n reference: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n epsilon: float = 1e-8):\n \"\"\"Computes scale-invariant source-to-distortion ratio.\n\n 1. adjust both estimate and reference to have 0-mean\n 2. scale the reference signal with power(s_est * s_ref) / powr(s_ref * s_ref)\n 3. compute SNR between adjusted estimate and reference.\n\n Args:\n estimate (torch.Tensor): Estimtaed signal.\n Shape: [batch, speakers (can be 1), time frame]\n reference (torch.Tensor): Reference signal.\n Shape: [batch, speakers, time frame]\n mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).\n Shape: [batch, 1, time frame]\n epsilon (float, optional): constant value used to stabilize division.\n\n Returns:\n torch.Tensor: scale-invariant source-to-distortion ratio.\n Shape: [batch, speaker]\n\n References:\n - Single-channel multi-speaker separation using deep clustering\n Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,\n - Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation\n Luo, Yi and Mesgarani, Nima\n https://arxiv.org/abs/1809.07454\n\n Notes:\n This function is tested to produce the exact same result as the reference implementation,\n *when the inputs have 0-mean*\n https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153\n \"\"\"\n return _sdr_pit(estimate, reference, mask, epsilon)\n\n\ndef sdri(\n estimate: torch.Tensor,\n reference: torch.Tensor,\n mix: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n epsilon: float = 1e-8,\n) -> torch.Tensor:\n \"\"\"Compute the improvement of SDR (SDRi).\n\n This function compute how much SDR is improved if the estimation is changed from\n the original mixture signal to the actual estimated source signals. That is,\n ``SDR(estimate, reference) - SDR(mix, reference)``.\n\n For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,\n so that best combination of sources between the reference signals and the esimate signals\n are picked.\n\n Args:\n estimate (torch.Tensor): Estimated source signals.\n Shape: [batch, speakers, time frame]\n reference (torch.Tensor): Reference (original) source signals.\n Shape: [batch, speakers, time frame]\n mix (torch.Tensor): Mixed souce signals, from which the setimated signals were generated.\n Shape: [batch, speakers == 1, time frame]\n mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).\n Shape: [batch, 1, time frame]\n epsilon (float, optional): constant value used to stabilize division.\n\n Returns:\n torch.Tensor: Improved SDR. 
Shape: [batch, ]\n\n References:\n - Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation\n Luo, Yi and Mesgarani, Nima\n https://arxiv.org/abs/1809.07454\n \"\"\"\n sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]\n base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]\n return sdr_ - base_sdr.mean(dim=1)\n", "import argparse\n\nimport torch\nimport torchaudio\nfrom torchaudio.transforms import MelSpectrogram\nfrom torchaudio.models import wavernn\nfrom torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS\nfrom torchaudio.datasets import LJSPEECH\n\nfrom wavernn_inference_wrapper import WaveRNNInferenceWrapper\nfrom processing import NormalizeDB\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--output-wav-path\", default=\"./output.wav\", type=str, metavar=\"PATH\",\n help=\"The path to output the reconstructed wav file.\",\n )\n parser.add_argument(\n \"--jit\", default=False, action=\"store_true\",\n help=\"If used, the model and inference function is jitted.\"\n )\n parser.add_argument(\n \"--no-batch-inference\", default=False, action=\"store_true\",\n help=\"Don't use batch inference.\"\n )\n parser.add_argument(\n \"--no-mulaw\", default=False, action=\"store_true\",\n help=\"Don't use mulaw decoder to decoder the signal.\"\n )\n parser.add_argument(\n \"--checkpoint-name\", default=\"wavernn_10k_epochs_8bits_ljspeech\",\n choices=list(_MODEL_CONFIG_AND_URLS.keys()),\n help=\"Select the WaveRNN checkpoint.\"\n )\n parser.add_argument(\n \"--batch-timesteps\", default=100, type=int,\n help=\"The time steps for each batch. Only used when batch inference is used\",\n )\n parser.add_argument(\n \"--batch-overlap\", default=5, type=int,\n help=\"The overlapping time steps between batches. Only used when batch inference is used\",\n )\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n waveform, sample_rate, _, _ = LJSPEECH(\"./\", download=True)[0]\n\n mel_kwargs = {\n 'sample_rate': sample_rate,\n 'n_fft': 2048,\n 'f_min': 40.,\n 'n_mels': 80,\n 'win_length': 1100,\n 'hop_length': 275,\n 'mel_scale': 'slaney',\n 'norm': 'slaney',\n 'power': 1,\n }\n transforms = torch.nn.Sequential(\n MelSpectrogram(**mel_kwargs),\n NormalizeDB(min_level_db=-100, normalization=True),\n )\n mel_specgram = transforms(waveform)\n\n wavernn_model = wavernn(args.checkpoint_name).eval().to(device)\n wavernn_inference_model = WaveRNNInferenceWrapper(wavernn_model)\n\n if args.jit:\n wavernn_inference_model = torch.jit.script(wavernn_inference_model)\n\n with torch.no_grad():\n output = wavernn_inference_model(mel_specgram.to(device),\n mulaw=(not args.no_mulaw),\n batched=(not args.no_batch_inference),\n timesteps=args.batch_timesteps,\n overlap=args.batch_overlap,)\n\n torchaudio.save(args.output_wav_path, output, sample_rate=sample_rate)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n" ]
[ [ "torch.jit.script", "torch.__version__.split", "torch.quantization.quantize_dynamic" ], [ "torch.log10", "torch.zeros" ], [ "torch.jit.script", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tccw/geotools
[ "babfa6ab190c4ee89e97b87062505c1880f50233" ]
[ "toys/fatiando_2D_videos.py" ]
[ "\r\n\"\"\"\r\nUses the seismic package from Fatiando to create 2D finite difference\r\nmodels for wave propogation in different velocity fields.\r\n\r\nThe BP velocity model is 67km long and 12km deep, and was built on a 6.25m x 12.5m grid.\r\nIn order for Fatiando to work the cells have to be square so I ignore this.\r\nhttp://software.seg.org/datasets/2D/2004_BP_Vel_Benchmark/eage_abstract.pdf\r\n\r\nThe Marmousi2 Vp model is 3.5 km in depth and 17 km across on a 1.25m x 1.25m grid.\r\nhttp://www.ahay.org/RSF/book/data/marmousi2/paper.pdf\r\n\r\nIgnoring the density field for the BP model, and the Marmousi2 density field is\r\nconstant so it is also ignored.\r\n\"\"\"\r\n\r\nimport segyio\r\nimport numpy as np\r\nfrom matplotlib import animation\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nfrom fatiando.seismic import wavefd\r\n\r\nfname1 = r'data/vel_z6.25m_x12.5m_exact.segy'\r\nfname2 = r'data/vp_marmousi-ii.segy'\r\n# fname3 = r\"H:\\DATA\\shenzi_raz_3d_t4_sb11_velocity_mod_depth Random line [2D Converted] 1.sgy\"\r\nwith segyio.open(fname1) as f:\r\n BP_2004_vpModel = segyio.tools.cube(f)\r\n BP_2004_vpModel = np.squeeze(BP_2004_vpModel.T)\r\n # np.save('data\\BP_2004_vpModel_6.25x12.5m_.npy',BP_2004_vpModel)\r\n\r\nwith segyio.open(fname2) as f:\r\n vp_marmousi = segyio.tools.cube(f)\r\n vp_marmousi = np.squeeze(vp_marmousi.T * 1000) # rescaling from km/s to m/s\r\n # np.save(r'data\\vp_marmousi-ii_1.25x1.25m_.npy', vp_marmousi)\r\n# with segyio.open(fname3) as f:\r\n# vp_shenzi = segyio.tools.cube(f)\r\n# vp_shenzi = np.squeeze(vp_shenzi.T * 0.3048) # conver from ft/s to m/s\r\n\r\n# Fun with Repsol logos\r\ndef rescale(array, new_min, new_max):\r\n array_rscl = (array - array.min()) * (new_max - new_min) / (array.max() - array.min()) + new_min\r\n return array_rscl\r\n\r\n# img = cv.imread(r\"C:\\Users\\Thomas Cowan\\Documents\\GitHub\\geotools\\toys\\velocities_from_images\\Orange_Line_Logo_sq.png\")\r\n# img_gryscl = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n# vel_repsol = np.where(img_gryscl == 255, 1500, 3000)\r\nfname_logo = r\"C:\\Users\\Thomas Cowan\\Documents\\GitHub\\geotools\\toys\\velocities_from_images\\REPSOL.png\"\r\ncolor_logo = cv.imread(fname_logo)\r\ncolor_logo_rgb = cv.cvtColor(color_logo, cv.COLOR_BGR2RGB)\r\ncolor_logo = np.sum(color_logo,axis=2)\r\nvel_repsol_color = rescale(color_logo, 6500,1500)\r\n\r\n\r\nprint('The BP velocity model has shape: ' + str(BP_2004_vpModel.shape))\r\nprint('The Marmousi velocity model has the shape: ' + str(vp_marmousi.shape))\r\n\r\n\r\n# Clipping the model to a sqaure. Seems to break with a rectangle but\r\n# I've made rectangular models work before... not sure what is wrong.\r\n\r\nBP_2004_vpModel_sq = BP_2004_vpModel[0:1910, 1910:3820]\r\nvp_marmousi_sq = vp_marmousi[:2800, 6500:9300]\r\n# vp_shenzi_sq = vp_shenzi[5:1270,225:1490]\r\n# print('The Shenzi velocity model has the shape: ' + str(vp_shenzi_sq.shape))\r\n# Downsampled the square models for faster computing\r\nsrc_model = vp_marmousi_sq\r\ndst = src_model\r\n\r\n# pyrDown likes integers\r\nrows, cols = src_model.shape\r\nrows = int(rows)\r\ncols = int(cols)\r\n\r\n# Rows need to be divisible by 2(ish...) 
for this method of downsaplting to work\r\ndst = cv.pyrDown(src_model, (rows/2, cols/2))\r\n# dst = cv2.pyrDown(dst, (int(dst.shape[0])/2, int(dst.shape[1]/2)))\r\n\r\nprint('The original model is ' + str(src_model.shape))\r\nprint('The downsampled model is ' + str(dst.shape))\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# ------------------------------------------------------------------------------\r\n\r\n# Initializing and plotting the finite-difference model\r\n\r\n# Initialize a blank finite-difference grid with a spacing of your choosing\r\n# shape = BP_2004_vpModel_sq.shape\r\nshape = vel_repsol_color.shape\r\nds = 1.6667 # spacing in meters\r\narea = [0, shape[0] * ds, 0, shape[1] * ds]\r\n\r\n# Fill the velocity field\r\nvelocity = vel_repsol_color\r\n\r\n# Instantiate the source\r\nfc = 100. # The approximate frequency of the source\r\nsource = [wavefd.GaussSource((velocity.shape[0] / 2 -2) * ds,\r\n 2 * ds, area, shape, 1000., fc)]\r\n# source = [wavefd.MexHatSource(950 * ds, 2 * ds, area, shape, 400., fc)]\r\ndt = wavefd.scalar_maxdt(area, shape, np.max(velocity))\r\nduration = 1\r\nmaxit = int(duration / dt)\r\n\r\n# Generate the stations and reciever location\r\nnum_stations = 100\r\nspac = velocity.shape[0]/num_stations # station spacing\r\nstations = [[i*spac*ds, 3 * ds] for i in range(1,num_stations)] # geophone coordinates (x,z)\r\nseismogram_list = ['seis' + str(i) for i in range(1,num_stations)] # Supposed to be for labeling geophones\r\n\r\nsnapshots = 10 # number of iterations before the plot updates\r\nsimulation = wavefd.scalar(velocity, area, dt, maxit, source, stations, snapshots, padding = 500, taper = 0.0005)\r\n\r\n# Making the animation\r\nplot_spacing = 50\r\n\r\nx_rec = [i[0] for i in stations] # for plotting geophones\r\ny_rec = [i[1] for i in stations]\r\nx_src = (velocity.shape[0] / 2 -2) * ds\r\ny_src = 2 * ds\r\n\r\nfig = plt.figure(figsize=(15, 10))\r\n# plt.rc('text', usetex=True)\r\nplt.subplots_adjust(right=0.98, left=0.11, hspace=0.0, top=0.93)\r\nplt.subplot2grid((2, 8), (0, 0), colspan=5, rowspan=3)\r\nplt.imshow(color_logo_rgb, extent=area, origin='lower')# cmap='plasma_r')\r\nticksx = plt.gca().get_xticks() / 1000\r\nticksy = plt.gca().get_yticks() / 1000\r\nfig.gca().set_xticklabels(ticksx.astype(float))\r\nfig.gca().set_yticklabels(ticksy.astype(float))\r\nplt.xticks(rotation=45)\r\n# plt.colorbar(shrink = 0.59,label = r'P-velocity $\\frac{m}{s}$',\r\n# orientation = 'horizontal')\r\nplt.title('2D P-wave simulation', size = 20)\r\nwavefield = plt.imshow(np.zeros_like(velocity), extent=area,\r\n cmap='gray_r', vmin=-100, vmax=100, alpha = 0.4)\r\nplt.scatter(x_rec,y_rec, color = 'b', marker = 'v', s=30)\r\nplt.scatter(x_src,y_src, color = 'r', marker = 'D', s=30)\r\nplt.ylim(area[2:][::-1])\r\nplt.xlabel('x (km)', size = 12)\r\nplt.ylabel('z (km)', size = 12)\r\nplt.subplot2grid((2, 8), (0, 5), colspan=3, rowspan=3)\r\nplt.tick_params(\r\n axis='x',\r\n which='both',\r\n bottom=False,\r\n top=False,\r\n labelbottom=False)\r\nplt.grid(linestyle = '--', alpha = 0.3)\r\nfor i in range(len(seismogram_list)):\r\n seismogram_list[i], = plt.plot([], [], '-k', linewidth=1)\r\n plt.plot(plot_spacing*i, 0, 'vb', markersize=5, linewidth=1)\r\n\r\nplt.ylim(duration, 0)\r\nplt.xlim(-135, 5025)\r\nplt.ylabel('TWT (s)')\r\nplt.tight_layout()\r\ntimes = np.linspace(0, dt * maxit, maxit)\r\n\r\n# This function updates the plot every few timesteps\r\ndef animate(i):\r\n t, u, seismogram = simulation.next()\r\n 
for j in range(len(seismogram_list)):\r\n seismogram_list[j].set_data(seismogram[j][:t + 1] + plot_spacing*j, times[:t + 1])\r\n wavefield.set_data(u[::-1])\r\n return wavefield, seismogram_list\r\n\r\n\r\nanim = animation.FuncAnimation(\r\n fig, animate, frames=maxit / snapshots, interval=1)\r\nanim.save('repsol_color_logo.mp4', fps=30, dpi=200, bitrate=4000)\r\n# anim # call the animation function\r\n# plt.show()\r\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.linspace", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.max", "numpy.zeros_like", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.animation.FuncAnimation", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.tick_params" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeffzi/optuna
[ "133e9d678723ad9e5183789f9271b7f96db32322", "133e9d678723ad9e5183789f9271b7f96db32322" ]
[ "examples/tensorflow_eager_simple.py", "tests/samplers_tests/tpe_tests/test_parzen_estimator.py" ]
[ "\"\"\"\nOptuna example that optimizes multi-layer perceptrons using Tensorflow (Eager Execution).\n\nIn this example, we optimize the validation accuracy of hand-written digit recognition using\nTensorflow and MNIST. We optimize the neural network architecture as well as the optimizer\nconfiguration.\n\n\"\"\"\n\nfrom packaging import version\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import mnist\n\nimport optuna\n\n\nif version.parse(tf.__version__) < version.parse(\"2.0.0\"):\n raise RuntimeError(\"tensorflow>=2.0.0 is required for this example.\")\n\nN_TRAIN_EXAMPLES = 3000\nN_VALID_EXAMPLES = 1000\nBATCHSIZE = 128\nCLASSES = 10\nEPOCHS = 1\n\n\ndef create_model(trial):\n # We optimize the numbers of layers, their units and weight decay parameter.\n n_layers = trial.suggest_int(\"n_layers\", 1, 3)\n weight_decay = trial.suggest_float(\"weight_decay\", 1e-10, 1e-3, log=True)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Flatten())\n for i in range(n_layers):\n num_hidden = trial.suggest_int(\"n_units_l{}\".format(i), 4, 128, log=True)\n model.add(\n tf.keras.layers.Dense(\n num_hidden,\n activation=\"relu\",\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n )\n )\n model.add(\n tf.keras.layers.Dense(CLASSES, kernel_regularizer=tf.keras.regularizers.l2(weight_decay))\n )\n return model\n\n\ndef create_optimizer(trial):\n # We optimize the choice of optimizers as well as their parameters.\n kwargs = {}\n optimizer_options = [\"RMSprop\", \"Adam\", \"SGD\"]\n optimizer_selected = trial.suggest_categorical(\"optimizer\", optimizer_options)\n if optimizer_selected == \"RMSprop\":\n kwargs[\"learning_rate\"] = trial.suggest_float(\n \"rmsprop_learning_rate\", 1e-5, 1e-1, log=True\n )\n kwargs[\"decay\"] = trial.suggest_float(\"rmsprop_decay\", 0.85, 0.99)\n kwargs[\"momentum\"] = trial.suggest_float(\"rmsprop_momentum\", 1e-5, 1e-1, log=True)\n elif optimizer_selected == \"Adam\":\n kwargs[\"learning_rate\"] = trial.suggest_float(\"adam_learning_rate\", 1e-5, 1e-1, log=True)\n elif optimizer_selected == \"SGD\":\n kwargs[\"learning_rate\"] = trial.suggest_float(\n \"sgd_opt_learning_rate\", 1e-5, 1e-1, log=True\n )\n kwargs[\"momentum\"] = trial.suggest_float(\"sgd_opt_momentum\", 1e-5, 1e-1, log=True)\n\n optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)\n return optimizer\n\n\ndef learn(model, optimizer, dataset, mode=\"eval\"):\n accuracy = tf.metrics.Accuracy(\"accuracy\", dtype=tf.float32)\n\n for batch, (images, labels) in enumerate(dataset):\n with tf.GradientTape() as tape:\n logits = model(images, training=(mode == \"train\"))\n loss_value = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n )\n if mode == \"eval\":\n accuracy(\n tf.argmax(logits, axis=1, output_type=tf.int64), tf.cast(labels, tf.int64)\n )\n else:\n grads = tape.gradient(loss_value, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n if mode == \"eval\":\n return accuracy\n\n\ndef get_mnist():\n (x_train, y_train), (x_valid, y_valid) = mnist.load_data()\n x_train = x_train.astype(\"float32\") / 255\n x_valid = x_valid.astype(\"float32\") / 255\n\n y_train = y_train.astype(\"int32\")\n y_valid = y_valid.astype(\"int32\")\n\n train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n train_ds = train_ds.shuffle(60000).batch(BATCHSIZE).take(N_TRAIN_EXAMPLES)\n\n valid_ds = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))\n valid_ds = 
valid_ds.shuffle(10000).batch(BATCHSIZE).take(N_VALID_EXAMPLES)\n return train_ds, valid_ds\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n # Get MNIST data.\n train_ds, valid_ds = get_mnist()\n\n # Build model and optimizer.\n model = create_model(trial)\n optimizer = create_optimizer(trial)\n\n # Training and validating cycle.\n with tf.device(\"/cpu:0\"):\n for _ in range(EPOCHS):\n learn(model, optimizer, train_ds, \"train\")\n\n accuracy = learn(model, optimizer, valid_ds, \"eval\")\n\n # Return last validation accuracy.\n return accuracy.result()\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(direction=\"maximize\")\n study.optimize(objective, n_trials=100)\n\n print(\"Number of finished trials: \", len(study.trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n", "import itertools\nfrom typing import Dict\nfrom typing import List\n\nimport numpy as np\nimport pytest\n\nfrom optuna.samplers._tpe.parzen_estimator import _ParzenEstimator\nfrom optuna.samplers._tpe.sampler import default_weights\n\n\nclass TestParzenEstimator(object):\n @staticmethod\n @pytest.mark.parametrize(\n \"mus, prior, magic_clip, endpoints\",\n itertools.product(\n ([], [0.4], [-0.4, 0.4]), # mus\n (True, False), # prior\n (True, False), # magic_clip\n (True, False), # endpoints\n ),\n )\n def test_calculate_shape_check(\n mus: List[float], prior: bool, magic_clip: bool, endpoints: bool\n ) -> None:\n\n s_weights, s_mus, s_sigmas = _ParzenEstimator._calculate(\n mus,\n -1.0,\n 1.0,\n prior_weight=1.0,\n consider_prior=prior,\n consider_magic_clip=magic_clip,\n consider_endpoints=endpoints,\n weights_func=default_weights,\n )\n\n # Result contains an additional value for a prior distribution if prior is True or\n # len(mus) == 0 (in this case, prior is always used).\n assert len(s_weights) == len(mus) + int(prior) if len(mus) > 0 else len(mus) + 1\n assert len(s_mus) == len(mus) + int(prior) if len(mus) > 0 else len(mus) + 1\n assert len(s_sigmas) == len(mus) + int(prior) if len(mus) > 0 else len(mus) + 1\n\n # TODO(ytsmiling): Improve test coverage for weights_func.\n @staticmethod\n @pytest.mark.parametrize(\n \"mus, flags, expected\",\n [\n [\n [],\n {\"prior\": False, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [1.0], \"mus\": [0.0], \"sigmas\": [2.0]},\n ],\n [\n [],\n {\"prior\": True, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [1.0], \"mus\": [0.0], \"sigmas\": [2.0]},\n ],\n [\n [0.4],\n {\"prior\": True, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [0.5, 0.5], \"mus\": [0.0, 0.4], \"sigmas\": [2.0, 0.6]},\n ],\n [\n [-0.4],\n {\"prior\": True, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [0.5, 0.5], \"mus\": [-0.4, 0.0], \"sigmas\": [0.6, 2.0]},\n ],\n [\n [-0.4, 0.4],\n {\"prior\": True, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [1.0 / 3] * 3, \"mus\": [-0.4, 0.0, 0.4], \"sigmas\": [0.6, 2.0, 0.6]},\n ],\n [\n [-0.4, 0.4],\n {\"prior\": True, \"magic_clip\": False, \"endpoints\": False},\n {\"weights\": [1.0 / 3] * 3, \"mus\": [-0.4, 0.0, 0.4], \"sigmas\": [0.4, 2.0, 0.4]},\n ],\n [\n [-0.4, 0.4],\n {\"prior\": False, \"magic_clip\": False, \"endpoints\": True},\n {\"weights\": [0.5, 0.5], \"mus\": [-0.4, 0.4], \"sigmas\": 
[0.8, 0.8]},\n ],\n [\n [-0.4, 0.4, 0.41, 0.42],\n {\"prior\": False, \"magic_clip\": False, \"endpoints\": True},\n {\n \"weights\": [0.25, 0.25, 0.25, 0.25],\n \"mus\": [-0.4, 0.4, 0.41, 0.42],\n \"sigmas\": [0.8, 0.8, 0.01, 0.58],\n },\n ],\n [\n [-0.4, 0.4, 0.41, 0.42],\n {\"prior\": False, \"magic_clip\": True, \"endpoints\": True},\n {\n \"weights\": [0.25, 0.25, 0.25, 0.25],\n \"mus\": [-0.4, 0.4, 0.41, 0.42],\n \"sigmas\": [0.8, 0.8, 0.4, 0.58],\n },\n ],\n ],\n )\n def test_calculate(\n mus: List[float], flags: Dict[str, bool], expected: Dict[str, List[float]]\n ) -> None:\n\n s_weights, s_mus, s_sigmas = _ParzenEstimator._calculate(\n mus,\n -1.0,\n 1.0,\n prior_weight=1.0,\n consider_prior=flags[\"prior\"],\n consider_magic_clip=flags[\"magic_clip\"],\n consider_endpoints=flags[\"endpoints\"],\n weights_func=default_weights,\n )\n\n # Result contains an additional value for a prior distribution if consider_prior is True.\n np.testing.assert_almost_equal(s_weights, expected[\"weights\"])\n np.testing.assert_almost_equal(s_mus, expected[\"mus\"])\n np.testing.assert_almost_equal(s_sigmas, expected[\"sigmas\"])\n" ]
[ [ "tensorflow.metrics.Accuracy", "tensorflow.device", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.regularizers.l2", "tensorflow.keras.Sequential", "tensorflow.cast", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.argmax", "tensorflow.keras.layers.Flatten", "tensorflow.GradientTape" ], [ "numpy.testing.assert_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MuharremOkutan/Screeni-py
[ "77a6626ce470a1812693a0350dcc8c2e2eb37dc5" ]
[ "src/classes/ParallelProcessing.py" ]
[ "\n'''\n * Project : Screenipy\n * Author : Pranjal Joshi, Swar Patel\n * Created : 18/05/2021\n * Description : Class for managing multiprocessing\n'''\n\nimport multiprocessing\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport pytz\nfrom queue import Empty\nfrom datetime import datetime\nimport classes.Fetcher as Fetcher\nimport classes.Utility as Utility\nfrom classes.CandlePatterns import CandlePatterns\nfrom classes.ColorText import colorText\nfrom classes.SuppressOutput import SuppressOutput\n\nif sys.platform.startswith('win'):\n import multiprocessing.popen_spawn_win32 as forking\nelse:\n import multiprocessing.popen_fork as forking\n\n\nclass StockConsumer(multiprocessing.Process):\n\n def __init__(self, task_queue, result_queue, screenCounter, screenResultsCounter, stockDict, proxyServer, keyboardInterruptEvent):\n multiprocessing.Process.__init__(self)\n self.multiprocessingForWindows()\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.screenCounter = screenCounter\n self.screenResultsCounter = screenResultsCounter\n self.stockDict = stockDict\n self.proxyServer = proxyServer\n self.keyboardInterruptEvent = keyboardInterruptEvent\n self.isTradingTime = Utility.tools.isTradingTime()\n\n def run(self):\n # while True:\n try:\n while not self.keyboardInterruptEvent.is_set():\n try:\n next_task = self.task_queue.get()\n except Empty:\n continue\n if next_task is None:\n self.task_queue.task_done()\n break\n answer = self.screenStocks(*(next_task))\n self.task_queue.task_done()\n self.result_queue.put(answer)\n except Exception as e:\n sys.exit(0)\n\n def screenStocks(self, executeOption, reversalOption, maLength, daysForLowestVolume, minRSI, maxRSI, respChartPattern, insideBarToLookback, totalSymbols,\n configManager, fetcher, screener, candlePatterns, stock, printCounter=False):\n screenResults = pd.DataFrame(columns=[\n 'Stock', 'Consolidating', 'Breaking-Out', 'MA-Signal', 'Volume', 'LTP', 'RSI', 'Trend', 'Pattern'])\n screeningDictionary = {'Stock': \"\", 'Consolidating': \"\", 'Breaking-Out': \"\",\n 'MA-Signal': \"\", 'Volume': \"\", 'LTP': 0, 'RSI': 0, 'Trend': \"\", 'Pattern': \"\"}\n saveDictionary = {'Stock': \"\", 'Consolidating': \"\", 'Breaking-Out': \"\",\n 'MA-Signal': \"\", 'Volume': \"\", 'LTP': 0, 'RSI': 0, 'Trend': \"\", 'Pattern': \"\"}\n\n try:\n period = configManager.period\n # Data download adjustment for IPO Base feature\n if executeOption == 7 and respChartPattern == 3:\n period = 'max'\n\n if (self.stockDict.get(stock) is None) or (respChartPattern == 3) or (configManager.cacheEnabled is False) or self.isTradingTime:\n data = fetcher.fetchStockData(stock,\n period,\n configManager.duration,\n self.proxyServer,\n self.screenResultsCounter,\n self.screenCounter,\n totalSymbols)\n if configManager.cacheEnabled is True and not self.isTradingTime and (self.stockDict.get(stock) is None):\n self.stockDict[stock] = data.to_dict('split')\n else:\n if printCounter:\n try:\n print(colorText.BOLD + colorText.GREEN + (\"[%d%%] Screened %d, Found %d. 
Fetching data & Analyzing %s...\" % (\n int((self.screenCounter.value / totalSymbols) * 100), self.screenCounter.value, self.screenResultsCounter.value, stock)) + colorText.END, end='')\n print(colorText.BOLD + colorText.GREEN + \"=> Done!\" +\n colorText.END, end='\\r', flush=True)\n except ZeroDivisionError:\n pass\n sys.stdout.write(\"\\r\\033[K\")\n data = self.stockDict.get(stock)\n data = pd.DataFrame(\n data['data'], columns=data['columns'], index=data['index'])\n\n fullData, processedData = screener.preprocessData(\n data, daysToLookback=configManager.daysToLookback)\n\n with self.screenCounter.get_lock():\n self.screenCounter.value += 1\n if not processedData.empty:\n # screeningDictionary['Stock'] = colorText.BOLD + \\\n # colorText.BLUE + stock + colorText.END\n screeningDictionary['Stock'] = colorText.BOLD + \\\n colorText.BLUE + f'\\x1B]8;;https://in.tradingview.com/chart?symbol=NSE%3A{stock}\\x1B\\\\{stock}\\x1B]8;;\\x1B\\\\' + colorText.END\n saveDictionary['Stock'] = stock\n consolidationValue = screener.validateConsolidation(\n processedData, screeningDictionary, saveDictionary, percentage=configManager.consolidationPercentage)\n isMaReversal = screener.validateMovingAverages(\n processedData, screeningDictionary, saveDictionary, maRange=1.25)\n isVolumeHigh = screener.validateVolume(\n processedData, screeningDictionary, saveDictionary, volumeRatio=configManager.volumeRatio)\n isBreaking = screener.findBreakout(\n processedData, screeningDictionary, saveDictionary, daysToLookback=configManager.daysToLookback)\n isLtpValid = screener.validateLTP(\n fullData, screeningDictionary, saveDictionary, minLTP=configManager.minLTP, maxLTP=configManager.maxLTP)\n isLowestVolume = screener.validateLowestVolume(\n processedData, daysForLowestVolume)\n isValidRsi = screener.validateRSI(\n processedData, screeningDictionary, saveDictionary, minRSI, maxRSI)\n try:\n with SuppressOutput(suppress_stderr=True, suppress_stdout=True):\n currentTrend = screener.findTrend(\n processedData,\n screeningDictionary,\n saveDictionary,\n daysToLookback=configManager.daysToLookback,\n stockName=stock)\n except np.RankWarning:\n screeningDictionary['Trend'] = 'Unknown'\n saveDictionary['Trend'] = 'Unknown'\n isCandlePattern = candlePatterns.findPattern(\n processedData, screeningDictionary, saveDictionary)\n isInsideBar = screener.validateInsideBar(\n processedData, screeningDictionary, saveDictionary, chartPattern=respChartPattern, daysToLookback=insideBarToLookback)\n isMomentum = screener.validateMomentum(processedData, screeningDictionary, saveDictionary)\n \n if respChartPattern == 3 and executeOption == 7:\n isIpoBase = screener.validateIpoBase(stock, fullData, screeningDictionary, saveDictionary)\n\n if maLength is not None and executeOption == 6:\n isMaSupport = screener.findReversalMA(fullData, screeningDictionary, saveDictionary, maLength)\n\n with self.screenResultsCounter.get_lock():\n if executeOption == 0:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if (executeOption == 1 or executeOption == 2) and isBreaking and isVolumeHigh and isLtpValid:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if (executeOption == 1 or executeOption == 3) and (consolidationValue <= configManager.consolidationPercentage and consolidationValue != 0) and isLtpValid:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if executeOption == 4 and isLtpValid and isLowestVolume:\n 
self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if executeOption == 5 and isLtpValid and isValidRsi:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if executeOption == 6 and isLtpValid:\n if reversalOption == 1:\n if saveDictionary['Pattern'] in CandlePatterns.reversalPatternsBullish or isMaReversal > 0:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n elif reversalOption == 2:\n if saveDictionary['Pattern'] in CandlePatterns.reversalPatternsBearish or isMaReversal < 0:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n elif reversalOption == 3 and isMomentum:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n elif reversalOption == 4 and isMaSupport:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n if executeOption == 7 and isLtpValid:\n if respChartPattern != 3 and isInsideBar:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n elif isIpoBase:\n self.screenResultsCounter.value += 1\n return screeningDictionary, saveDictionary\n except KeyboardInterrupt:\n # Capturing Ctr+C Here isn't a great idea\n pass\n except Fetcher.StockDataEmptyException:\n pass\n except KeyError:\n pass\n except Exception as e:\n if printCounter:\n print(colorText.FAIL +\n (\"\\n[+] Exception Occured while Screening %s! Skipping this stock..\" % stock) + colorText.END)\n return\n\n def multiprocessingForWindows(self):\n if sys.platform.startswith('win'):\n\n class _Popen(forking.Popen):\n def __init__(self, *args, **kw):\n if hasattr(sys, 'frozen'):\n os.putenv('_MEIPASS2', sys._MEIPASS)\n try:\n super(_Popen, self).__init__(*args, **kw)\n finally:\n if hasattr(sys, 'frozen'):\n if hasattr(os, 'unsetenv'):\n os.unsetenv('_MEIPASS2')\n else:\n os.putenv('_MEIPASS2', '')\n\n forking.Popen = _Popen\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
DIRECTLab/EVPRE
[ "320e59a6b027a19b725a2a5e9cb2c366ba84a8da" ]
[ "fastsim-2021a/fastsim/tests/accel_test.py" ]
[ "# Demonstrate the use of acceleration test\n\nimport sys\nimport os\nimport numpy as np\n\nfrom fastsim import simdrive, vehicle, cycle\n\ndef create_accel_cyc(length_in_seconds=300, spd_mph=89.48, grade=0.0, hz=10):\n \"\"\"\n Create a synthetic Drive Cycle for acceleration targeting.\n Defaults to a 15 second acceleration cycle. Should be adjusted based on target acceleration time\n and initial vehicle acceleration time, so that time isn't wasted on cycles that are needlessly long.\n\n spd_mph @ 89.48 FASTSim XL version mph default speed for acceleration cycles\n grade @ 0 and hz @ 10 also matches XL version settings\n \"\"\"\n mphPerMps = 2.23694\n cycMps = [(1/mphPerMps)*float(spd_mph)]*(length_in_seconds*hz)\n cycMps[0] = 0.\n cycMps = np.asarray(cycMps)\n cycSecs = np.arange(0, length_in_seconds, 1./hz)\n cycGrade = np.asarray([float(grade)]*(length_in_seconds*hz))\n cycRoadType = np.zeros(length_in_seconds*hz)\n cyc = {'cycMps': cycMps, 'cycSecs': cycSecs, 'cycGrade': cycGrade, 'cycRoadType':cycRoadType}\n return cyc\n\ndef main():\n # just use first vehicle in default database\n for i in range(1,27):\n veh = vehicle.Vehicle(i)\n accel_cyc = cycle.Cycle(std_cyc_name=None,\n cyc_dict=create_accel_cyc())\n accel_out = simdrive.SimAccelTest(cyc=accel_cyc, veh=veh)\n accel_out.sim_drive()\n acvhd_0_to_acc_speed_secs = simdrive.SimDrivePost(accel_out).get_output()['ZeroToSixtyTime_secs']\n print('vehicle {}: acceleration [s] {:.3f}'.format(i, acvhd_0_to_acc_speed_secs))\n\nif __name__=='__main__':\n main()" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kzkymn/defragTrees
[ "6fbf111c9fbc7f0468c7d18b63660e3f63e628c3" ]
[ "example/example_xgb.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Satoshi Hara\n\"\"\"\n\nimport sys\nsys.path.append('../')\n\nimport numpy as np\nimport xgboost as xgb\nfrom defragTrees import DefragModel\n\n# load data\nZtr = np.loadtxt('./train.csv', delimiter=',')\nZte = np.loadtxt('./test.csv', delimiter=',')\nXtr = Ztr[:, :-1]\nytr = Ztr[:, -1]\nXte = Zte[:, :-1]\nyte = Zte[:, -1]\n\n# train xgboost\nnum_round = 50\ndtrain = xgb.DMatrix(Xtr, label=ytr)\nparam = {'max_depth':4, 'eta':0.3, 'silent':1, 'objective':'binary:logistic'}\nbst = xgb.train(param, dtrain, num_round)\n\n# output xgb model as text\nbst.dump_model('xgbmodel.txt')\n\n# fit simplified model\nKmax = 10\nsplitter = DefragModel.parseXGBtrees('./xgbmodel.txt') # parse XGB model into the array of (feature index, threshold)\nmdl = DefragModel(modeltype='classification', maxitr=100, qitr=0, tol=1e-6, restart=20, verbose=0)\nmdl.fit(Xtr, ytr, splitter, Kmax, fittype='FAB')\n\n# results\nscore, cover, coll = mdl.evaluate(Xte, yte)\nprint()\nprint('<< defragTrees >>')\nprint('----- Evaluated Results -----')\nprint('Test Error = %f' % (score,))\nprint('Test Coverage = %f' % (cover,))\nprint('Overlap = %f' % (coll,))\nprint()\nprint('----- Found Rules -----')\nprint(mdl)\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
apetkau/thesis-index
[ "6c96e9ed75d8e661437effe62a939727a0b473fc" ]
[ "genomics_data_index/test/integration/api/query/features/test_MutationFeaturesComparator.py" ]
[ "import pandas as pd\n\nfrom genomics_data_index.api.query.GenomicsDataIndex import GenomicsDataIndex\nfrom genomics_data_index.api.query.features.MutationFeaturesComparator import MutationFeaturesComparator\nfrom genomics_data_index.storage.SampleSet import SampleSet\nfrom genomics_data_index.storage.model.db import Sample\nfrom genomics_data_index.test.integration import snippy_all_dataframes\n\n\ndef test_summary_all(loaded_database_genomic_data_store: GenomicsDataIndex):\n db = loaded_database_genomic_data_store.connection.database\n all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}\n\n dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\\t')\n dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\\t')\n dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\\t')\n expected_df = pd.concat([dfA, dfB, dfC])\n expected_df = expected_df.groupby('Mutation').agg({\n 'Sequence': 'first',\n 'Position': 'first',\n 'Deletion': 'first',\n 'Insertion': 'first',\n 'Mutation': 'count',\n }).rename(columns={'Mutation': 'Count'}).sort_index()\n expected_df['Total'] = 9\n expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])\n\n present_set = SampleSet(all_sample_ids)\n mutations_summarizer = MutationFeaturesComparator(connection=loaded_database_genomic_data_store.connection,\n ignore_annotations=True)\n\n mutations_df = mutations_summarizer.summary(present_set)\n mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison\n mutations_df = mutations_df.sort_index()\n\n assert len(expected_df) == len(mutations_df)\n assert list(expected_df.columns) == list(mutations_df.columns)\n assert list(expected_df.index) == list(mutations_df.index)\n assert list(expected_df['Count']) == list(mutations_df['Count'])\n assert list(expected_df['Total']) == list(mutations_df['Total'])\n assert 22 == mutations_df.loc['reference:619:G:C', 'Percent']\n\n\ndef test_summary_unique(loaded_database_genomic_data_store: GenomicsDataIndex):\n db = loaded_database_genomic_data_store.connection.database\n sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()\n sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()\n sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()\n all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}\n\n mutations_summarizer = MutationFeaturesComparator(connection=loaded_database_genomic_data_store.connection,\n ignore_annotations=True)\n\n dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\\t')\n dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\\t')\n dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\\t')\n\n # Unique to A\n present_set = SampleSet({sampleA.id})\n other_set = SampleSet(all_sample_ids - {sampleA.id})\n mutations_df = mutations_summarizer.unique_summary(present_set, other_set=other_set).sort_index()\n\n expected_df = dfA\n expected_df = expected_df.groupby('Mutation').agg({\n 'Sequence': 'first',\n 'Position': 'first',\n 'Deletion': 'first',\n 'Insertion': 'first',\n 'Mutation': 'count',\n }).rename(columns={'Mutation': 'Count'}).sort_index()\n expected_df['Total'] = 1\n expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])\n\n mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison\n\n assert len(expected_df) == len(mutations_df)\n assert 46 == len(mutations_df) # Check length against independently generated 
length\n assert list(expected_df.index) == list(mutations_df.index)\n assert list(expected_df['Count']) == list(mutations_df['Count'])\n assert list(expected_df['Total']) == list(mutations_df['Total'])\n assert 100 == mutations_df.loc['reference:3656:CATT:C', 'Percent']\n\n # Unique to B\n present_set = SampleSet({sampleB.id})\n other_set = SampleSet(all_sample_ids - {sampleB.id})\n mutations_df = mutations_summarizer.unique_summary(present_set, other_set=other_set).sort_index()\n\n dfAC = pd.concat([dfA, dfC])\n expected_df = dfB[~dfB['Mutation'].isin(list(dfAC['Mutation']))]\n expected_df = expected_df.groupby('Mutation').agg({\n 'Sequence': 'first',\n 'Position': 'first',\n 'Deletion': 'first',\n 'Insertion': 'first',\n 'Mutation': 'count',\n }).rename(columns={'Mutation': 'Count'}).sort_index()\n expected_df['Total'] = 1\n expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])\n\n mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison\n\n assert len(expected_df) == len(mutations_df)\n assert list(expected_df.index) == list(mutations_df.index)\n assert list(expected_df['Count']) == list(mutations_df['Count'])\n assert list(expected_df['Total']) == list(mutations_df['Total'])\n assert 100 == mutations_df.loc['reference:349:AAGT:A', 'Percent']\n\n # Unique to BC\n present_set = SampleSet({sampleB.id, sampleC.id})\n other_set = SampleSet(all_sample_ids - {sampleB.id, sampleC.id})\n mutations_df = mutations_summarizer.unique_summary(present_set, other_set=other_set).sort_index()\n\n dfBC = pd.concat([dfB, dfC])\n expected_df = dfBC[~dfBC['Mutation'].isin(list(dfA['Mutation']))]\n expected_df = expected_df.groupby('Mutation').agg({\n 'Sequence': 'first',\n 'Position': 'first',\n 'Deletion': 'first',\n 'Insertion': 'first',\n 'Mutation': 'count',\n }).rename(columns={'Mutation': 'Count'}).sort_index()\n expected_df['Total'] = 2\n expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])\n\n mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison\n\n assert len(expected_df) == len(mutations_df)\n assert 66 == len(mutations_df) # Check length against independently generated length\n assert list(expected_df.index) == list(mutations_df.index)\n assert list(expected_df['Count']) == list(mutations_df['Count'])\n assert list(expected_df['Total']) == list(mutations_df['Total'])\n assert 100 == mutations_df.loc['reference:619:G:C', 'Percent']\n assert 50 == mutations_df.loc['reference:866:GCCAGATCC:G', 'Percent']\n assert 50 == mutations_df.loc['reference:349:AAGT:A', 'Percent']\n\n\ndef test_summary_annotations(loaded_database_genomic_data_store_annotations: GenomicsDataIndex):\n db = loaded_database_genomic_data_store_annotations.connection.database\n\n mutations_summarizer = MutationFeaturesComparator(\n connection=loaded_database_genomic_data_store_annotations.connection,\n ignore_annotations=False)\n\n sample_sh14_001 = db.get_session().query(Sample).filter(Sample.name == 'SH14-001').one()\n sample_sh14_014 = db.get_session().query(Sample).filter(Sample.name == 'SH14-014').one()\n sample_sh10_014 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014').one()\n three_samples = {sample_sh14_001.id, sample_sh14_014.id, sample_sh10_014.id}\n\n present_set = SampleSet(three_samples)\n mutations_df = mutations_summarizer.summary(present_set)\n\n assert ['Sequence', 'Position', 'Deletion', 'Insertion',\n 'Count', 'Total', 'Percent', 'Annotation', 
'Annotation_Impact',\n 'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',\n 'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(mutations_df.columns)\n assert 177 == len(mutations_df)\n mutations_df['Percent'] = mutations_df['Percent'].astype(int) # easier to compare percents in assert\n\n # missense variant (3/3)\n assert ['NC_011083', 140658, 'C', 'A', 3, 3, 100,\n 'missense_variant', 'MODERATE', 'murF', 'SEHA_RS01180', 'transcript', 'protein_coding',\n 'c.497C>A', 'p.Ala166Glu',\n 'hgvs:NC_011083:SEHA_RS01180:c.497C>A', 'hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu',\n 'hgvs_gn:NC_011083:murF:c.497C>A', 'hgvs_gn:NC_011083:murF:p.Ala166Glu'] == list(\n mutations_df.loc['NC_011083:140658:C:A'])\n\n # Intergenic variant (1/3)\n assert ['NC_011083', 4555461, 'T', 'TC', 1, 3, 33,\n 'intergenic_region', 'MODIFIER', 'SEHA_RS22510-SEHA_RS26685', 'SEHA_RS22510-SEHA_RS26685',\n 'intergenic_region', 'NA',\n 'n.4555461_4555462insC', 'NA',\n 'hgvs:NC_011083:n.4555461_4555462insC', 'NA',\n 'hgvs_gn:NC_011083:n.4555461_4555462insC', 'NA'] == list(\n mutations_df.loc['NC_011083:4555461:T:TC'].fillna('NA'))\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
DLunin/pygraphmodels
[ "4ea8ebed74f3a7d5d56af4d5f189a514aab420f9" ]
[ "graphmodels/information/information.py" ]
[ "from ..factor import TableFactor\nimport numpy as np\nimport pandas as pd\nfrom itertools import combinations\n\n\ndef discrete_entropy(x):\n def make_factor(data, arguments, leak=1e-9):\n factor = TableFactor(arguments, list(data.columns))\n factor.fit(data)\n factor.table += leak\n factor.normalize(*factor.scope, copy=False)\n return factor\n arguments = list(x.columns)\n factor_x = make_factor(x, arguments).normalize(*arguments)\n prob = factor_x.table.flatten()\n return -np.sum(prob * np.log(prob))\n\n\ndef discrete_mutual_information(x, y):\n if x.shape[1] == 0 or y.shape[1] == 0:\n return 0\n\n def make_factor(data, arguments, leak=1e-9):\n factor = TableFactor(arguments, list(data.columns))\n factor.fit(data)\n factor.table += leak\n factor.normalize(*factor.scope, copy=False)\n return factor\n\n xy = pd.concat([x, y], axis=1)\n arguments = list(xy.columns)\n\n factor_x = make_factor(x, arguments)\n factor_y = make_factor(y, arguments)\n factor_xy = make_factor(xy, arguments).normalize(*arguments, copy=False)\n\n part1 = factor_xy.table.flatten()\n part2 = (factor_xy / (factor_x * factor_y).normalize(*arguments, copy=False)).table.flatten()\n\n result = np.sum(part1 * np.log(part2))\n if np.isnan(result):\n return +np.inf\n return result\n\n\ndef information_matrix(data, mi_estimator=discrete_mutual_information):\n m = len(data.columns)\n values = data.values\n information_matrix = np.zeros((m, m))\n for (i, fst), (j, snd) in combinations(enumerate(data.columns), 2):\n information_matrix[i, j] = information_matrix[j, i] = mi_estimator(data[[fst]], data[[snd]])\n return pd.DataFrame(data=dict(zip(data.columns, information_matrix)), index=data.columns)\n" ]
[ [ "numpy.isnan", "pandas.concat", "numpy.zeros", "numpy.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Marcnuth/KDD2017
[ "34332b76ab1910dbd85488123e5dbb24f7663cac" ]
[ "src/tasks/reg/preprocessor.py" ]
[ "import luigi\nimport pandas as pd\nimport numpy as mp\nfrom sklearn import preprocessing\nfrom tasks.reg.combiner import CombineFeatures\nfrom pathlib import Path\nfrom common import utils\nfrom sklearn import decomposition\nfrom sklearn import pipeline\nfrom sklearn import feature_selection\n\n\nclass Preprocess(luigi.Task):\n\n uuid = luigi.Parameter()\n task = luigi.Parameter()\n pipe = luigi.Parameter(default='poly,std,dec')\n\n def requires(self):\n return CombineFeatures(self.uuid, self.task)\n\n def output(self):\n self.dir = Path(self.input().path).absolute().parent\n fname = 'preprocess_' + Path(self.input().path).name\n outfile = self.dir / fname\n return luigi.LocalTarget(outfile.absolute().as_posix())\n\n def _process(self, data):\n #features = preprocessing.PolynomialFeatures().fit_transform(features)\n features = preprocessing.RobustScaler().fit_transform(data)\n #features = decomposition.TruncatedSVD().fit_transform(features)\n \n #cols = list(['f_' + i for i in range(features.shape[1])])\n return pd.DataFrame(features, columns=data.columns)\n\n def run(self):\n self._source()\n\n df = utils.load_data(self.input().path)\n\n final = pd.DataFrame([])\n for g in df.groupby(self.key_cols):\n\n (keys, data) = g\n\n meta_cols = data[self.meta_cols]\n processed_df = self._process(data.drop(self.meta_cols, axis=1))\n\n # reset the index, otherwise the concat will not work\n meta_cols.reset_index(drop=True, inplace=True)\n processed_df.reset_index(drop=True, inplace=True)\n g_final = pd.concat([meta_cols, processed_df], axis=1)\n\n final = pd.concat([final, g_final])\n\n #features = feature_selection.VarianceThreshold().fit_transform(features)\n\n #print(ployed_df.head())\n #print(meta_df.head())\n #print(final.head())\n\n final.to_csv(self.output().path, index=False)\n\n def _source(self):\n time_cols = ['time_window_start', 'time_window_end']\n if self.task == 'volume':\n keycols = ['tollgate_id', 'direction']\n vcol = 'volume'\n else:\n keycols = ['intersection_id', 'tollgate_id']\n vcol = 'avg_travel_time'\n\n self.key_cols = keycols\n self.meta_cols = [*time_cols, *keycols, vcol]\n self.vcol = vcol\n" ]
[ [ "sklearn.preprocessing.RobustScaler", "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
myyim/placecellperceptron
[ "8e03b880f47a1f0b7934afd91afb167f669ceeab" ]
[ "revision/fig8_gaussian.py" ]
[ "# This code was developed by Man Yi Yim ([email protected]) under Python 2.7.13.\n# Note that the figure order, label and the content may have changed during revision.\n\nimport numpy as np\nimport pylab\nimport matplotlib as mpl\nimport pickle\nimport itertools\nimport os\nfigpath = './figure/'\nif not os.path.exists(figpath):\n os.makedirs(figpath)\ndatapath = './data/'\nif not os.path.exists(datapath):\n os.makedirs(datapath)\n#exec(open('gridplacefunc.py').read())\nexec(open('mlfunc.py').read())\nexec(open('mathfunc.py').read())\n\nfont_size = 14\nmpl.rcParams['axes.titlesize'] = font_size\nmpl.rcParams['xtick.labelsize'] = font_size-2\nmpl.rcParams['ytick.labelsize'] = font_size-2\nmpl.rcParams['axes.labelsize'] = font_size-2\nmpl.rcParams['legend.fontsize'] = font_size-7\nmpl.rcParams['font.size'] = font_size-1\nnew_rc_params = {'text.usetex': False,\"svg.fonttype\": 'none'}\nmpl.rcParams.update(new_rc_params)\nfs = 16\n\n### Figures\ncolor = ['#1e90ff','#ff8c00','#3cb371']\ncolor4 = ['#DC143C','#3cb371','#9932CC','#FFD700','#1e90ff','#ff8c00','#3cb371','m']\n\ndef phase(x,period):\n \"\"\"phase(x,period) returns the phase of location x with respect to the module with spacing period.\"\"\"\n return np.mod(x/period,1)\n\ndef grid(x,period,prefphase,phsh=0.,gridmodel='gau',sig=0.16):\n \"\"\"grid(x,period,prefphase,phsh) returns the grid cell activity with prefphase at all location x.\"\"\"\n if gridmodel == 'gau':\n temp_array = np.array((abs(phase(x-phsh*period,period)-prefphase),1-abs(phase(x-phsh*period,period)-prefphase)))\n temp = np.exp(-np.min(temp_array,axis=0)**2/(2*sig**2))\n elif gridmodel == 'del':\n temp = np.zeros(x.size)\n temp[int(np.round((phsh+prefphase)*period)):x.size:period] = 1\n return temp\n\ndef act_mat_grid_gau(l,R=None):\n if R == None:\n R = l[0]\n for j in range(len(l) - 1):\n R = lcm(R, l[j + 1])\n u = []\n for iN in range(len(l)):\n for iM in range(l[iN]):\n u.append(grid(np.arange(R), l[iN], float(iM) / l[iN], 0))\n return np.array(u)\n\ndef margin_gridvsrandom(l,K=6,num=10,mode='ext',is_qp=0): # ext=exact, sX=sample X without replacement\n #u = act_mat_grid_binary(l)/len(l)\n u = act_mat_grid_gau(l)\n u /= sum(u[:,0])\n rng = np.random.RandomState(1)\n margin = []\n rmargin = [] # rmargin[K][trial]\n smargin = [] # smargin[K][trial]\n numKarr = []\n rnumKarr = []\n snumKarr = []\n partfunc = []\n for k in range(1,K+1):\n partition = partitions(k)\n part = []\n margin.append([])\n rmargin.append([])\n smargin.append([])\n numKarr.append([])\n rnumKarr.append([])\n snumKarr.append([])\n numK = 0\n # grid\n for p in partition:\n if np.all(np.array(p)<=np.min(l)):\n part.append(list(p))\n # Young diagram\n mat = np.zeros((l[0],l[1]))\n for j in range(len(p)):\n mat[:p[j],j] = 1\n #pylab.figure()\n #pylab.imshow(mat,aspect='auto')\n i1 = np.tile(range(l[0]),l[1])\n i2 = np.tile(range(l[1]),l[0])\n Y = mat[i1,i2]\n if is_qp == 0:\n m,w,b = svm_margin(u.T,Y)\n dec = np.sign(np.dot(w.T,u)+b)\n dec[dec<0] = 0\n else:\n try:\n Y[Y==0] = -1\n m,w,b = svm_qp(u,Y,1,1)\n except:\n m = np.inf\n w = np.inf*np.ones(u.shape[0])\n b = np.inf\n dec = np.sign(np.dot(w.T,u)+b)\n margin[k-1].append(m)\n denominator = math.factorial(l[0]-p[0])*math.factorial(p[-1])*math.factorial(l[1]-len(p))\n for j in np.diff(p):\n denominator *= math.factorial(abs(j))\n (chist,temp) = np.histogram(p,np.arange(0.5,p[0]+1))\n chist = chist[chist>0]\n for j in chist:\n denominator *= math.factorial(j)\n numK = math.factorial(l[0])*math.factorial(l[1])/denominator\n 
numKarr[k-1].append(numK)\n partfunc.append(len(part))\n print(margin)\n # random\n for j in range(num):\n print('Random '+str(j))\n v = rng.rand(u.shape[0],u.shape[1])\n for jj in range(u.shape[1]):\n v[:,jj] = v[:,jj]/np.sum(v[:,jj])\n for k in range(1,K+1):\n print('Number of fields: '+str(k))\n rmargin[k-1].append([])\n if mode == 'ext':\n com = [list(temp) for temp in itertools.combinations(range(u.shape[1]),k)]\n elif mode[0] == 's':\n com = []\n for jj in range(int(mode[1:])):\n temp = rng.choice(range(u.shape[1]),k,replace=False)\n temp.sort()\n com.append(list(temp))\n numK = 0\n for icom in com: # len(com) or partfunc[k-1] or 1\n Y = np.zeros(u.shape[1])\n Y[icom] = 1\n if is_qp == 0:\n m,w,b = svm_margin(v.T,Y)\n dec = np.sign(np.dot(w.T,v)+b)\n dec[dec<0] = 0\n else:\n try:\n Y[Y==0] = -1\n m,w,b = svm_qp(v,Y,1,1)\n except:\n m = np.inf\n w = np.inf*np.ones(v.shape[0])\n b = np.inf\n dec = np.sign(np.dot(w.T,v)+b)\n if abs(np.sum(np.abs(Y-dec))) < 1e-6:\n numK += 1\n rmargin[k-1][j].append(m)\n rnumKarr[k-1].append(numK)\n if j == num-1:\n print(m)\n # shuffled\n for j in range(num):\n print('Shuffled '+str(j))\n v = np.copy(u)\n if 1:\n # shuffle column only\n for jj in range(u.shape[1]):\n temp = v[:,jj]\n rng.shuffle(temp)\n v[:,jj] = temp\n if 0:\n # shuffle both row and column\n v = v.ravel()\n rng.shuffle(v)\n v = v.reshape(u.shape)\n for k in range(1,K+1):\n print('Number of fields: '+str(k))\n smargin[k-1].append([])\n if mode == 'ext':\n com = [list(temp) for temp in itertools.combinations(range(u.shape[1]),k)]\n elif mode[0] == 's':\n com = []\n for jj in range(int(mode[1:])):\n temp = rng.choice(range(u.shape[1]),k,replace=False)\n temp.sort()\n com.append(list(temp))\n numK = 0\n for icom in com: # len(com) or partfunc[k-1] or 1\n Y = np.zeros(u.shape[1])\n Y[icom] = 1\n if is_qp == 0:\n m,w,b = svm_margin(v.T,Y)\n dec = np.sign(np.dot(w.T,v)+b)\n dec[dec<0] = 0\n else:\n try:\n Y[Y==0] = -1\n m,w,b = svm_qp(v,Y,1,1)\n except:\n m = np.inf\n w = np.inf*np.ones(v.shape[0])\n b = np.inf\n dec = np.sign(np.dot(w.T,v)+b)\n if abs(np.sum(np.abs(Y-dec))) < 1e-6:\n numK += 1\n smargin[k-1][j].append(m)\n snumKarr[k-1].append(numK)\n if j == num-1:\n print(m)\n with open(datapath+'f8_'+mode+'_qp'*is_qp+'_gau.txt','wb') as f:\n pickle.dump((margin,rmargin,smargin,numKarr,rnumKarr,snumKarr),f)\n return margin,rmargin,smargin,numKarr,rnumKarr,snumKarr\n\ndef fig8gau():\n is_qp = 0 # use quadratic programming with the options of constrained weights\n if is_qp:\n print('Using quadratic programming')\n else:\n print('Using sklearn SVM')\n mpl.rcParams['legend.fontsize'] = font_size-6\n rng = np.random.RandomState(4)\n import seaborn as sns\n gridmodel = 'del'\n mth = np.arange(0,1.0001,0.01)\n sym = ['^','+','x']\n num = 10\n # A\n fig = pylab.figure(figsize=[7,8.5*0.8])\n fig.text(0.02,0.65,'A',fontsize=fs)\n fig.text(0.48,0.65,'B',fontsize=fs)\n fig.text(0.02,0.3,'C',fontsize=fs)\n fig.text(0.48,0.3,'D',fontsize=fs)\n ax = fig.add_subplot(323)\n l = [31,43]\n N = len(l)\n R = l[0]\n for j in range(N-1):\n R = lcm(R,l[j+1])\n K = 6\n #u = act_mat_grid_binary(l)/len(l)\n u = act_mat_grid_gau(l)\n u /= sum(u[:,0])\n mode = 's1000'\n if os.path.exists(datapath+'f8_s1000_gau.txt'):\n with open(datapath+'f8_s1000_gau.txt','rb') as f:\n margin,rmargin,smargin,numKarr,rnumKarr,snumKarr = pickle.load(f)\n else:\n margin,rmargin,smargin,numKarr,rnumKarr,snumKarr = margin_gridvsrandom(l,K=K,num=num,mode=mode)\n margin = margin[:K]\n rmargin = rmargin[:K]\n smargin = smargin[:K]\n numKarr 
= numKarr[:K]\n rnumKarr = rnumKarr[:K]\n snumKarr = snumKarr[:K]\n # for violin plot: random\n kmat1 = []\n for k in range(K):\n kmat1.extend([k+1]*np.sum(rnumKarr[k]))\n mmat1 = [item for sublist in rmargin for subsublist in sublist for item in subsublist]\n nmat1 = ['random']*np.sum(np.sum(rnumKarr))\n # for violin plot: shuffle\n kmat2 = []\n for k in range(K):\n kmat2.extend([k+1]*np.sum(snumKarr[k]))\n mmat2 = [item for sublist in smargin for subsublist in sublist for item in subsublist]\n nmat2 = ['shuffled']*np.sum(np.sum(snumKarr))\n kmat1 = np.array(kmat1)\n kmat2 = np.array(kmat2)\n #sns.violinplot(np.append(kmat1,kmat2),np.append(mmat1,mmat2),np.append(nmat1,nmat2),inner=None,linewidth=.4,scale='width',width=0.5,bw=.2,gridsize=100)\n sns.violinplot(kmat1,mmat1,inner=None,linewidth=.4,scale='width',width=0.5,bw=.2,gridsize=100,color='m')\n sns.violinplot(kmat2,mmat2,inner=None,linewidth=.4,scale='width',width=0.5,bw=.2,gridsize=100,color='b')\n #sns.violinplot(kmat1,mmat1,nmat1,inner=None,linewidth=.4,bw=.2)\n #sns.violinplot(kmat2,mmat2,nmat2,inner=None,linewidth=.4,bw=.2,color='#ff7f0e')\n for k in range(1,K+1):\n for mu in margin[k-1]:\n pylab.plot(k-1+np.array([-0.3,0.3]),2*[mu],'k')\n ax.set_yticks(np.arange(0,0.5,0.2))\n ax.set_xlim(-0.5,K-0.5)\n ax.set_ylim(0,0.4)\n ax.set_xlabel('number of fields ($K$)')\n ax.set_ylabel('margin')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n # B\n ax = fig.add_subplot(324)\n m_inset = []\n for k in range(1,K+1):\n count,temp = np.histogram(rng.rand(int(mode[1:])*num),np.array([0.]+list(np.cumsum(numKarr[k-1]))+[nCr(R,k)])/nCr(R,k)) # randomly draw field arrangements based on fractions of margins\n for j in range(len(count)-1):\n m_inset.extend([margin[k-1][j]]*count[j])\n print(np.array([0.]+list(np.cumsum(numKarr[k-1]))+[nCr(R,k)]),count,m_inset)\n count0L = []\n for m in mth:\n count0L.append(np.sum(np.array(m_inset)>=m))\n countr = []\n for m in mth:\n countr.append(np.sum(mmat1>=m))\n counts = []\n for m in mth:\n counts.append(np.sum(mmat2>=m))\n temp = float(K)*int(mode[1:])*num\n ax.plot(mth,np.array(count0L)/temp,'k')\n ax.plot(mth,np.array(countr)/temp,color='m',lw=1.5) #1f77b4\n ax.plot(mth,np.array(counts)/temp,color='b',lw=1.5) #ff7f0e\n ax.plot([0,1],2*[1],'k--',lw=1)\n ax.set_ylim(0,1)\n ax.set_xlim(0,0.4)\n ax.set_xlabel('margin $\\kappa$')\n ax.set_ylabel('CDF')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n pylab.savefig('fig8_gaussian.pdf')\n pylab.savefig('fig8_gaussian.eps')\n pylab.savefig('fig8_gaussian.svg')\n\n#fig1_2()\n#fig3()\n#fig5()\n#fig5()\n#fig6()\n#fig6b()\nfig8gau()\n#for j in range(1,11):\n# fig7(j)\n#readfig7()\n" ]
[ [ "numpy.dot", "numpy.abs", "numpy.min", "numpy.arange", "numpy.cumsum", "numpy.ones", "numpy.round", "numpy.copy", "matplotlib.rcParams.update", "numpy.diff", "numpy.mod", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeraldCSC/jax
[ "16c809ce7fcd4a1041216002c8088844410151f6", "6411f8a03388ce63eb365188f2e2880815745125", "16c809ce7fcd4a1041216002c8088844410151f6" ]
[ "jax/experimental/jax2tf/jax2tf.py", "tests/global_device_array_test.py", "jax/experimental/global_device_array.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental module transforms JAX functions to be executed by TensorFlow.\"\"\"\nfrom functools import partial\nimport contextlib\nimport os\nimport re\nimport threading\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport jax\nfrom jax import lax\nfrom jax._src import ad_util\nfrom jax._src import api_util\nfrom jax import config\nfrom jax import core, custom_derivatives\nfrom jax import linear_util as lu\nfrom jax import random, tree_util\nfrom jax import numpy as jnp\nfrom jax._src import ad_checkpoint\nfrom jax._src import api\nfrom jax._src import dispatch\nfrom jax._src import dtypes\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax._src.lax import lax as lax_internal\nfrom jax._src.lax import linalg as lax_linalg\nfrom jax._src.lax import slicing as lax_slicing\nfrom jax._src import source_info_util\nfrom jax._src import util\nimport jax._src.prng\nimport jax._src.random\nfrom jax.experimental import maps\nfrom jax.experimental import pjit\nfrom jax.interpreters import ad\nfrom jax.interpreters import partial_eval\nfrom jax.interpreters import pxla\nfrom jax.interpreters import sharded_jit\nfrom jax.interpreters import xla\nfrom jax._src.lib import xla_client\n\nfrom jax.experimental.jax2tf import shape_poly\nfrom jax.experimental.jax2tf import impl_no_xla\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\n# These don't have public equivalents.\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]\nfrom tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]\nfrom tensorflow.core.framework import attr_value_pb2 # type: ignore[import]\nfrom tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]\nfrom tensorflow.python.framework import ops as tf_ops # type: ignore[import]\n# pylint: enable=g-direct-tensorflow-import\n\nPolyShape = shape_poly.PolyShape\n\n# A temporary internal flag, to enable the wrapping of jax.jit functions\n# with tf.function(jit_compile=True). See #7389. This change has triggered a\n# number of failures in TF. We keep this until we are confident that it does\n# not create problems.\n# TODO(b/207464757): figure out why this change breaks test\n_WRAP_JAX_JIT_WITH_TF_FUNCTION = False\n\n# The scope name need to be a valid TensorFlow name. 
See\n# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731\n_VALID_SCOPE_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\/>-]*$\")\n_INVALID_SCOPE_CHAR = re.compile(\"[^A-Za-z0-9_.\\\\/>-]\")\n\nmap = util.safe_map\nzip = util.safe_zip\n\n\ndef _sanitize_scope_name(name):\n scope_name = _INVALID_SCOPE_CHAR.sub(\"_\", name)\n if not _VALID_SCOPE_REGEX.match(scope_name):\n scope_name = \".{}\".format(scope_name)\n return scope_name\n\n\n# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,\n# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)\nTfVal = Any\nDType = Any\nPrecisionType = int # Enum xla_data.PrecisionConfig.Precision\n\ndef _is_tfval(v: TfVal) -> bool:\n if isinstance(v, (tf.Tensor, tf.Variable)):\n return True\n try:\n # Include all convertible types, even if not supported on accelerators.\n with tf.device(\"CPU\"):\n tf.constant(v)\n return True\n except:\n return False\n\n\n# The implementation rules for primitives. The rule will be called with the\n# arguments (TfVal) and must return TfVal (or a sequence thereof,\n# if primitive.multiple_results). The vast majority of primitives do not need\n# to worry about core.unit inputs or results. The exception are primarily the\n# control-flow primitives.\ntf_impl: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# Some primitive implementation rules need the abstract values of arguments\n# and the results. This is the case for the primitives implemented using\n# _convert_jax_impl and those that need to adjust the shape of the outputs\n# due to missing TF shape inference rules for TFXLA ops. The rules for these\n# primitives should be added to `tf_impl_with_avals`.\n# The abstract value are passed to the implementation as two special kwargs\n# `_in_avals` (a tuple of core.ShapedArray) and `_out_aval` (a\n# core.ShapedArray, or a tuple thereof when primitive.multiple_results).\ntf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# XLA is not linked in all environments when converting a primitive. If this is\n# the case, we first search for implementation rules for primitives in the\n# following map. These implementations are workarounds, making use of TF ops\n# that do work when XLA is not linked in.\ntf_impl_no_xla = impl_no_xla.tf_impl_no_xla\n\n# In order to ensure that JAX picks up the proper user-frame for source\n# locations we will register the TensorFlow source path as an internal\n# path with source_info_util. The typical stack when a JAX primitive\n# conversion happens is:\n# jax2tf.process_primitive (top of stack)\n# jax tracing machinery ...\n# tf.custom_gradient machinery ...\n# jax2tf.converted_fun\n# tf function machinery ...\n# user code invokes the converted function on TF tensors\n#\n# We need to skip over not only JAX internal frames, but TF internal frames\n# also.\n# We register the TensorFlow source path lazily\n_has_registered_tf_source_path = False\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.name_stack = \"\"\n # XLA is not linked in all environments; when converting a primitive, if this\n # variable is disabled, we try harder to use only standard TF ops if they are\n # applicable to the concrete use case; if the resulting conversion path ends up\n # requiring a TFXLA operation, an exception is thrown instead.\n self.enable_xla = True\n\n # Keep track if we are inside a call_tf. 
In that context we disable the\n # safety check that we are not inside JAX transformations.\n self.inside_call_tf = False\n\n # Maps dimension variables to TF expressions\n self.shape_env: Sequence[Tuple[str, TfVal]] = ()\n\n # Whether to actually include XLA op metadata in the generated TF ops\n self.include_xla_op_metadata = True\n\n # A cache for the tf.convert_to_tensor for constants. We try to preserve\n # sharing for constants, to enable tf.Graph to take advantage of it.\n # See https://github.com/google/jax/issues/7992.\n self.constant_cache = None # None means that we don't use a cache. We\n # may be outside a conversion scope.\n\n\n_thread_local_state = _ThreadLocalState()\n\ndef _get_current_name_stack():\n return _thread_local_state.name_stack\n\[email protected]\ndef inside_call_tf():\n # Set the inside_call_tf flag for a context.\n prev = _thread_local_state.inside_call_tf\n _thread_local_state.inside_call_tf = True\n try:\n yield\n finally:\n _thread_local_state.inside_call_tf = prev\n\n@partial(api_util.api_hook, tag=\"jax2tf_convert\")\ndef convert(fun: Callable,\n *,\n polymorphic_shapes=None,\n with_gradient=True,\n enable_xla=True\n ) -> Callable:\n \"\"\"Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change.\n\n It should be `None` (all arguments are monomorphic), a single PolyShape\n or string (applies to all arguments), or a tuple/list of the same length\n as the function arguments. For each argument the shape specification\n should be `None` (monomorphic argument), or a Python object with the\n same pytree structure as the argument.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string \"_\" denoting a monomorphic dimension\n whose size is given by the actual argument. 
As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of \"_\"\n placeholders.\n\n For convenience, a shape specification can also be given as a string\n representation, e.g.: \"batch, ...\", \"batch, height, width, _\", possibly\n with surrounding parentheses: \"(batch, ...)\".\n\n The conversion fails if it cannot ensure that the it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set (default), add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. This means that reverse-mode\n TensorFlow AD is supported for the output TensorFlow function, and the\n value of the gradient will be JAX-accurate.\n enable_xla: if set (default), the converter will use the simplest conversion\n and use XLA TF ops when necessary. These ops are known to create issues\n for the TFLite and TFjs converters. For those cases, unset this parameter\n so the converter tries harder to use non-XLA TF ops to convert the\n function and aborts if this is not possible.\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs, and uses\n only TensorFlow ops.\n \"\"\"\n api._check_callable(fun)\n fun_name = getattr(fun, \"__name__\", \"unknown\")\n name_stack = util.extend_name_stack(util.wrap_name(fun_name, \"jax2tf\"))\n def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:\n # TODO: is there a better way to check if we are inside a transformation?\n if not core.trace_state_clean() and not _thread_local_state.inside_call_tf:\n # It is Ok to nest convert when we are inside a call_tf\n raise ValueError(\"convert must be used outside all JAX transformations.\" +\n f\"Trace state: {core.thread_local_state.trace_state.trace_stack}\")\n\n # We support kwargs by wrapping the function to take only positional arguments.\n # This is in part because jax.vjp does not support kwargs.\n nr_positional_args = len(args)\n kw_names = kwargs.keys()\n args = tuple(args) + tuple(kwargs[kw] for kw in kw_names)\n\n def fun_no_kwargs(*args_and_kwargs):\n assert len(args_and_kwargs) == nr_positional_args + len(kw_names)\n args = args_and_kwargs[:nr_positional_args]\n kwargs = {kw: args_and_kwargs[nr_positional_args + i]\n for i, kw in enumerate(kw_names)}\n return fun(*args, **kwargs)\n\n def check_arg(a):\n if not _is_tfval(a):\n msg = (f\"Argument {a} of type {type(a)} of jax2tf.convert(f) should \"\n \"be NumPy array, scalar, tf.Variable, or tf.Tensor\")\n raise TypeError(msg)\n\n tree_util.tree_map(check_arg, args)\n\n args_flat, in_tree = tree_util.tree_flatten((args, {}))\n # May need to cast the arguments to have the type assumed by JAX\n args_and_dtypes_flat = tuple(map(_tfval_to_tensor_jax_dtype, args_flat))\n args_flat, arg_dtypes_flat = util.unzip2(args_and_dtypes_flat)\n # Name input tensors; do this after we have cast the arguments\n def _apply_name(a: TfVal, suffix) -> TfVal:\n return tf.identity(a, f\"jax2tf_arg_{suffix}\")\n args_flat = tuple(_apply_name(a, i) for i, a in enumerate(args_flat))\n\n if polymorphic_shapes is None:\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args)\n 
elif isinstance(polymorphic_shapes, (PolyShape, str)):\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args) # type: ignore\n else:\n if not isinstance(polymorphic_shapes, Sequence) or len(polymorphic_shapes) != len(args) - len(kw_names):\n msg = (\"polymorphic_shapes must be a sequence with the same length as the positional argument list \"\n f\"({len(args)}). Got polymorphic_shapes={repr(polymorphic_shapes)}.\")\n raise TypeError(msg)\n polymorphic_shapes_ = tuple(polymorphic_shapes) + (None,) * len(kw_names)\n\n # Expand the polymorphic_shapes to match the argument pytree\n polymorphic_shapes_flat = tuple(api_util.flatten_axes(\"jax2tf.convert polymorphic_shapes\",\n in_tree.children()[0],\n polymorphic_shapes_))\n\n def fix_tf1_shape(arg: TfVal) -> Sequence[Optional[int]]:\n tf_arg_shape = np.shape(arg)\n return tuple(d.value if isinstance(d, tf.compat.v1.Dimension) else d for d in tf_arg_shape)\n args_shapes_flat = tuple(fix_tf1_shape(a) for a in args_flat)\n\n # Construct the abstract values for the flat arguments, possibly based on\n # the input shapes and the polymorphic_shapes if given. May create new shape\n # variables. May cast the args_flat to JAX types, using JAX's interpretation\n # of types of constants.\n args_avals_flat = shape_poly.args_avals(\n args_shapes_flat, arg_dtypes_flat, polymorphic_shapes_flat)\n\n dim_vars, get_dim_values = shape_poly.prepare_dim_var_env(args_avals_flat)\n dim_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(get_dim_values),\n args_flat, args_avals_flat, \"\"))\n shape_env = zip(dim_vars, dim_values)\n\n # This function may take pytrees of TfVals. We can only set\n # tf.custom_gradient on functions that take a flat argument list.\n f = lu.wrap_init(fun_no_kwargs)\n # out_tree_thunk() will be the output tree, after running _interpret_fun.\n flat_fun, out_tree_thunk = api_util.flatten_fun(f, in_tree)\n # out_tree_thunk will be ready after _interpret_fun below.\n\n # Prepare the grad_fn for tf.custom_gradient.\n def converted_grad_fn(*out_cts_flat: TfVal,\n _out_cts_avals: Sequence[core.ShapedArray],\n variables=None):\n if variables:\n raise ValueError(\n \"Unexpected variables used in forward pass. \"\n \"This should not happen for first-order differentiation. \"\n f\"variables={variables}\")\n\n out_tree = out_tree_thunk()\n if polymorphic_shapes is None:\n vjp_polymorphic_shapes = None\n else:\n args_flat_polymorphic_shapes = polymorphic_shapes_flat\n out_cts_flat_polymorphic_shapes = tuple(str(out_aval.shape) # Note: may be polynomials, not just DimVar\n for out_aval in _out_cts_avals) # type: ignore\n vjp_polymorphic_shapes = [\n args_flat_polymorphic_shapes, out_cts_flat_polymorphic_shapes\n ]\n\n def fun_vjp_jax(args_flat_jax, out_cts_flat_jax):\n # One may think that we can get the pullback while we are converting\n # the main function in the first place. That is problematic, because the\n # pullback may contain captured tracers from the conversion of the\n # main function. Those tracers will confuse the conversion of the\n # pullback. So, we construct the vjp anew and we convert it separately.\n args_jax, kwargs_jax = tree_util.tree_unflatten(in_tree, args_flat_jax)\n assert not kwargs_jax\n _, pullback_jax = jax.vjp(fun_no_kwargs, *args_jax)\n\n def fix_out_ct(out_ct_jax, out_ct_aval: core.ShapedArray):\n # If the primal function has outputs of integer or bool types, and if we are\n # under a tf.function context, then TF will pass None in _out_cts_flat\n # in place of these values. 
We should change these to float0 or\n # else JAX gets unhappy. See issue #6975.\n if out_ct_jax is not None:\n return out_ct_jax\n assert core.primal_dtype_to_tangent_dtype(out_ct_aval.dtype) == dtypes.float0, f\"out_ct={out_ct_jax}\"\n # Note that out_ct_aval.shape contains dimension variable from the\n # primal function scope. It is Ok to use them here because we\n # use the same shape variables for the VJP function.\n return jnp.zeros(out_ct_aval.shape, dtype=_tf_np_dtype_for_float0)\n\n out_cts_fixed_flat = tuple(map(fix_out_ct, out_cts_flat_jax, _out_cts_avals))\n\n out_cts_fixed = tree_util.tree_unflatten(out_tree, out_cts_fixed_flat)\n in_cts_jax = pullback_jax(out_cts_fixed)\n\n in_cts_flat_jax, in_cts_tree = tree_util.tree_flatten(in_cts_jax)\n def fix_in_ct(in_ct, arg_aval: core.ShapedArray):\n if jnp.issubdtype(arg_aval.dtype, jnp.inexact):\n return in_ct\n else:\n assert in_ct.dtype == dtypes.float0\n return jnp.zeros(arg_aval.shape, _tf_np_dtype_for_float0)\n\n in_cts_fixed_flat_jax = tuple(map(fix_in_ct, in_cts_flat_jax, args_avals_flat))\n return in_cts_fixed_flat_jax\n\n # TODO: enable higher-order gradients\n with tf.name_scope(\"jax2tf_vjp\"):\n in_cts_flat = convert(\n fun_vjp_jax,\n with_gradient=False,\n polymorphic_shapes=vjp_polymorphic_shapes)(args_flat, out_cts_flat)\n in_cts, kwin_cts = tree_util.tree_unflatten(in_tree, in_cts_flat)\n assert not kwin_cts\n return in_cts\n\n try:\n assert not _thread_local_state.shape_env, f\"Unexpected shape environment {_thread_local_state.shape_env}\"\n\n prev_enable_xla = _thread_local_state.enable_xla\n _thread_local_state.enable_xla = enable_xla\n\n prev_include_xla_op_metadata = _thread_local_state.include_xla_op_metadata\n _thread_local_state.include_xla_op_metadata = False\n\n _thread_local_state.shape_env = shape_env\n global _has_registered_tf_source_path\n if not _has_registered_tf_source_path:\n source_info_util.register_exclusion(os.path.dirname(tf.__file__))\n _has_registered_tf_source_path = True\n\n if with_gradient:\n\n @tf.custom_gradient\n def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack,\n fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n return (tuple(outs),\n partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))\n\n out_flat = converted_fun_flat_with_custom_gradient(*args_flat)\n else:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack, fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n message = (\"The jax2tf-converted function does not support gradients. \"\n \"Use `with_gradient` parameter to enable gradients\")\n # We use PreventGradient, which is propagated through a SavedModel.\n out_flat = [\n tf.raw_ops.PreventGradient(input=o, message=message)\n for o in outs\n ]\n finally:\n _thread_local_state.shape_env = ()\n _thread_local_state.enable_xla = prev_enable_xla\n _thread_local_state.include_xla_op_metadata = prev_include_xla_op_metadata\n\n out_flat = [tf.identity(x, \"jax2tf_out\") for x in out_flat]\n out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)\n return out\n\n return converted_fun\n\n\ndef dtype_of_val(val: TfVal) -> DType:\n \"\"\"Computes the TensorFlow dtype using JAX's typing rules.\n\n If the value is a tf.Tensor, it starts with its dtype. If the value is a\n constant it uses JAX to infer its dtype. 
The resulting dtype follows the\n JAX type inference rules, and depends on the value of the\n JAX_ENABLE_X64 flag.\n\n See README.md for how 64-bit values are treated.\n \"\"\"\n tval, _ = _tfval_to_tensor_jax_dtype(val)\n return tval.dtype\n\n# Internals\n\[email protected]\ndef _extended_name_stack(extra_name_stack: Optional[str]):\n prev_name_stack = _thread_local_state.name_stack\n if extra_name_stack:\n if not prev_name_stack:\n _thread_local_state.name_stack = extra_name_stack\n else:\n _thread_local_state.name_stack = util.extend_name_stack(\n _thread_local_state.name_stack, extra_name_stack)\n try:\n yield\n finally:\n _thread_local_state.name_stack = prev_name_stack\n\n\ndef _interpret_fun(\n fun: lu.WrappedFun, in_vals: Sequence[TfVal],\n in_avals: Sequence[core.ShapedArray],\n extra_name_stack: Optional[str],\n fresh_constant_cache: bool = False\n) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n with core.new_base_main(TensorFlowTrace) as main: # type: ignore\n fun = _interpret_subtrace(fun, main, in_avals)\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(fun, in_vals,\n fresh_constant_cache=fresh_constant_cache)\n\n del main\n\n return tuple(out_vals)\n\ndef _call_wrapped_with_new_constant_cache(fun: lu.WrappedFun,\n in_vals: Sequence[TfVal],\n fresh_constant_cache: bool = False\n ) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n try:\n prev_constant_cache = _thread_local_state.constant_cache\n prev_constant_cache_keys = set(prev_constant_cache.keys()) if prev_constant_cache is not None else set()\n # Start a new cache, so that we don't share constants across tf.function\n # boundaries.\n if fresh_constant_cache:\n _thread_local_state.constant_cache = {}\n\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n fun.call_wrapped(*in_vals)\n finally:\n if prev_constant_cache is not None and not fresh_constant_cache:\n newly_added_keys = set(prev_constant_cache.keys()) - prev_constant_cache_keys\n # Delete the newly added keys\n for k in newly_added_keys:\n del prev_constant_cache[k]\n _thread_local_state.constant_cache = prev_constant_cache\n return out_vals\n\ndef _convert_jax_impl(jax_impl: Callable, *,\n multiple_results=True,\n extra_name_stack: Optional[str] = None) -> Callable:\n \"\"\"Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. 
This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n extra_name_stack: additional element to add to the name stack for the\n converted ops.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n \"\"\"\n\n def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> Sequence[TfVal]:\n\n # We wrap the jax_impl under _interpret_fun to abstract the TF values\n # from jax_impl and turn them into JAX abstract values.\n def jax_impl_jax_args(*jax_args):\n jax_results = jax_impl(*jax_args, **kwargs)\n return jax_results if multiple_results else [jax_results]\n\n tf_results_with_avals = _interpret_fun(\n lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals,\n extra_name_stack)\n tf_results, _ = util.unzip2(tf_results_with_avals)\n return tf_results if multiple_results else tf_results[0]\n\n return wrapped\n\n\[email protected]\ndef _interpret_subtrace(main: core.MainTrace,\n in_avals: Sequence[core.ShapedArray],\n *in_vals: TfVal):\n trace = TensorFlowTrace(main, core.cur_sublevel())\n in_tracers = tuple(\n TensorFlowTracer(trace, val, aval)\n for val, aval in zip(in_vals, in_avals))\n # The outs may be core.unit, see comment in TensorFlowTrace.pure.\n outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]\n out_tracers: Iterable[TensorFlowTracer] = (\n map(trace.full_raise, outs)) # type: ignore\n out_vals_with_avals: Sequence[Tuple[TfVal, core.ShapedArray]] = (\n tuple((t.val, t.aval) for t in out_tracers))\n yield out_vals_with_avals\n\n\ndef _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal,\n extra_name_stack: Optional[str]) -> Sequence[TfVal]:\n \"\"\"Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n \"\"\"\n fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals, extra_name_stack)\n return tuple(v for v, _ in out_with_avals)\n\n\ndef _aval_to_tf_shape(aval: core.ShapedArray) -> Tuple[Optional[int], ...]:\n \"\"\"Generate a TF shape, possibly containing None for polymorphic dimensions.\"\"\"\n return tuple(map(lambda d: None if shape_poly.is_poly_dim(d) else d,\n aval.shape)) # type: ignore[attr-defined]\n\n# In the TF world, we represent float0 as zeros of this type.\n_tf_np_dtype_for_float0 = np.int32\n\ndef _to_tf_dtype(jax_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n if jax_dtype == dtypes.float0:\n jax_dtype = _tf_np_dtype_for_float0\n return tf.dtypes.as_dtype(jax_dtype)\n\n\ndef _to_jax_dtype(tf_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n return dtypes.canonicalize_dtype(tf_dtype.as_numpy_dtype)\n\n\ndef _tfval_to_tensor_jax_dtype(val: TfVal,\n jax_dtype: Optional[DType] = None,\n memoize_constants=False) -> Tuple[TfVal, DType]:\n \"\"\"Converts a scalar, ndarray, or tf.Tensor to a tf.Tensor with proper type.\n\n If `jax_dtype` is missing, uses JAX typing rules.\n See README.md for details regarding 64-bit values.\n\n Args:\n val: a scalar, ndarray, tf.Tensor, or tf.Variable\n jax_dtype: an optional dtype to use. If missing, uses JAX type inference\n rules for constants.\n memoize_constants: whether to memoize TF constants. 
We can't do this\n everywhere, we may be outside of a conversion scope.\n\n Returns:\n a tuple with a tf.Tensor with the type as needed by JAX, and the JAX type.\n \"\"\"\n if isinstance(val, (tf.Tensor, tf.Variable)):\n jax_dtype = jax_dtype or _to_jax_dtype(val.dtype) # Give JAX a chance to pick the type\n conversion_dtype = _to_tf_dtype(jax_dtype)\n if conversion_dtype != val.dtype:\n return tf.cast(val, conversion_dtype), jax_dtype\n else:\n return val, jax_dtype\n else: # A constant\n jax_dtype = jax_dtype or xla.abstractify(val).dtype\n # TODO(document): We assume that the value of a constant does not\n # change through the scope of the function. But it may be an ndarray, ...\n # JAX has the same problem when generating HLO.\n const_key = (id(val), jax_dtype)\n # Since we use id(val) as a cache key, we have to make sure that we keep\n # the previous `val` alive. Otherwise, for an ndarray, it can get garbage\n # collected and reused for a different value, which would create correctness\n # issues. We keep the `val` alive by storing in the cache the pair\n # `(val, tf_val)`.\n do_memoize = (memoize_constants and np.shape(val) and _thread_local_state.constant_cache is not None)\n if do_memoize:\n _, tf_val = _thread_local_state.constant_cache.get(const_key, (None, None))\n else:\n tf_val = None\n if tf_val is None:\n conversion_dtype = _to_tf_dtype(jax_dtype)\n # The float0 type is not known to TF.\n if jax_dtype == dtypes.float0:\n val = np.zeros(np.shape(val), conversion_dtype.as_numpy_dtype)\n tf_val = tf.convert_to_tensor(val, dtype=conversion_dtype)\n if do_memoize:\n _thread_local_state.constant_cache[const_key] = (val, tf_val)\n return tf_val, jax_dtype\n\n\ndef _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:\n assert all(map(lambda x: x is not None, shape)), (\n f\"Argument shape should be a valid JAX shape but got {shape}\")\n dim_vars, dim_values = util.unzip2(_thread_local_state.shape_env)\n eval_shape, dim_avals = shape_poly.get_shape_evaluator(dim_vars, shape)\n shape_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(eval_shape),\n dim_values, dim_avals, \"\")) # type: ignore\n return shape_values\n\n\n# TODO(b/26854495): pylint doesn't understand slots and inheritance.\n# pylint: disable=assigning-non-slot\n\n\nclass TensorFlowTracer(core.Tracer):\n \"\"\"Tracer class that boxes a TF value and a JAX abstract value.\n\n In addition to the TF value we carry the JAX abstract value because there are\n two cases when it cannot be recovered from the value: (a) when the abstract\n value is core.abstract_unit, in which case the value is tf.nan; (b) when we\n are converting with polymorphic shapes, in which case the shape of the value\n may have dimensions set to `None`, which the JAX abstract value may contain\n more precise information.\n\n When the value has a partially-known shape, the dimensions marked as `None`\n must correspond to non-constant dimensions in the abstract value.\n\n See README.md for details.\n \"\"\"\n # val: TfVal\n # _aval: core.ShapedArray\n __slots__ = [\"val\", \"_aval\"]\n\n def __init__(self, trace: \"TensorFlowTrace\", val: TfVal,\n aval: core.AbstractValue):\n self._trace = trace\n self._aval = aval\n if aval is core.abstract_unit:\n self.val = val\n return\n\n if isinstance(val, (tf.Tensor, tf.Variable)):\n val_shape = val.shape\n\n if config.jax_enable_checks:\n assert len(self._aval.shape) == len(val_shape), f\"_aval.shape={self._aval.shape} different rank than val_shape={val_shape}\"\n # To compare types, we must handle 
float0 in JAX and x64 in TF\n if self._aval.dtype == dtypes.float0:\n assert _to_tf_dtype(self._aval.dtype) == val.dtype, f\"expected {self._aval.dtype} == {val.dtype}\"\n else:\n assert self._aval.dtype == _to_jax_dtype(val.dtype), f\"expected {self._aval.dtype} == {val.dtype}\"\n\n for aval_dim, val_dim in zip(self._aval.shape, val_shape): # type: ignore[attr-defined]\n if val_dim is None:\n assert shape_poly.is_poly_dim(aval_dim), f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n elif not shape_poly.is_poly_dim(aval_dim):\n assert aval_dim == val_dim, f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n else:\n # We have a TF value with known shape, and the abstract shape is a shape variable.\n try:\n aval_int = int(_eval_shape([aval_dim])) # type: ignore\n except (TypeError, KeyError):\n continue\n assert aval_int == val_dim, f\"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}.\" # type: ignore\n\n self.val = _tfval_to_tensor_jax_dtype(val,\n self._aval.dtype,\n memoize_constants=True)[0] # type: ignore[attr-defined]\n\n @property\n def aval(self):\n return self._aval\n\n def full_lower(self):\n return self\n\n\nclass TensorFlowTrace(core.Trace):\n \"\"\"Trace class that underlies the jax2tf transformation.\n\n We are going to ensure that jax2tf.convert is never nested inside other\n transformations. This is sufficient for intended use cases (converting\n fully-transformed JAX code). It also simplifies our job because we do not have\n to handle situations where we apply primitives on a mix of TF values and\n JAX tracers from an outer transformation. E.g., for addition both the TF\n values\n and the JAX tracers have an override and they get confused if they see values\n from the other world.\n\n Hence a TFT trace does not interact with non-TFT traces at lower-level. For\n higher-order control-flow primitives we invoke recursively\n _interpret_fun on the body of the conditional, which will create a nested TFT.\n\n We do want to allow transformations nested inside a TensorFlowTrace (TFT), but\n those will introduce their own MainTrace, and any operations involving those\n will be done on those traces, i.e., not a concern for TFT.\n \"\"\"\n def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:\n \"\"\"Lifts a non-Tracer into the TensorFlowTracer.\n\n This function may be called by way of trace.full_raise.\n\n The value may be a core.unit. During JAX transformations we sometimes\n produce a Jaxpr that has arguments of abstract value core.abstract_unit\n and results equal to core.unit. These are arguments and results that are\n not used in the computation.\n\n In TF world, we represent core.unit as NaN. This is safe, as these values\n should never be used.\n \"\"\"\n if val is core.unit:\n return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),\n core.abstract_unit)\n else:\n tf_val, jax_dtype = _tfval_to_tensor_jax_dtype(val, memoize_constants=True)\n return TensorFlowTracer(\n self, val, core.ShapedArray(tf_val.shape, jax_dtype,\n weak_type=dtypes.is_weakly_typed(val)))\n\n def lift(self, val: core.Tracer) -> TensorFlowTracer:\n # This would be called when we need to raise a tracer from a lower-level\n # main into the TensorFlowTrace. 
Since the TensorFlowTrace is never nested\n # inside another transform, there are no lower-level main traces.\n assert False\n\n def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:\n # This is called when we need to raise a tracer from the same main,\n # but a lower sublevel. This could come from a nested jit.\n return TensorFlowTracer(self, val.val, val._aval)\n\n def process_primitive(self, primitive: core.Primitive,\n tracers: Sequence[TensorFlowTracer],\n params) -> TensorFlowTracer:\n impl, impl_needs_avals = self.get_primitive_impl(primitive)\n args_avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n # This is a bit conservative, doing abstract_eval even in op-by-op execution\n # but we needed it for, e.g., shape_polymorphism where only JAX's\n # abstract evaluation rules can properly track polymorphic shapes.\n # Unfortunately under op-by-op execution this is a rare occasion where we\n # need abstract evaluation.\n out_aval = primitive.abstract_eval(*args_avals, **params)\n args_tf: Sequence[TfVal] = [t.val for t in tracers]\n def invoke_impl() -> TfVal:\n if impl_needs_avals:\n return impl(\n *args_tf,\n _in_avals=args_avals, # type: ignore\n _out_aval=out_aval,\n **params)\n else:\n return impl(*args_tf, **params)\n\n if _thread_local_state.include_xla_op_metadata:\n op_metadata = xla.make_op_metadata(primitive, params,\n name_stack=_get_current_name_stack(),\n source_info=source_info_util.current())\n op_metadata_proto = xla_data_pb2.OpMetadata(\n op_type=op_metadata.op_type,\n op_name=op_metadata.op_name,\n source_file=op_metadata.source_file,\n source_line=op_metadata.source_line\n )\n with tf_ops.get_default_graph()._attr_scope(\n {\"_XlaOpMetadata\": attr_value_pb2.AttrValue(\n s=op_metadata_proto.SerializeToString())}):\n val_out = invoke_impl()\n else:\n val_out = invoke_impl()\n\n if primitive.multiple_results:\n out = [\n TensorFlowTracer(self, v, a)\n for v, a in zip(val_out, out_aval)\n ] # type: ignore\n else:\n out = TensorFlowTracer(self, val_out, out_aval) # type: ignore\n\n # Check that the impl rule returned a value of expected shape and dtype\n # TODO: adapt this to match polymorphic shapes\n if config.jax_enable_checks:\n if primitive.multiple_results:\n for o, expected_aval in zip(out, out_aval): # type: ignore\n assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (\n f\"{primitive}: out.aval = {o.aval}; expected {expected_aval}\")\n else:\n assert out.aval == out_aval, ( # type: ignore\n f\"{primitive}: out.aval = {out.aval}; expected {out_aval}\"\n ) # type: ignore\n return out # type: ignore\n\n def process_call(self, call_primitive: core.Primitive, fun: lu.WrappedFun,\n tracers: Sequence[TensorFlowTracer], params):\n assert call_primitive.multiple_results\n vals: Sequence[TfVal] = [t.val for t in tracers]\n avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n interpreted_fun = _interpret_subtrace(fun, self.main, avals)\n extra_name_stack = None\n if call_primitive == core.named_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"named\")\n elif call_primitive == xla.xla_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"jit\")\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n if call_primitive == core.named_call_p:\n with tf.name_scope(_sanitize_scope_name(params[\"name\"])):\n vals_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n interpreted_fun.call_wrapped(*vals)\n elif call_primitive == sharded_jit.sharded_call_p:\n vals_out = 
_sharded_call(interpreted_fun, vals, **params)\n elif call_primitive == xla.xla_call_p:\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n # Make a nested tf.function(jit_compile=True)\n store_tf_res_avals = None\n def f_tf(*tf_args):\n nonlocal store_tf_res_avals\n tf_res_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(interpreted_fun, tf_args,\n fresh_constant_cache=False)\n tf_res_vals, tf_res_avals = util.unzip2(tf_res_out)\n store_tf_res_avals = tf_res_avals\n return tf_res_vals\n tf_vals_out = tf.function(f_tf, autograph=False, jit_compile=True)(*vals)\n vals_out = zip(tf_vals_out, store_tf_res_avals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n return [TensorFlowTracer(self, v, a) for v, a in vals_out]\n\n def post_process_call(self, call_primitive: core.Primitive,\n out_tracers: Sequence[TensorFlowTracer], params):\n # We encountered a call primitive, e.g., remat_call_p, whose result\n # (out_tracers) include TensorFlowTracer that were not passed through\n # its arguments (captured from the environment).\n vals = tuple(t.val for t in out_tracers)\n main = self.main\n\n def todo(vals: Sequence[TfVal]):\n # TODO: is name_stack correct?\n trace = TensorFlowTrace(main, core.cur_sublevel())\n return [\n TensorFlowTracer(trace, v, out_tracer.aval)\n for v, out_tracer in zip(vals, out_tracers)\n ]\n\n return vals, todo\n\n def process_map(self, map_primitive, f, tracers, params):\n raise NotImplementedError(\"process_map\")\n\n def post_process_map(self, map_primitive, out_tracers, params):\n raise NotImplementedError(\"post_process_map\")\n\n def process_custom_jvp_call(self, prim, fun, jvp, tracers):\n # Drop the custom differentiation rule and act like a call primitive. This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del jvp # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_jvp_call(self, out_tracers, _):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):\n # Drop the custom differentiation rule and act like a call primitive. 
This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del fwd, bwd, out_trees # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_vjp_call(self, out_tracers, _):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def post_process_custom_vjp_call_fwd(self, *_, **__):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:\n # Returns the primitive implementation and whether the implementation\n # takes abstract values (see definition of tf_impl_with_avals)\n if not _thread_local_state.enable_xla:\n try:\n return tf_impl_no_xla[p], True # Always require avals.\n except KeyError:\n pass\n try:\n return tf_impl[p], False\n except KeyError:\n try:\n return tf_impl_with_avals[p], True\n except KeyError as err:\n msg = \"TensorFlow interpretation rule for '{}' not implemented\"\n raise NotImplementedError(msg.format(p)) from err\n\ndef _unexpected_primitive(p: core.Primitive, *args, **kwargs):\n assert False, f\"Encountered unexpected primitive {p}\"\n\n\n# Call primitives are inlined\nfor unexpected in [core.call_p, core.named_call_p, xla.xla_call_p,\n partial_eval.remat_call_p, sharded_jit.sharded_call_p,\n maps.xmap_p]:\n tf_impl[unexpected] = partial(_unexpected_primitive, unexpected)\n\n# Primitives that are not yet implemented must be explicitly declared here.\ntf_not_yet_impl = [\n \"clz\",\n \"igamma_grad_a\",\n \"random_gamma_grad\",\n \"reduce_precision\",\n \"schur\",\n \"name\",\n\n # Not high priority?\n \"after_all\",\n \"all_to_all\",\n \"create_token\",\n \"custom_transpose_call\",\n \"custom_vmap_call\",\n \"infeed\",\n \"linear_call\",\n \"outfeed\",\n \"pmax_p\",\n \"pmin\",\n \"ppermute\",\n \"psum\",\n \"pmax\",\n \"pgather\",\n \"reduce_scatter\",\n \"axis_index\",\n \"pdot\",\n \"all_gather\",\n \"lu_pivots_to_permutation\",\n \"xla_pmap\",\n]\n\ntf_impl[ad_util.stop_gradient_p] = tf.stop_gradient\ntf_impl[ad_util.zeros_like_p] = tf.zeros_like\n\n\ndef _add(x: TfVal, y: TfVal) -> TfVal:\n return tf.raw_ops.AddV2(x=x, y=y)\n\n\ntf_impl[ad_util.add_jaxvals_p] = _add\ntf_impl[dispatch.device_put_p] = lambda x, device=None: x\ntf_impl[lax_internal.copy_p] = lambda x: x\n\ndef _neg(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[x.dtype]\n x_signed = tf.cast(x, signed_dtype)\n res_signed = tf.math.negative(x_signed)\n return tf.cast(res_signed, x.dtype)\n else:\n return tf.math.negative(x)\n\ntf_impl[lax.neg_p] = _neg\n\n\ndef _sign(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n # TF and XLA do not support tf.math.sign for unsigned types.\n return tf.where(\n tf.math.equal(x, 0), tf.constant(0, dtype=x.dtype),\n tf.constant(1, dtype=x.dtype))\n else:\n return tf.math.sign(x)\n\n\ntf_impl[lax.sign_p] = _sign\ntf_impl[lax.floor_p] = tf.math.floor\ntf_impl[lax.ceil_p] = tf.math.ceil\n\n\ndef _round(operand, *, rounding_method,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:\n # JAX uses a single HLO op Round here\n sign = _sign(operand)\n operand *= sign\n floor = tf.math.floor(operand)\n operand -= floor\n cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))\n return sign * (\n tf.where(cond, tf.constant(np.array(1), operand.dtype),\n tf.math.round(operand)) + floor)\n 
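# For intuition, a quick worked example of the AWAY_FROM_ZERO branch above\n  # (illustrative values only): for operand == -2.5 we get sign == -1,\n  # |operand| == 2.5, floor == 2.0 and a remaining fraction of exactly 0.5, so\n  # the tf.where selects 1 and the branch returns -1 * (1 + 2.0) == -3.0,\n  # i.e. the tie is rounded away from zero.\n  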
else: # rounding_method is RoundingMethod.TO_NEAREST_EVEN\n rounding_fun = _convert_jax_impl(\n lax_internal._round_to_nearest_even, multiple_results=False)\n return rounding_fun(operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\ntf_impl_with_avals[lax.round_p] = _round\ntf_impl[lax.nextafter_p] = tf.math.nextafter\n\n\ndef _population_count(x):\n orig_dtype = x.dtype\n return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)\n\n\ntf_impl[lax.population_count_p] = _population_count\ntf_impl[lax.is_finite_p] = tf.math.is_finite\n\n\ndef _abs(x: TfVal) -> TfVal:\n # TF and XLA do not support tf.math.abs for unsigned types.\n return tf.math.abs(x) if not x.dtype.is_unsigned else x\n\n\ntf_impl[lax.abs_p] = _abs\ntf_impl[lax.pow_p] = tf.math.pow\n\n\ndef _integer_pow(x, *, y: int, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follows the implementation in lax._integer_pow_translation_rule\n if y == 0:\n return tf.broadcast_to(\n tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))\n is_reciprocal = y < 0\n if is_reciprocal:\n y = -y\n acc = None\n while y > 0:\n if y & 1:\n acc = x if acc is None else tf.math.multiply(acc, x)\n y >>= 1\n if y > 0:\n x = tf.math.multiply(x, x)\n return tf.math.reciprocal(acc) if is_reciprocal else acc\n\n\ntf_impl_with_avals[lax.integer_pow_p] = _integer_pow\ntf_impl[lax.exp_p] = tf.math.exp\ntf_impl[lax.expm1_p] = tf.math.expm1\ntf_impl[lax.log_p] = tf.math.log\ntf_impl[lax.log1p_p] = tf.math.log1p\ntf_impl[lax.tan_p] = tf.math.tan\ntf_impl[lax.tanh_p] = tf.math.tanh\ntf_impl[lax.sin_p] = tf.math.sin\ntf_impl[lax.sinh_p] = tf.math.sinh\ntf_impl[lax.cos_p] = tf.math.cos\ntf_impl[lax.cosh_p] = tf.math.cosh\ntf_impl_with_avals[lax.acos_p] = _convert_jax_impl(\n lax_internal.acos_translation_rule, multiple_results=False)\ntf_impl_with_avals[lax.asin_p] = _convert_jax_impl(\n lax_internal.asin_translation_rule, multiple_results=False)\ntf_impl_with_avals[lax.atan_p] = _convert_jax_impl(\n lax_internal.atan_translation_rule, multiple_results=False)\n\ndef _atan2(y, x, **kwargs):\n if x.dtype.is_complex or y.dtype.is_complex:\n complex_component_dtype = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(y.dtype)\n zero = tf.constant(0, complex_component_dtype)\n one = tf.constant(1, complex_component_dtype)\n i = tf.complex(zero, one)\n return -i * tf.math.log((x + i * y)/tf.math.sqrt(x * x + y * y))\n else:\n return tf.math.atan2(y, x)\n\n\ntf_impl[lax.atan2_p] = _atan2\ntf_impl[lax.acosh_p] = tf.math.acosh\ntf_impl[lax.atanh_p] = tf.math.atanh\ntf_impl[lax.asinh_p] = tf.math.asinh\n\ntf_impl[lax.sqrt_p] = tf.math.sqrt\ntf_impl[lax.rsqrt_p] = tf.math.rsqrt\n\ndef _cbrt(x):\n return tf.math.sign(x) * tf.math.pow(tf.math.abs(x), 1/3)\n\ntf_impl[lax.cbrt_p] = _cbrt\n\ntf_impl[lax.lgamma_p] = tf.math.lgamma\ntf_impl[lax.digamma_p] = tf.math.digamma\ntf_impl[lax.igamma_p] = tf.math.igamma\ntf_impl[lax.igammac_p] = tf.math.igammac\ntf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc\ntf_impl[lax.erf_p] = tf.math.erf\ntf_impl[lax.erfc_p] = tf.math.erfc\ntf_impl[lax.erf_inv_p] = tf.math.erfinv\ntf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e\ntf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e\n\ntf_impl[lax.complex_p] = tf.complex\n\n\ndef _conj(x, **kwargs):\n # The only dtypes that are allowed are: float32, float64, complex64, and\n # complex128.\n if x.dtype == tf.float32:\n return tf.cast(x, tf.complex64)\n elif x.dtype == tf.float64:\n return tf.cast(x, tf.complex128)\n else:\n return 
tf.math.conj(x)\n\n\ntf_impl[lax.conj_p] = _conj\ntf_impl[lax.real_p] = tf.math.real\ntf_impl[lax.imag_p] = tf.math.imag\n\ntf_impl[lax.add_p] = _add\ntf_impl[lax.sub_p] = tf.math.subtract\ntf_impl[lax.mul_p] = tf.math.multiply\n\n\ndef _iota(*, dtype, shape, dimension):\n dtype = _to_tf_dtype(dtype)\n # Some dtypes are unsupported, like uint32, so we just fall back to int32.\n # TODO(mattjj, necula): improve tf.range dtype handling\n shape_tf = _eval_shape(shape)\n vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)\n vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]\n return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)\n\n\ntf_impl[lax.iota_p] = _iota\n\n\ndef _div(lhs, rhs):\n if lhs.dtype.is_integer:\n quotient = tf.math.floordiv(lhs, rhs)\n select = tf.math.logical_and(\n tf.not_equal(_sign(lhs), _sign(rhs)),\n tf.not_equal(tf.math.floormod(lhs, rhs), 0))\n return tf.where(select, quotient + 1, quotient)\n else:\n return tf.math.truediv(lhs, rhs)\n\n\ndef _rem(lhs, rhs):\n return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))\n\n\ntf_impl[lax.div_p] = _div\ntf_impl[lax.rem_p] = _rem\n\n\ndef _minmax(x: TfVal, y: TfVal, *, is_min: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,) -> TfVal:\n # For complex numbers use lexicographic ordering, like JAX\n if dtypes.issubdtype(x.dtype.as_numpy_dtype, np.complexfloating):\n return _convert_jax_impl(\n partial(lax_internal._minmax_complex_lowering,\n lax_cmp_pick_x=lax.lt if is_min else lax.gt),\n multiple_results=False)(x, y, _in_avals=_in_avals, _out_aval=_out_aval)\n elif x.dtype.as_numpy_dtype == np.bool_:\n return (tf.math.logical_and if is_min else tf.math.logical_or)(x, y)\n else:\n return (tf.math.minimum if is_min else tf.math.maximum)(x, y)\n\ndef _minmax_scalar(x: TfVal, y: TfVal, *, is_min: bool) -> TfVal:\n # For reducers we will need min/max for scalars only. In that case we\n # can construct the AbstractValues outselves, even in the presence of\n # shape polymorphism.\n assert len(x.shape) == 0 and len(y.shape) == 0, f\"x: {x.shape}, y: {y.shape}\"\n aval = core.ShapedArray((), _to_jax_dtype(x.dtype))\n return _minmax(x, y, is_min=is_min,\n _in_avals=[aval, aval], _out_aval=aval)\n\ntf_impl_with_avals[lax.max_p] = partial(_minmax, is_min=False)\ntf_impl_with_avals[lax.min_p] = partial(_minmax, is_min=True)\n\n# Map from TF signed types to TF unsigned types.\n_SIGNED_TO_UNSIGNED_TABLE = {\n tf.int8: tf.uint8,\n tf.int16: tf.uint16,\n tf.int32: tf.uint32,\n tf.int64: tf.uint64,\n}\n\n# Map from TF unsigned types to TF signed types.\n_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}\n\n\n# Note: Bitwise operations only yield identical results on unsigned integers!\n# pylint: disable=protected-access\ndef _shift_right_arithmetic_raw(x, y):\n if x.dtype.is_unsigned:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]\n x = tf.cast(x, signed_dtype)\n y = tf.cast(y, signed_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n else:\n return tf.bitwise.right_shift(x, y)\n\n\ndef _shift_right_arithmetic(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. 
We implement the XLA\n # semantics to return the shift by the max value (x_bits - 1).\n # TODO: it is likely better to add XlaOps for shifts\n x_bits = 8 * x.dtype.size\n clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)\n return _shift_right_arithmetic_raw(x, clamp_y)\n\n\ntf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic\n\n\ndef _shift_right_logical_raw(x, y):\n if x.dtype.is_unsigned:\n return tf.bitwise.right_shift(x, y)\n else:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]\n x = tf.cast(x, unsigned_dtype)\n y = tf.cast(y, unsigned_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n\n\ndef _shift_right_logical(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_right_logical_p] = _shift_right_logical\n\n\ndef _shift_left(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_left_p] = _shift_left\n\n\ndef _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:\n # Return the TF expression for when y is within bounds (0 <= y < |x|)\n x_bits = 8 * x.dtype.size\n # TF does not have comparisons for uint16 and uint32 (despite what the\n # documentation says)\n y_comp = tf.cast(\n y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y\n y_lt_x_bits = tf.math.less(y_comp, x_bits)\n y_ge_0 = tf.math.greater_equal(y_comp, 0)\n return tf.logical_and(y_lt_x_bits, y_ge_0)\n\n\ndef _not(x):\n \"\"\"Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexpected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply casted to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n \"\"\"\n if x.dtype == tf.bool:\n return tf.logical_not(x)\n else:\n return tf.bitwise.invert(x)\n\n\ntf_impl[lax.not_p] = _not\n\n\ndef bool_to_int8(f, argnums: Sequence[int]):\n \"\"\"Computes functions with some bool args and bool results using int8.\n\n This is needed because some TF ops do not work for bool args, e.g.,\n inequalities, min/max.\n\n Args:\n f: a TF callable to wrap. It will be called with non-boolean arguments.\n argnums: the positional arguments that may be booleans.\n\n Returns: a TF callable that can take a mix of boolean positional arguments\n (in the positions specified by `argnums`) and some non-boolean positional\n arguments. If there are no boolean arguments, just calls `f`. 
Otherwise,\n casts the boolean arguments to `int8`, calls `f`, then casts the result to\n `bool`.\n \"\"\"\n argnums = tf.nest.flatten(argnums)\n\n def wrapper(*args: TfVal, **kwargs):\n argnum_types = {args[i].dtype for i in argnums}\n if tf.bool not in argnum_types:\n return f(*args, **kwargs)\n else:\n # All argnums should be boolean\n assert len(argnum_types) == 1, argnum_types\n args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)\n for i, a in enumerate(args)]\n if \"_in_avals\" in kwargs:\n\n def cast_aval(aval):\n assert aval.dtype == np.bool_\n return core.ShapedArray(aval.shape, np.int8)\n\n _in_avals_cast = [\n cast_aval(aval) if i in argnums else aval\n for i, aval in enumerate(kwargs[\"_in_avals\"])\n ]\n _out_aval_cast = tf.nest.map_structure(cast_aval, kwargs[\"_out_aval\"])\n kwargs = dict(\n kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)\n out = f(*args_cast, **kwargs)\n return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)\n\n return wrapper\n\n\ntf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))\ntf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))\ntf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))\n\ntf_impl[lax.eq_p] = tf.math.equal\ntf_impl[lax.ne_p] = tf.math.not_equal\n\ntf_impl[lax.ge_p] = bool_to_int8(tf.math.greater_equal, argnums=(0, 1))\ntf_impl[lax.gt_p] = bool_to_int8(tf.math.greater, argnums=(0, 1))\ntf_impl[lax.le_p] = bool_to_int8(tf.math.less_equal, argnums=(0, 1))\ntf_impl[lax.lt_p] = bool_to_int8(tf.math.less, argnums=(0, 1))\n\ntf_impl[lax.linalg.cholesky_p] = tf.linalg.cholesky\n\n\ndef _convert_element_type(operand, *, new_dtype, weak_type=False):\n old_dtype = operand.dtype.as_numpy_dtype\n if (dtypes.issubdtype(old_dtype, np.complexfloating) and\n not dtypes.issubdtype(new_dtype, np.complexfloating)):\n operand = tf.math.real(operand)\n if (dtypes.issubdtype(old_dtype, np.floating) and\n not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(\n new_dtype, np.complexfloating) or new_dtype == np.bool_)):\n sign = _sign(operand)\n operand = sign * tf.math.floor(sign * operand)\n return tf.dtypes.cast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.convert_element_type_p] = _convert_element_type\n\n\ndef _bitcast_convert_type(operand, new_dtype):\n if operand.dtype == new_dtype:\n return operand\n return tf.bitcast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type\n\n\ndef _clamp(minval, operand, maxval, *, _in_avals, _out_aval):\n # The below permits mirroring the behavior of JAX when maxval < minval\n op_shape_tf_val = _eval_shape(_in_avals[1].shape)\n maxval = tf.broadcast_to(maxval, op_shape_tf_val)\n minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)\n return tf.clip_by_value(operand, minval, maxval)\n\n\ntf_impl_with_avals[lax.clamp_p] = _clamp\n\n\ndef _concatenate(*operands, dimension):\n return tf.concat(operands, axis=dimension)\n\n\ntf_impl[lax.concatenate_p] = _concatenate\n\n\ndef _conv_general_dimension_numbers_proto(dimension_numbers):\n \"\"\"Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.\"\"\"\n assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n proto = xla_data_pb2.ConvolutionDimensionNumbers()\n proto.input_batch_dimension = lhs_spec[0]\n proto.input_feature_dimension = lhs_spec[1]\n proto.output_batch_dimension = out_spec[0]\n proto.output_feature_dimension = 
out_spec[1]\n proto.kernel_output_feature_dimension = rhs_spec[0]\n proto.kernel_input_feature_dimension = rhs_spec[1]\n proto.input_spatial_dimensions.extend(lhs_spec[2:])\n proto.kernel_spatial_dimensions.extend(rhs_spec[2:])\n proto.output_spatial_dimensions.extend(out_spec[2:])\n return proto\n\n\ndef _precision_config_proto(precision: Optional[Tuple[PrecisionType,\n PrecisionType]]):\n \"\"\"Convert an integer to an XLA.PrecisionConfig.\"\"\"\n if precision is None:\n return None\n\n proto = xla_data_pb2.PrecisionConfig()\n proto.operand_precision.append(int(precision[0]))\n proto.operand_precision.append(int(precision[1]))\n return proto\n\n\ndef _conv_general_dilated(lhs, rhs, *,\n window_strides, padding, lhs_dilation,\n rhs_dilation,\n dimension_numbers: lax.ConvDimensionNumbers,\n feature_group_count: int,\n batch_group_count: int,\n lhs_shape: Sequence[int],\n rhs_shape: Sequence[int],\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.conv_general_dilated_p using XlaConv.\"\"\"\n out_tf_shape = _aval_to_tf_shape(_out_aval)\n dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)\n precision_config_proto = _precision_config_proto(precision)\n\n def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):\n out = tfxla.conv(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n dnums_proto,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision_config=precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n # TODO: implement shape inference for XlaConv\n out.set_shape(out_tf_shape)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n # Follow the lowering for complex convolutions from\n # lax._conv_general_dilated_translation. 
We can use the same conversion on all\n # platforms because on XLA:TPU the compiler does the same as a rewrite.\n preferred_float_et: Optional[Any]\n if np.issubdtype(_in_avals[0].dtype, np.complexfloating):\n if preferred_element_type is not None:\n # Convert complex dtype to types used for real and imaginary parts\n assert np.issubdtype(preferred_element_type, np.complexfloating)\n preferred_float_et = (\n np.float64 if preferred_element_type == np.complex128 else np.float32)\n else:\n preferred_float_et = None\n lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)\n rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)\n k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)\n k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),\n preferred_float_et)\n k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)\n return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))\n else:\n return gen_conv(lhs, rhs, preferred_element_type)\n\n\ntf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated\n\n\ndef _dot_general(lhs, rhs, *, dimension_numbers,\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.dot_general_p in terms of tf.linalg.einsum.\"\"\"\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n dnums_proto = xla_data_pb2.DotDimensionNumbers()\n dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)\n dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)\n dnums_proto.lhs_batch_dimensions.extend(lhs_batch)\n dnums_proto.rhs_batch_dimensions.extend(rhs_batch)\n precision_config_proto = _precision_config_proto(precision)\n res = tfxla.dot_general(\n lhs,\n rhs,\n dnums_proto,\n precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dot_general_p] = _dot_general\n\n\ndef _broadcast_in_dim(operand, *, shape, broadcast_dimensions,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # for i in range(len(operand.shape)):\n # result.shape[bcast_dims[i]] <- operand.shape[i]\n # bcast_dims must be strictly increasing.\n # len(bcast_dims) == len(operand.shape)\n op_shape = _in_avals[0].shape\n add_1s_shape = [1] * len(shape)\n for i, broadcast_dim_i in enumerate(broadcast_dimensions):\n add_1s_shape[broadcast_dim_i] = op_shape[i]\n with_1s = tf.reshape(operand, _eval_shape(add_1s_shape))\n return tf.broadcast_to(with_1s, _eval_shape(shape))\n\n\ntf_impl_with_avals[lax.broadcast_in_dim_p] = _broadcast_in_dim\n\n\ndef _reshape(operand, *, new_sizes, dimensions):\n if dimensions is None:\n dimensions = tf.range(tf.rank(operand))\n new_sizes_tf = _eval_shape(new_sizes)\n return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)\n\n\ntf_impl[lax.reshape_p] = _reshape\n\n\ndef _squeeze(operand, *, dimensions, _in_avals, _out_aval):\n op_shape = _in_avals[0].shape\n new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)\n new_shape_tf = _eval_shape(new_shape)\n return tf.reshape(operand, new_shape_tf)\n\n\ntf_impl_with_avals[lax.squeeze_p] = _squeeze\n\n\ndef _pad(operand, padding_value, *, padding_config,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n low, high, interior = util.unzip3(padding_config)\n out = 
tfxla.pad(operand, padding_value, low, high, interior)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.pad_p] = _pad\n\n\ndef _rev(operand, *, dimensions):\n return tf.reverse(operand, dimensions)\n\n\ntf_impl[lax.rev_p] = _rev\n\ntf_impl[lax.select_p] = tf.where\n\n\ndef _transpose(operand, *, permutation):\n return tf.transpose(operand, perm=permutation)\n\n\ntf_impl[lax.transpose_p] = _transpose\n\naxes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)\n\n# reduce_sum and reduce_prod are not supported for bool\ntf_impl[lax.reduce_sum_p] = axes_to_axis(tf.reduce_sum)\ntf_impl[lax.reduce_prod_p] = axes_to_axis(tf.reduce_prod)\ntf_impl[lax.reduce_max_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_max), argnums=[0]))\ntf_impl[lax.reduce_min_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_min), argnums=[0]))\ntf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)\ntf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)\n\n\ndef _argminmax(is_min: bool, operand: TfVal, axes: Sequence[int],\n index_dtype: DType,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follow the JAX implementation, using a XlaReduce with a custom comparator\n if is_min:\n extra_name_stack = \"argmin\"\n value_comparator = lax.lt\n get_identity = lax_internal._get_min_identity\n else:\n extra_name_stack = \"argmax\"\n value_comparator = lax.gt\n get_identity = lax_internal._get_max_identity\n\n res = _convert_jax_impl(\n partial(lax_internal._compute_argminmax, value_comparator, get_identity),\n multiple_results=False,\n extra_name_stack=extra_name_stack)(\n operand,\n index_dtype=index_dtype,\n axes=axes,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[lax.argmin_p] = partial(_argminmax, True)\ntf_impl_with_avals[lax.argmax_p] = partial(_argminmax, False)\n\n\n_add_fn = tf.function(_add, autograph=False)\n_ge_fn = tf.function(tf.math.greater_equal, autograph=False)\n\n\ndef _select_and_gather_add(\n tangents: TfVal, operand: TfVal, select_prim: core.Primitive,\n window_dimensions: Sequence[int], window_strides: Sequence[int],\n base_dilation: Sequence[int], window_dilation: Sequence[int],\n padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Note: this function follows the pattern in\n # jax.lax._select_and_gather_add_translation.\n dtype = operand.dtype\n nbits = dtypes.finfo(dtype.as_numpy_dtype).bits\n\n # Specializing the function for 64 bits. 
Only up to 32 bits are supported on TPU,\n # we thus intend to let the code throw a different exception on this platform.\n max_bits = 64\n\n assert nbits <= max_bits\n double_word_reduction = nbits * 2 <= max_bits\n\n const = lambda dtype, x: tf.constant(np.array(x), dtype)\n\n if double_word_reduction:\n word_dtype = lax_internal._UINT_DTYPES[nbits]\n double_word_dtype = lax_internal._UINT_DTYPES[nbits * 2]\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = _bitcast_convert_type(a, word_dtype)\n b = _bitcast_convert_type(b, word_dtype)\n a = _convert_element_type(a, new_dtype=double_word_dtype)\n b = _convert_element_type(b, new_dtype=double_word_dtype)\n a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))\n return tf.bitwise.bitwise_or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(t):\n assert t.dtype == double_word_dtype\n st = _shift_right_logical(t, const(double_word_dtype, nbits))\n return _bitcast_convert_type(\n _convert_element_type(st, new_dtype=word_dtype), dtype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return _bitcast_convert_type(\n _convert_element_type(t, new_dtype=word_dtype), dtype)\n\n else:\n raise NotImplementedError(\n f\"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.\"\n )\n\n assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim\n\n def reducer(x, y):\n which = tf_impl[select_prim]\n return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)\n\n init = -np.inf if select_prim is lax.ge_p else np.inf\n init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))\n\n out = _specialized_reduce_window(\n reducer,\n init_identity,\n pack(operand, tangents),\n window_dimensions=window_dimensions,\n window_strides=window_strides,\n padding=padding,\n base_dilation=base_dilation,\n window_dilation=window_dilation,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n return snd(out)\n\n\ntf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add\n\n\ndef _get_shape_from_tensor_or_array(x):\n if isinstance(x.shape, tf.TensorShape):\n return tuple(x.shape.as_list())\n return tuple(x.shape)\n\n\ndef _common_reduce_window(operand, init_val, reducer, window_dimensions,\n window_strides, padding, base_dilation,\n window_dilation, _in_avals, _out_aval):\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n reducer_fn = tf.function(\n reducer, autograph=False).get_concrete_function(o_spec, o_spec)\n\n if not isinstance(init_val, (tf.Tensor, tf.Variable)):\n init_val = tf.constant(init_val, operand.dtype)\n out = tfxla.reduce_window(\n operand,\n init_val,\n reducer_fn,\n window_dimensions,\n window_strides,\n base_dilations=base_dilation,\n window_dilations=window_dilation,\n padding=padding)\n # TODO: implement shape inference for XlaReduceWindow\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ndef _reduce_window(*args, jaxpr, consts, window_dimensions,\n window_strides, padding, base_dilation, window_dilation,\n _in_avals, _out_aval):\n \"\"\"TensorFlow implementation of reduce_window.\n\n Args:\n operands: N dimensional arrays containing elements of type T\n init_values: starting values of the reduction\n jaxpr: the jaxpr corresponding to the reduction function\n consts: the constants associated with jaxpr.\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs 
of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n\n Returns:\n The reduced operand.\n \"\"\"\n assert len(consts) == 0, \"Reduction computation cannot have constants\"\n operands, init_values = util.split_list(args, [len(args) // 2])\n\n if len(operands) != 1:\n raise NotImplementedError(\"jax2tf does not support variadic reduce_window\")\n\n def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n return (_common_reduce_window(operands[0], init_values[0], reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval[0]),)\n\n\n\ndef _specialized_reduce_window(reducer,\n identity,\n operand,\n *,\n window_dimensions,\n window_strides,\n padding,\n base_dilation,\n window_dilation,\n _in_avals,\n _out_aval,\n name=None):\n \"\"\"Wraps the TensorFlow reduce window operation based on a reducer and an\n\n identity function defining the initial value of the reduction depending on\n the dtype of the operand.\n\n Args:\n reducer: reduction function of type TfVal -> TfVal -> TfVal\n identity: function that takes a TensorFlow dtype as a parameter and returns\n the starting value of the reduction.\n operand: N dimensional array containing elements of type T\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n name: the name of the specialized reduce window primitive for which this\n conversion function is called. 
This information may help to choose a\n different conversion path (optional)\n\n Returns:\n The reduced operand.\n \"\"\"\n return _common_reduce_window(operand, identity(operand.dtype), reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval)\n\n\ndef _get_max_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(-np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).min\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined max identity\")\n return False\n\n\ndef _get_min_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).max\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined min identity\")\n return True\n\n\n# pylint: disable=protected-access\ntf_impl_with_avals[lax.reduce_window_sum_p] = (\n partial(_specialized_reduce_window, _add, lambda x: 0,\n name=\"reduce_window_sum\"))\ntf_impl_with_avals[lax.reduce_window_min_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=True),\n _get_min_identity,\n name=\"reduce_window_min\"))\ntf_impl_with_avals[lax.reduce_window_max_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=False),\n _get_max_identity,\n name=\"reduce_window_max\"))\ntf_impl_with_avals[lax.reduce_window_p] = _reduce_window\n# pylint: enable=protected-access\n\ndef _reduce(*operands: TfVal,\n computation: Callable,\n jaxpr: core.Jaxpr,\n consts: Sequence[Any],\n dimensions: Sequence[int],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> Sequence[TfVal]:\n del computation\n assert not consts\n assert len(operands) % 2 == 0\n # operands: op1, op2, ..., init_val1, init_val2, ...\n # reducer takes op1[i], op2[i], ..., init_val1, init_val2, ...\n nr_operands = len(operands) // 2\n init_vals = operands[nr_operands:]\n operands = operands[0:nr_operands]\n\n reducer_arg_spec = tuple([tf.TensorSpec((), op.dtype) for op in init_vals] * 2)\n\n def reducer_computation(*args: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res = _interpret_jaxpr(closed_jaxpr, *args, extra_name_stack=None)\n return res\n\n xla_reducer_computation = (\n tf.function(reducer_computation,\n autograph=False).get_concrete_function(*reducer_arg_spec))\n\n outs = tfxla.variadic_reduce(operands, init_vals,\n dimensions_to_reduce=dimensions,\n reducer=xla_reducer_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n outs = tuple(tf.stop_gradient(out) for out in outs) # See #7839\n return outs\n\ntf_impl_with_avals[lax.reduce_p] = _reduce\n\n\n# We use lax._cumred_tpu_translation_rule to convert cummax,\n# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is\n# O(n^2) on other backends. 
This may be implemented using associative_scan\n# instead to favor different backends.\ndef _cumred(lax_reduce_fn: Callable,\n lax_reduce_window_fn: Callable,\n extra_name_stack: str):\n if config.jax2tf_associative_scan_reductions:\n return _convert_jax_impl(partial(lax_control_flow.associative_scan,\n lax_reduce_fn),\n multiple_results=False,\n extra_name_stack=extra_name_stack)\n else:\n return _convert_jax_impl(partial(lax_control_flow._cumred_tpu_translation_rule,\n lax_reduce_window_fn),\n multiple_results=False,\n extra_name_stack=extra_name_stack)\n\n\ntf_impl_with_avals[lax.cummax_p] = _cumred(lax_reduce_window_fn=lax._reduce_window_max,\n lax_reduce_fn=lax.max,\n extra_name_stack=\"cummax\")\ntf_impl_with_avals[lax.cummin_p] = _cumred(lax_reduce_window_fn=lax._reduce_window_min,\n lax_reduce_fn=lax.min,\n extra_name_stack=\"cummin\")\ntf_impl_with_avals[lax.cumsum_p] = _cumred(lax_reduce_window_fn=lax._reduce_window_sum,\n lax_reduce_fn=lax.add,\n extra_name_stack=\"cumsum\")\ntf_impl_with_avals[lax.cumprod_p] = _cumred(lax_reduce_window_fn=lax._reduce_window_prod,\n lax_reduce_fn=lax.mul,\n extra_name_stack=\"cumprod\")\n\n\ndef _select_and_scatter(operand, source, init_value, select_jaxpr,\n select_consts, scatter_jaxpr, scatter_consts,\n window_dimensions, window_strides, padding):\n raise NotImplementedError(\"TODO: jax2tf can not convert _select_and_scatter\")\n\n\ntf_impl[lax.select_and_scatter_p] = _select_and_scatter\n\n\n@partial(bool_to_int8, argnums=(0, 1))\ndef _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,\n window_strides, padding, _in_avals, _out_aval):\n init_value = tf.zeros((), operand.dtype)\n select_fn = (\n tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(\n init_value, init_value))\n scatter_fn = _add_fn.get_concrete_function(init_value, init_value)\n out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,\n padding, source, init_value, select_fn,\n scatter_fn)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add\n\n\ndef _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):\n res = _convert_jax_impl(\n partial(jax._src.prng._threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True, extra_name_stack=\"threefry\")(\n *args, _in_avals=_in_avals, _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[jax._src.prng.threefry2x32_p] = _threefry2x32_jax_impl\n\n# Use the vmap implementation, otherwise on TPU the performance is really bad\n# With use_vmap=True on, we get about the same performance for JAX and jax2tf.\ntf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(\n partial(jax._src.random._gamma_impl, use_vmap=True),\n multiple_results=False, extra_name_stack=\"random_gamma\")\n\n\ndef _rng_bit_generator(key: TfVal, *, shape, dtype, algorithm) -> Sequence[TfVal]:\n shape_tf = _eval_shape(shape)\n # JAX uses XLA algorithm enums; tfxla uses tf.random.Algorithm\n if algorithm == lax.RandomAlgorithm.RNG_THREE_FRY:\n algorithm_tf = tf.random.Algorithm.THREEFRY\n elif algorithm == lax.RandomAlgorithm.RNG_PHILOX:\n algorithm_tf = tf.random.Algorithm.PHILOX\n elif algorithm == lax.RandomAlgorithm.RNG_DEFAULT:\n algorithm_tf = tf.random.Algorithm.AUTO_SELECT\n else:\n assert False\n outs = tfxla.rng_bit_generator(algorithm_tf.value, key, shape_tf,\n dtype=_to_tf_dtype(dtype))\n if 
_WRAP_JAX_JIT_WITH_TF_FUNCTION:\n outs = tuple(tf.stop_gradient(out) for out in outs) # See #7839\n return outs\n\n\ntf_impl[lax.rng_bit_generator_p] = _rng_bit_generator\n\n\ndef _rng_uniform(minval: TfVal, maxval: TfVal, *, shape) -> TfVal:\n shape_tf = _eval_shape(shape)\n return tf.random.uniform(shape_tf, minval=minval, maxval=maxval, dtype=minval.dtype)\n\ntf_impl[lax.rng_uniform_p] = _rng_uniform\n\n\ndef _gather_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.GatherDimensionNumbers()\n proto.offset_dims.extend(dimension_numbers.offset_dims)\n proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)\n proto.start_index_map.extend(dimension_numbers.start_index_map)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\n@partial(bool_to_int8, argnums=[0])\ndef _gather(operand, start_indices, *, dimension_numbers, slice_sizes: core.Shape,\n indices_are_sorted, unique_indices, mode, fill_value,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Tensorflow implementation of gather.\"\"\"\n if mode == lax.GatherScatterMode.FILL_OR_DROP:\n gather_fill_fn = _convert_jax_impl(lax_slicing._gather_fill,\n multiple_results=False)\n return gather_fill_fn(\n operand, start_indices, dimension_numbers=dimension_numbers,\n slice_sizes=slice_sizes, unique_indices=unique_indices,\n indices_are_sorted=indices_are_sorted, fill_value=fill_value,\n output_shape=_out_aval.shape, _in_avals=_in_avals, _out_aval=_out_aval)\n\n proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)\n slice_sizes_tf = _eval_shape(slice_sizes)\n out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf,\n indices_are_sorted)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.gather_p] = _gather\n\n\ndef _slice(operand, start_indices, limit_indices, strides, _in_avals,\n _out_aval):\n if strides is None:\n strides = [1] * len(start_indices)\n slices = tuple(\n map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),\n _eval_shape(strides)))\n out = operand[slices]\n # TODO(b/184503314): improve shape inference for __getitem__\n # E.g., operand.shape=(b, 5, 3), start_indices=(0, 1, 1), limit_indices=(b, 5, 3), strides=(1, 2, 1)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.slice_p] = _slice\n\n\ndef _dynamic_slice(operand, *start_indices, slice_sizes: core.Shape,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n start_indices = tf.stack(start_indices)\n slice_sizes_tf = _eval_shape(slice_sizes)\n\n res = tfxla.dynamic_slice(operand, start_indices, size_indices=slice_sizes_tf)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice\n\n\ndef _dynamic_update_slice(operand, update, *start_indices,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n out = tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.dynamic_update_slice_p] = _dynamic_update_slice\n\n\ndef _scatter_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.ScatterDimensionNumbers()\n proto.update_window_dims.extend(dimension_numbers.update_window_dims)\n 
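# The extend() calls in this helper copy JAX's ScatterDimensionNumbers fields\n  # one-for-one; only index_vector_dim is computed, since JAX's scatter\n  # convention keeps the index vector in the trailing axis of scatter_indices\n  # (hence len(indices_shape) - 1 below).\n  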
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)\n proto.scatter_dims_to_operand_dims.extend(\n dimension_numbers.scatter_dims_to_operand_dims)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\ndef _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,\n dimension_numbers, indices_are_sorted, unique_indices, mode,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n del unique_indices\n\n if mode == lax.GatherScatterMode.CLIP:\n clip_fn = _convert_jax_impl(lax_slicing._clamp_scatter_indices,\n multiple_results=False)\n scatter_indices = clip_fn(\n operand, scatter_indices, updates, dnums=dimension_numbers,\n _in_avals=_in_avals, _out_aval=_in_avals[1])\n\n assert len(update_consts) == 0, \"Update computation cannot have constants\"\n\n proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)\n\n def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n xla_update_computation = (\n tf.function(update_computation,\n autograph=False).get_concrete_function(o_spec, o_spec))\n out = tfxla.scatter(\n operand,\n scatter_indices,\n updates,\n xla_update_computation,\n proto,\n indices_are_sorted=indices_are_sorted)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.scatter_p] = _scatter\ntf_impl_with_avals[lax.scatter_min_p] = _scatter\ntf_impl_with_avals[lax.scatter_max_p] = _scatter\ntf_impl_with_avals[lax.scatter_mul_p] = _scatter\ntf_impl_with_avals[lax.scatter_add_p] = _scatter\n\n\ndef _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],\n linear: Sequence[bool]) -> Sequence[TfVal]:\n del linear\n # tf.cond needs lambdas with no arguments.\n branches_tf = [\n partial(_interpret_jaxpr, jaxpr, *operands,\n # Same name stack as the XLA translation of cond_p\n extra_name_stack=f\"branch_{i}_fun\")\n for jaxpr in branches\n for i, jaxpr in enumerate(branches)\n ]\n return tf.switch_case(index, branches_tf)\n\n\ntf_impl[lax.cond_p] = _cond\n\n\ndef _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,\n body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]\n # The conditional is not a scalar, this must be a batched while\n return _batched_cond_while(\n *args,\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr,\n body_nconsts=body_nconsts,\n body_jaxpr=body_jaxpr)\n\n # The conditional must return a single value to TF\n def cond_tf_func(*args: TfVal) -> TfVal:\n pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args,\n # Same name stack as the XLA translation of while_p\n extra_name_stack=\"while/cond\")\n return pred\n\n body_tf_func = partial(_interpret_jaxpr, body_jaxpr, *body_consts,\n extra_name_stack=\"while/body\")\n return tf.while_loop(cond_tf_func, body_tf_func, init_carry)\n\n\ndef _batched_cond_while(*args: TfVal, cond_nconsts: int,\n cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,\n body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n \"\"\"Interprets a while_loop with a batched condition.\n\n A batched while has a conditional that returns a tensor of booleans, and\n a body 
that returns a list of tensors whose leading dimensions match those\n of the conditional tensor.\n\n We need to turn it into a while with scalar boolean conditional. We will\n expand the loop carry to include a prefix with the current tensor boolean\n condition. We prepend to the loop the first calculation of the tensor boolean\n condition. The loop condition will use a \"reduce_any\" to calculate a scalar\n boolean from the tensor boolean condition. The end of the loop body will\n compute the new carry using a \"tf.where\", and we compute the new tensor\n boolean condition.\n \"\"\"\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n # Initial computation of batched condition\n init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry,\n extra_name_stack=\"while/body_pred\")\n assert init_pred_b is not core.unit\n\n def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:\n pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))\n return pred\n\n def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:\n new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,\n *carry,\n extra_name_stack=\"while/body\")\n # We repeat those carries for which the loop termination condition is false\n def select_one_carry(new_c: TfVal, c: TfVal, c_aval: core.ShapedArray) -> TfVal:\n pred_b_bcast = _broadcast_in_dim(\n pred_b,\n shape=c_aval.shape, # a JAX shape\n broadcast_dimensions=list(range(len(pred_b.shape))),\n _in_avals=cond_jaxpr.out_avals,\n _out_aval=core.ShapedArray(c_aval.shape, np.bool_))\n return tf.where(pred_b_bcast, new_c, c)\n\n selected_carry: Sequence[TfVal] = list(map(select_one_carry, new_carry, carry, body_jaxpr.out_avals))\n next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry,\n extra_name_stack=\"body_pred\")\n return (next_pred_b, *selected_carry)\n\n _, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,\n (init_pred_b, *init_carry))\n return res_carry\n\n\ntf_impl[lax.while_p] = _while\n\n# We use the scan impl rule to rewrite in terms of while.\ntf_impl_with_avals[lax.scan_p] = _convert_jax_impl(\n lax_control_flow._scan_impl,\n extra_name_stack=\"scan\")\n\ntf_impl_with_avals[ad_checkpoint.remat_p] = \\\n _convert_jax_impl(partial(lax_control_flow._remat_translation_rule,\n # TODO: jax2tf cannot discriminate by platform\n platform=\"tpu\"),\n multiple_results=True,\n extra_name_stack=\"checkpoint\")\n\ndef _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:\n # Some types originally incompatible with tf.math.top_k can be promoted\n # to a compatible type without loss of precision.\n def promote_tf_dtype(tf_dtype):\n if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:\n return tf.uint32\n if tf_dtype in [tf.int8, tf.int16]:\n return tf.int32\n if tf_dtype is tf.float16:\n return tf.float32\n return None\n\n conversion_dtype = promote_tf_dtype(operand.dtype)\n if conversion_dtype:\n values, indices = tf.math.top_k(\n tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)\n return tf.dtypes.cast(values, operand.dtype), indices\n else:\n return tf.math.top_k(operand, k=k, sorted=True)\n\n\ntf_impl[lax.top_k_p] = _top_k\n\n\ndef _sort(*operands: TfVal, dimension: int, is_stable: bool,\n num_keys: int) -> Tuple[TfVal, ...]:\n assert 1 <= num_keys <= len(operands)\n assert 0 <= dimension < len(\n operands[0].shape\n ), f\"Invalid {dimension} for ndim {len(operands[0].shape)}\"\n\n comparator_spec: List[tf.TensorSpec] = []\n comparator_jax_in_avals: 
List[core.ShapedArray] = []\n for op in operands:\n o_spec = tf.TensorSpec((), dtype=op.dtype)\n comparator_spec.extend([o_spec, o_spec])\n o_aval = core.ShapedArray((), _to_jax_dtype(op.dtype))\n comparator_jax_in_avals.extend([o_aval, o_aval])\n\n # Use the same comparator that JAX uses when compiling to XLA, to get the\n # proper NaN/Inf total order, and the lexicographic ordering.\n # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]\n # corresponding to two scalars from operand[k].\n def lexicographic_comparator(*tf_args: TfVal) -> TfVal:\n return _convert_jax_impl(\n lax_internal._sort_lt_comparator, multiple_results=False)(\n *tf_args,\n _in_avals=comparator_jax_in_avals,\n _out_aval=core.ShapedArray((), np.bool_),\n num_keys=num_keys)\n\n xla_comparator_computation = (\n tf.function(lexicographic_comparator,\n autograph=False).get_concrete_function(*comparator_spec))\n results = tfxla.variadic_sort(\n operands,\n dimension=dimension,\n is_stable=is_stable,\n comparator=xla_comparator_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n results = tuple(tf.stop_gradient(out) for out in results) # See #7839\n return results\n\n\ntf_impl[lax.sort_p] = _sort\n\n\ndef _fft(x, fft_type, fft_lengths):\n FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))\n if fft_type == IRFFT:\n expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)\n else:\n expected_lengths = x.shape[-len(fft_lengths):]\n if expected_lengths != fft_lengths:\n raise NotImplementedError(\n f\"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of \"\n f\"array with shape={x.shape}.\")\n tf_funcs = {\n FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],\n IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],\n RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],\n IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]\n }\n return tf_funcs[fft_type][len(fft_lengths) - 1](x)\n\n\ntf_impl[lax.fft_p] = _fft\n\n\ndef _qr(operand, full_matrices):\n return tf.linalg.qr(operand, full_matrices=full_matrices)\n\n\ntf_impl[lax.linalg.qr_p] = _qr\n\n\ndef _svd(operand, full_matrices, compute_uv):\n result = tf.linalg.svd(operand, full_matrices, compute_uv)\n if not compute_uv:\n return result,\n s, u, v = result\n return s, u, tf.linalg.adjoint(v)\n\n\ntf_impl[lax.linalg.svd_p] = _svd\n\n\ndef _eig(operand: TfVal, compute_left_eigenvectors: bool,\n compute_right_eigenvectors: bool):\n if compute_left_eigenvectors and compute_right_eigenvectors:\n # TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to\n # sort the left eigenvectors in the right order. 
The jax.numpy.linalg API\n # suggests to me that left eigenvectors are anyway seldom used, so I\n # think it is acceptable to leave as unimplemented for now.\n msg = (\"Conversion of eig is not implemented when both \"\n \"compute_left_eigenvectors and compute_right_eigenvectors are set \"\n \"to True.\")\n raise NotImplementedError(msg)\n elif not (compute_left_eigenvectors or compute_right_eigenvectors):\n return tuple([tf.linalg.eigvals(operand)])\n elif compute_right_eigenvectors:\n return tuple(tf.linalg.eig(operand))\n else: # compute_left_eigenvectors == True\n wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))\n wHH = tf.math.conj(wH)\n return tuple([wHH, vl])\n\n\ntf_impl[lax.linalg.eig_p] = _eig\n\n\ndef _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):\n if operand.shape[-1] == 0:\n v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))\n else:\n if not lower:\n operand = tf.linalg.adjoint(operand)\n w, v = tf.linalg.eigh(operand)\n cast_type = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(operand.dtype)\n if cast_type is not None:\n w = tf.cast(w, cast_type)\n return v, w\n\n\ntf_impl_with_avals[lax.linalg.eigh_p] = _eigh\n\n\ndef _lu(operand: TfVal, _in_avals, _out_aval):\n return _convert_jax_impl(lax_linalg._lu_python, extra_name_stack=\"lu\")(\n operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.lu_p] = _lu\n\n\ndef _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,\n transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if unit_diagonal:\n a_aval, _ = _in_avals\n a_shape = _eval_shape(a_aval.shape)\n a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))\n if not left_side:\n rank = len(a.shape)\n transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]\n a = tf.transpose(a, transpose_dimensions)\n b = tf.transpose(b, transpose_dimensions)\n lower = not lower\n # adjoint == transpose for real dtypes, so special care need only be taken\n # for complex types.\n if a.dtype in [tf.complex64, tf.complex128]:\n if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):\n a = tf.math.conj(a)\n result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)\n if not left_side:\n result = tf.transpose(result, transpose_dimensions)\n return result\n\n\ntf_impl_with_avals[lax.linalg.triangular_solve_p] = _triangular_solve\n\n\ndef _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):\n return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl,\n extra_name_stack=\"linear_solve\")(\n *args,\n const_lengths=const_lengths,\n jaxprs=jaxprs,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linear_solve_p] = _linear_solve\n\ndef _tridiagonal_solve(*args: TfVal, _in_avals, _out_aval, **params):\n return _convert_jax_impl(lax_linalg._tridiagonal_solve_jax,\n multiple_results=False,\n extra_name_stack=\"tridiagonal_solve\")(\n *args,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.tridiagonal_solve_p] = _tridiagonal_solve\n\ndef _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n jvp_jaxpr_thunk: Callable,\n num_consts: int) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_jvp\")\n\n\ntf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = 
_custom_jvp_call_jaxpr\n\n\ndef _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n **_) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_vjp\")\n\n\ntf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr\n\n\ndef _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:\n raise TypeError(\"can't apply forward-mode autodiff (jvp) to a custom_vjp \"\n \"function.\")\n\n\ntf_impl[ad.custom_lin_p] = _custom_lin\n\n\ndef split_to_logical_devices(tensor: TfVal,\n partition_dimensions: pxla.PartitionsOrReplicated):\n \"\"\"Like TPUMPStrategy.experimental_split_to_logical_devices.\n\n For jax2tf purposes we want to avoid needing to thread the `strategy` object\n through the generated computation. It seems that the original function needs\n the strategy object only for error checking, which we assume is done upstream\n by JAX.\n\n Args:\n tensor: Input tensor to annotate.\n partition_dimensions: A list of integers, with one integer per tensor\n dimension, specifying in how many parts the dimension should be split. The\n product of integers must equal the number of devices per replica.\n use_sharding_op: whether to use a sharding op, or not.\n\n Returns:\n an annotated tensor.\n \"\"\"\n # TODO: this is only for sharded_jit. Either remove, or implement in terms\n # of _shard_values.\n if partition_dimensions is None:\n return xla_sharding.replicate(tensor, use_sharding_op=True)\n num_partition_splits = np.prod(partition_dimensions)\n tile_assignment = np.arange(num_partition_splits).reshape(\n partition_dimensions)\n return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)\n\n\ndef _shard_value(mesh: maps.Mesh,\n val: TfVal,\n aval: core.ShapedArray,\n axis_resources: pjit.ParsedPartitionSpec) -> TfVal:\n \"\"\"Apply sharding to a TfVal.\"\"\"\n sharding_proto: xla_client.OpSharding = pjit.get_aval_sharding_proto(\n aval, axis_resources, mesh)\n # To use xla_sharding.py, we must have a xla_data_pb2.OpSharding.\n xla_sharding_proto: xla_data_pb2.OpSharding = (\n xla_data_pb2.OpSharding(\n type=int(sharding_proto.type),\n tile_assignment_dimensions=sharding_proto.tile_assignment_dimensions,\n tile_assignment_devices=sharding_proto.tile_assignment_devices,\n replicate_on_last_tile_dim=sharding_proto.replicate_on_last_tile_dim))\n return xla_sharding.Sharding(proto=xla_sharding_proto).apply_to_tensor(\n val, use_sharding_op=True)\n\n\ndef _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],\n in_parts: Sequence[pxla.PartitionsOrReplicated],\n out_parts_thunk,\n **_) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n sharded_vals = map(split_to_logical_devices, vals, in_parts)\n vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel\n out_parts_flat = out_parts_thunk()\n assert len(out_parts_flat) == len(\n vals_out), f\"expected {len(out_parts_flat)} == {len(vals_out)}\"\n sharded_vals_out = [\n (split_to_logical_devices(val, val_part), val_aval)\n for (val, val_aval), val_part in zip(vals_out, out_parts_flat)\n ]\n return sharded_vals_out\n\n\ndef _sharded_jit_sharding_constraint(arg: TfVal, *,\n partitions: pxla.PartitionsOrReplicated,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n del _in_avals, _out_aval\n return split_to_logical_devices(arg, partitions)\n\n\ntf_impl_with_avals[sharded_jit.sharding_constraint_p] = _sharded_jit_sharding_constraint\n\n\ndef _pjit(*args: TfVal,\n jaxpr: core.ClosedJaxpr,\n 
in_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n out_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n resource_env: maps.ResourceEnv,\n donated_invars,\n name: str,\n in_positional_semantics,\n out_positional_semantics,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> TfVal:\n del donated_invars\n if resource_env.physical_mesh.is_multi_process:\n raise NotImplementedError(\"jax2tf translation for pjit over multi-process \"\n \"meshes is not supported yet\")\n # TODO: add `name` to the name stack\n shard_value_for_mesh = partial(_shard_value, resource_env.physical_mesh)\n # Apply sharding annotation to the arguments\n sharded_args: Sequence[TfVal] = tuple(\n map(shard_value_for_mesh, args, _in_avals, in_axis_resources))\n results = _interpret_jaxpr(jaxpr, *sharded_args,\n extra_name_stack=util.wrap_name(name, \"pjit\"))\n sharded_results: Sequence[TfVal] = tuple(\n map(shard_value_for_mesh, results, _out_aval, out_axis_resources))\n return tuple(sharded_results)\n\n\ntf_impl_with_avals[pjit.pjit_p] = _pjit\n\n\ndef _pjit_sharding_constraint(arg: TfVal, *,\n axis_resources: pjit.ParsedPartitionSpec,\n resource_env: maps.ResourceEnv,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> TfVal:\n return _shard_value(resource_env.physical_mesh, arg, _in_avals[0], axis_resources)\n\n\ntf_impl_with_avals[pjit.sharding_constraint_p] = _pjit_sharding_constraint\n\ndef _dimension_size_jax2tf(op: TfVal, *, dimension):\n return tf.shape(op)[dimension]\n\ntf_impl[shape_poly.dimension_size_p] = _dimension_size_jax2tf\n\ndef _dim_as_value_jax2tf(dim: shape_poly.DimSize):\n dim_tf, = _eval_shape((dim,))\n return dim_tf\n\ntf_impl[shape_poly.dim_as_value_p] = _dim_as_value_jax2tf\n\ndef _register_checkpoint_pytrees():\n \"\"\"Registers TF custom container types as pytrees.\"\"\"\n m = tf.Module()\n # The types here are automagically changed by TensorFlow's checkpointing\n # infrastructure.\n m.a = (tf.Module(), tf.Module())\n m.b = [tf.Module(), tf.Module()]\n m.c = {\"a\": tf.Module()}\n tuple_wrapper = type(m.a)\n list_wrapper = type(m.b)\n dict_wrapper = type(m.c)\n\n # TF AutoTrackable swaps container types out for wrappers.\n assert tuple_wrapper is not tuple\n assert list_wrapper is not list\n assert dict_wrapper is not dict\n\n jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:\n (tuple(xs), None), lambda _, xs: tuple(xs))\n\n jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),\n lambda _, xs: list(xs))\n\n jax.tree_util.register_pytree_node(\n dict_wrapper,\n lambda s: (tuple(s.values()), tuple(s.keys())),\n lambda k, xs: dict(zip(k, xs)))\n\n\n_register_checkpoint_pytrees()\n", "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for GlobalDeviceArray.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport jax\nfrom jax import core\nfrom jax._src import test_util as jtu\nfrom jax._src.util import 
prod, safe_zip\n\nfrom jax.experimental import PartitionSpec as P\nfrom jax.experimental.global_device_array import GlobalDeviceArray\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n\nclass GDATest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(\n (\"mesh_x_y\", [\"x\", \"y\"],\n # There are more slices but for convienient purposes, checking for only\n # 2. The indices + shard_shape + replica_id should be unique enough.\n ((slice(0, 2), slice(0, 1)), (slice(0, 2), slice(1, 2))),\n (2, 1),\n [0, 0, 0, 0, 0, 0, 0, 0], False),\n (\"mesh_x_y_pspec\", P(\"x\", \"y\"),\n ((slice(0, 2), slice(0, 1)), (slice(0, 2), slice(1, 2))),\n (2, 1),\n [0, 0, 0, 0, 0, 0, 0, 0], False),\n (\"mesh_x\", [\"x\"],\n ((slice(0, 2), slice(None)), (slice(0, 2), slice(None))),\n (2, 2),\n [0, 1, 0, 1, 0, 1, 0, 1], False),\n (\"mesh_y\", [\"y\"],\n ((slice(0, 4), slice(None)), (slice(4, 8), slice(None))),\n (4, 2),\n [0, 0, 1, 1, 2, 2, 3, 3], False),\n (\"mesh_none_y\", [None, \"y\"],\n ((slice(None), slice(0, 1)), (slice(None), slice(1, 2))),\n (8, 1),\n [0, 0, 1, 1, 2, 2, 3, 3], False),\n (\"mesh_xy\", [(\"x\", \"y\")],\n ((slice(0, 1), slice(None)), (slice(1, 2), slice(None))),\n (1, 2),\n [0, 0, 0, 0, 0, 0, 0, 0], False),\n (\"mesh_fully_replicated\", [],\n ((slice(None), slice(None)), (slice(None), slice(None))),\n (8, 2),\n [0, 1, 2, 3, 4, 5, 6, 7], True),\n )\n def test_gda_2d_shard(self, mesh_axes, expected_index, expected_shard_shape,\n expected_replica_ids, expected_is_fully_replicated):\n global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n\n gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,\n mesh_axes, cb)\n self.assertEqual(gda.local_shards[0].index, expected_index[0])\n self.assertArraysEqual(gda.local_data(0),\n global_input_data[expected_index[0]])\n self.assertEqual(gda.local_shards[1].index, expected_index[1])\n self.assertArraysEqual(gda.local_data(1),\n global_input_data[expected_index[1]])\n self.assertEqual(gda.local_data(0).shape, expected_shard_shape)\n replica_ids = [i.replica_id for i in gda.local_shards]\n self.assertListEqual(replica_ids, expected_replica_ids)\n self.assertListEqual([i.device.id for i in gda.local_shards],\n [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(gda.is_fully_replicated, expected_is_fully_replicated)\n for s in gda.local_shards:\n self.assertEqual(s.data.aval,\n core.ShapedArray(expected_shard_shape, s.data.dtype))\n for g, l in safe_zip(gda.global_shards, gda.local_shards):\n self.assertEqual(g.device, l.device)\n self.assertEqual(g.index, l.index)\n self.assertEqual(g.replica_id, l.replica_id)\n self.assertEqual(g.data.aval, l.data.aval)\n self.assertArraysEqual(g.data, l.data)\n\n\n @parameterized.named_parameters(\n (\"mesh_x_y_z\", [\"x\", \"y\", \"z\"],\n # There are more slices but for convienient purposes, checking for only\n # 2. 
The indices + shard_shape + replica_id should be unique enough.\n ((slice(0, 4), slice(0, 2), slice(0, 1)), (slice(0, 4), slice(0, 2), slice(1, 2))),\n (4, 2, 1),\n [0, 0, 0, 0, 0, 0, 0, 0]),\n (\"mesh_xy_z\", [(\"x\", \"y\"), \"z\"],\n ((slice(0, 2), slice(0, 2), slice(None)), (slice(0, 2), slice(2, 4), slice(None))),\n (2, 2, 2),\n [0, 0, 0, 0, 0, 0, 0, 0]),\n (\"mesh_z\", [\"z\"],\n ((slice(0, 4), slice(None), slice(None)), (slice(4, 8), slice(None), slice(None))),\n (4, 4, 2),\n [0, 0, 1, 1, 2, 2, 3, 3]),\n )\n def test_gda_3d_shard(self, mesh_axes, expected_index, expected_shard_shape,\n expected_replica_ids):\n global_mesh = jtu.create_global_mesh((2, 2, 2), ('x', 'y', 'z'))\n global_input_shape = (8, 4, 2)\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n\n gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,\n mesh_axes, cb)\n self.assertEqual(gda.local_shards[0].index, expected_index[0])\n self.assertArraysEqual(gda.local_data(0),\n global_input_data[expected_index[0]])\n self.assertEqual(gda.local_shards[1].index, expected_index[1])\n self.assertArraysEqual(gda.local_data(1),\n global_input_data[expected_index[1]])\n self.assertEqual(gda.local_data(0).shape, expected_shard_shape)\n\n replica_ids = [i.replica_id for i in gda.local_shards]\n self.assertListEqual(replica_ids, expected_replica_ids)\n\n @parameterized.named_parameters(\n (\"mesh_x\", [\"x\"],\n # There are more slices but for convienient purposes, checking for only\n # 2. The indices + shard_shape + replica_id should be unique enough.\n ((slice(0, 2),), (slice(2, 4),)),\n (2,),\n [0, 0, 0, 0, 0, 0, 0, 0]),\n (\"mesh_none\", [],\n ((slice(None),), (slice(None),)),\n (16,),\n [0, 1, 2, 3, 4, 5, 6, 7]),\n )\n def test_gda_1d_shard(self, mesh_axes, expected_index, expected_shard_shape,\n expected_replica_ids):\n global_mesh = jtu.create_global_mesh((8,), ('x'))\n global_input_shape = (16,)\n global_input_data = np.arange(prod(global_input_shape)).reshape(-1)\n def cb(index):\n return global_input_data[index]\n\n gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,\n mesh_axes, cb)\n self.assertEqual(gda.local_shards[0].index, expected_index[0])\n self.assertArraysEqual(gda.local_data(0),\n global_input_data[expected_index[0]])\n self.assertEqual(gda.local_shards[1].index, expected_index[1])\n self.assertArraysEqual(gda.local_data(1),\n global_input_data[expected_index[1]])\n self.assertEqual(gda.local_data(0).shape, expected_shard_shape)\n replica_ids = [i.replica_id for i in gda.local_shards]\n self.assertListEqual(replica_ids, expected_replica_ids)\n\n @parameterized.named_parameters(\n (\"mesh_x_y\", [\"x\", \"y\"],\n # There are more slices but for convienient purposes, checking for only\n # 2. 
The indices + shard_shape + replica_id should be unique enough.\n ((slice(0, 4), slice(0, 1)), (slice(0, 4), slice(1, 2))),\n (4, 1),\n [0, 0, 0, 0]),\n )\n def test_gda_subset_devices(self, mesh_axes, expected_index,\n expected_shard_shape, expected_replica_ids):\n global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n\n gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,\n mesh_axes, cb)\n self.assertEqual(gda.local_shards[0].index, expected_index[0])\n self.assertArraysEqual(gda.local_data(0),\n global_input_data[expected_index[0]])\n self.assertEqual(gda.local_shards[1].index, expected_index[1])\n self.assertArraysEqual(gda.local_data(1),\n global_input_data[expected_index[1]])\n self.assertEqual(gda.local_data(0).shape, expected_shard_shape)\n replica_ids = [i.replica_id for i in gda.local_shards]\n self.assertListEqual(replica_ids, expected_replica_ids)\n for g, l in safe_zip(gda.global_shards, gda.local_shards):\n self.assertEqual(g.device, l.device)\n self.assertEqual(g.index, l.index)\n self.assertEqual(g.replica_id, l.replica_id)\n self.assertArraysEqual(g.data, l.data)\n\n def test_gda_batched_callback(self):\n global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = [('x', 'y')]\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n\n def cb(indices):\n self.assertEqual(len(indices), len(global_mesh.local_devices))\n return [global_input_data[index] for index in indices]\n\n gda = GlobalDeviceArray.from_batched_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n expected_first_shard_value = np.array([[0, 1]])\n self.assertArraysEqual(gda.local_data(0).to_py(),\n expected_first_shard_value)\n expected_second_shard_value = np.array([[2, 3]])\n self.assertArraysEqual(gda.local_data(1).to_py(),\n expected_second_shard_value)\n\n def test_gda_batched_callback_with_devices(self):\n global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = ['x']\n global_input_data = np.arange(\n prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)\n\n def cb(cb_inp):\n self.assertLen(cb_inp, 4)\n dbs = []\n for inp in cb_inp:\n index, devices = inp\n self.assertLen(devices, 2)\n array = global_input_data[index]\n dbs.extend([jax.device_put(array, device) for device in devices])\n return dbs\n\n gda = GlobalDeviceArray.from_batched_callback_with_devices(\n global_input_shape, global_mesh, mesh_axes, cb)\n expected_first_shard_value = np.array([[0, 1], [2, 3]], dtype=np.float32)\n self.assertArraysEqual(gda.local_data(0).to_py(),\n expected_first_shard_value)\n expected_second_shard_value = np.array([[0, 1], [2, 3]], dtype=np.float32)\n self.assertArraysEqual(gda.local_data(1).to_py(),\n expected_second_shard_value)\n\n def test_gda_str_repr(self):\n global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = [('x', 'y')]\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n gda = GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n self.assertEqual(str(gda),\n 'GlobalDeviceArray(shape=(8, 2), dtype=int32)')\n self.assertEqual(\n repr(gda),\n (\"GlobalDeviceArray(shape=(8, 2), dtype=int32, \"\n 
\"global_mesh_shape={'x': 4, 'y': 2}, mesh_axes=[('x', 'y')])\"))\n\n\nif __name__ == '__main__':\n absltest.main(testLoader=jtu.JaxTestLoader())\n", "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport numpy as np\nfrom typing import Callable, Sequence, Tuple, Union, Mapping, Optional, List, Dict, NamedTuple\n\nfrom jax import core\nfrom jax._src.lib import xla_bridge as xb\nfrom jax._src.lib import xla_client as xc\nfrom jax.interpreters import pxla, xla\nfrom jax._src.util import prod, safe_zip, cache\nfrom jax._src.api import device_put\nfrom jax.tree_util import tree_flatten\nfrom jax.interpreters.sharded_jit import PartitionSpec\n\nShape = Tuple[int, ...]\nMeshAxes = Sequence[Union[str, Tuple[str], None]]\nDeviceArray = xc.Buffer\nDevice = xc.Device\nArrayLike = Union[np.ndarray, DeviceArray]\nIndex = Tuple[slice, ...]\n\n\ndef _convert_list_args_to_tuple(f):\n def wrapper(*args, **kwargs):\n args = [tuple(a) if isinstance(a, list) else a for a in args]\n kwargs = {k: (tuple(v) if isinstance(v, list) else v) for k, v in kwargs.items()}\n return f(*args, **kwargs)\n return wrapper\n\n\ndef _canonicalize_mesh_axes(mesh_axes):\n if not isinstance(mesh_axes, PartitionSpec):\n pspec = PartitionSpec(*mesh_axes)\n else:\n pspec = mesh_axes\n return pspec\n\ndef _get_indices(global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes) -> Tuple[pxla.Index, ...]:\n # Import here to avoid cyclic import error when importing gda in pjit.py.\n from jax.experimental.pjit import get_array_mapping, _prepare_axis_resources\n\n pspec = _canonicalize_mesh_axes(mesh_axes)\n parsed_pspec, _, _ = _prepare_axis_resources(pspec, \"mesh_axes\")\n array_mapping = get_array_mapping(parsed_pspec)\n # The dtype doesn't matter for creating sharding specs.\n aval = core.ShapedArray(global_shape, np.float32)\n sharding_spec = pxla.mesh_sharding_specs(\n global_mesh.shape, global_mesh.axis_names)(aval, array_mapping)\n indices = pxla.spec_to_indices(global_shape, sharding_spec)\n for index in indices:\n assert isinstance(index, tuple)\n for idx in index:\n assert isinstance(idx, slice)\n return indices\n\n\n@_convert_list_args_to_tuple\n@cache()\ndef get_shard_indices(global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes) -> Mapping[Device, Index]:\n indices = _get_indices(global_shape, global_mesh, mesh_axes)\n # The type: ignore is to ignore the type returned by `spec_to_indices`.\n return dict(\n (d, i)\n for d, i in safe_zip(global_mesh.devices.flat, indices)) # type: ignore\n\n\ndef _calc_replica_ids(global_mesh: pxla.Mesh, mesh_axes: MeshAxes):\n pspec = _canonicalize_mesh_axes(mesh_axes)\n mesh_values = list(global_mesh.shape.values())\n flattened_pspec, _ = tree_flatten(tuple(pspec))\n # Get the location (coordinates) of each device in the device mesh.\n device_location = np.array(np.unravel_index(\n [d.id for d in global_mesh.devices.flat], mesh_values))\n # Find all the axes that were replicated.\n # If mesh_axes = (('x', 'y'), None, 
'z') and ('x', 'y', 'z') were the mesh's\n # axis, then replicated axes will be None since all axes are being used to\n # shard the input.\n replicated_axis = np.isin(list(global_mesh.shape.keys()), flattened_pspec,\n invert=True)\n # If all elements in replicated_axis are False then the input is fully sharded\n # so replica ids should be all 0s.\n if not any(replicated_axis):\n return [0] * global_mesh.devices.size\n else:\n # Drop all the sharded axes and find the location of coordinates in a linear\n # array.\n return np.ravel_multi_index(device_location[replicated_axis],\n np.array(mesh_values)[replicated_axis])\n\n\n@_convert_list_args_to_tuple\n@cache()\ndef get_shard_indices_replica_ids(\n global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes) -> Mapping[Device, Tuple[Index, int]]:\n return _get_shard_indices_replica_ids_uncached(global_shape, global_mesh, mesh_axes)\n\ndef _get_shard_indices_replica_ids_uncached(\n global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes) -> Mapping[Device, Tuple[Index, int]]:\n indices = _get_indices(global_shape, global_mesh, mesh_axes)\n replica_ids = _calc_replica_ids(global_mesh, mesh_axes)\n return dict((d, (i, r))\n for d, i, r in safe_zip(global_mesh.devices.flat, indices, replica_ids))\n\n\n@_convert_list_args_to_tuple\n@cache()\ndef get_shard_shape(global_shape, global_mesh, mesh_axes) -> Shape:\n chunk_size = []\n for mesh_axis, size in zip(mesh_axes, global_shape):\n if not mesh_axis:\n chunk_size.append(size)\n elif isinstance(mesh_axis, tuple):\n m = prod([global_mesh.shape[ma] for ma in mesh_axis])\n chunk_size.append(size // m)\n else:\n chunk_size.append(size // global_mesh.shape[mesh_axis])\n if len(chunk_size) != len(global_shape):\n chunk_size.extend(global_shape[len(chunk_size):])\n return tuple(chunk_size)\n\n\n_hashed_index = lambda x: hash(tuple((v.start, v.stop) for v in x))\n\n\[email protected](frozen=True)\nclass Shard:\n \"\"\"A single data shard of a GlobalDeviceArray.\n\n Args:\n device : Which device this shard resides on.\n index : The index into the global array of this shard.\n replica_id : Integer id indicating which replica of the global array this\n shard is part of. Always 0 for fully sharded data\n (i.e. when there’s only 1 replica).\n data : The data of this shard. None if ``device`` is non-local.\n \"\"\"\n device: Device\n index: Index\n replica_id: int\n # None if this `Shard` lives on a non-local device.\n data: Optional[DeviceArray] = None\n\n\nclass _GdaFastPathArgs(NamedTuple):\n global_indices_replica_ids: Mapping[Device, Tuple[Index, int]]\n local_devices: Sequence[Device]\n\n\nclass GlobalDeviceArray:\n \"\"\"A logical array with data sharded across multiple devices and processes.\n\n If you’re not already familiar with JAX’s multi-process programming model,\n please read https://jax.readthedocs.io/en/latest/multi_process.html.\n\n A GlobalDeviceArray (GDA) can be thought of as a view into a single logical\n array sharded across processes. The logical array is the “global” array, and\n each process has a GlobalDeviceArray object referring to the same global array\n (similarly to how each process runs a multi-process pmap or pjit). Each process\n can access the shape, dtype, etc. of the global array via the GDA, pass the\n GDA into multi-process pjits, and get GDAs as pjit outputs (coming soon: xmap\n and pmap). 
However, each process can only directly access the shards of the\n global array data stored on its local devices.\n\n GDAs can help manage the inputs and outputs of multi-process computations.\n A GDA keeps track of which shard of the global array belongs to which device,\n and provides callback-based APIs to materialize the correct shard of the data\n needed for each local device of each process.\n\n A GDA consists of data shards. Each shard is stored on a different device.\n There are local shards and global shards. Local shards are those on local\n devices, and the data is visible to the current process. Global shards are\n those across all devices (including local devices), and the data isn’t visible\n if the shard is on a non-local device with respect to the current process.\n Please see the ``Shard`` class to see what information is stored inside that\n data structure.\n\n Note: to make pjit output GlobalDeviceArrays, set the environment variable\n ``JAX_PARALLEL_FUNCTIONS_OUTPUT_GDA=true`` or add the following to your code:\n ``jax.config.update('jax_parallel_functions_output_gda', True)``\n\n Args:\n global_shape : The global shape of the array.\n global_mesh : The global mesh representing devices across multiple\n processes.\n mesh_axes : A sequence with length less than or equal to the rank of the\n global array (i.e. the length of the global shape). Each element can be:\n\n * An axis name of ``global_mesh``, indicating that the corresponding\n global array axis is partitioned across the given device axis of\n ``global_mesh``.\n * A tuple of axis names of ``global_mesh``. This is like the above option\n except the global array axis is partitioned across the product of axes\n named in the tuple.\n * None indicating that the corresponding global array axis is not\n partitioned.\n\n For more information, please see:\n https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html#more-information-on-partitionspec\n device_buffers: DeviceArrays that are on the local devices of ``global_mesh``.\n\n Attributes:\n shape : Global shape of the array.\n dtype : Dtype of the global array.\n local_shards : List of :class:`Shard` on the local devices of the current process.\n Data is materialized for all local shards.\n global_shards : List of all :class:`Shard` of the global array. Data isn’t\n available if a shard is on a non-local device with respect to the current\n process.\n is_fully_replicated : True if the full array value is present on all devices\n of the global mesh.\n\n Example::\n\n # Logical mesh is (hosts, devices)\n assert global_mesh.shape == {'x': 4, 'y': 8}\n\n global_input_shape = (64, 32)\n mesh_axes = P('x', 'y')\n\n # Dummy example data; in practice we wouldn't necessarily materialize global data\n # in a single process.\n global_input_data = np.arange(\n np.prod(global_input_shape)).reshape(global_input_shape)\n\n def get_local_data_slice(index):\n # index will be a tuple of slice objects, e.g. (slice(0, 16), slice(0, 4))\n # This method will be called per-local device from the GDA constructor.\n return global_input_data[index]\n\n gda = GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, get_local_data_slice)\n\n f = pjit(lambda x: x @ x.T, out_axis_resources = P('y', 'x'))\n\n with mesh(global_mesh.shape, global_mesh.axis_names):\n out = f(gda)\n\n print(type(out)) # GlobalDeviceArray\n print(out.shape) # global shape == (64, 64)\n print(out.local_shards[0].data) # Access the data on a single local device,\n # e.g. 
for checkpointing\n print(out.local_shards[0].data.shape) # per-device shape == (8, 16)\n print(out.local_shards[0].index) # Numpy-style index into the global array that\n # this data shard corresponds to\n\n # `out` can be passed to another pjit call, out.local_shards can be used to\n # export the data to non-jax systems (e.g. for checkpointing or logging), etc.\n \"\"\"\n\n def __init__(self, global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes, device_buffers: Sequence[DeviceArray],\n _gda_fast_path_args: Optional[_GdaFastPathArgs] = None):\n self._global_shape = global_shape\n self._global_mesh = global_mesh\n self._mesh_axes = mesh_axes\n self._device_buffers = device_buffers\n # Optionally precomputed for performance.\n self._gda_fast_path_args = _gda_fast_path_args\n self._current_process = xb.process_index()\n\n if self._gda_fast_path_args is None:\n self._local_devices = self._global_mesh.local_devices\n else:\n self._local_devices = self._gda_fast_path_args.local_devices\n assert len(device_buffers) == len(self._local_devices)\n\n self._local_shards = self._create_local_shards()\n\n ss = get_shard_shape(self._global_shape, self._global_mesh, self._mesh_axes)\n assert all(db.shape == ss for db in device_buffers), (\n f\"Expected shard shape {ss} doesn't match the device buffer \"\n f\"shape {device_buffers[0].shape}\")\n\n dtype = device_buffers[0].dtype\n assert all(db.dtype == dtype for db in device_buffers), (\n \"Input arrays to GlobalDeviceArray must have matching dtypes, \"\n f\"got: {[db.dtype for db in device_buffers]}\")\n self.dtype = dtype\n\n def __str__(self):\n return f'GlobalDeviceArray(shape={self.shape}, dtype={self.dtype})'\n\n def __repr__(self):\n return (f'GlobalDeviceArray(shape={self.shape}, dtype={self.dtype}, '\n f'global_mesh_shape={dict(self._global_mesh.shape)}, '\n f'mesh_axes={self._mesh_axes})')\n\n @property\n def shape(self) -> Shape:\n return self._global_shape\n\n @property\n def is_fully_replicated(self) -> bool:\n return self.shape == self.local_data(0).shape\n\n def _create_local_shards(self) -> Sequence[Shard]:\n if self._gda_fast_path_args is not None:\n global_indices_rid = self._gda_fast_path_args.global_indices_replica_ids\n else:\n global_indices_rid = get_shard_indices_replica_ids(\n self._global_shape, self._global_mesh, self._mesh_axes)\n\n out = []\n for db in self._device_buffers:\n device = db.device()\n index, rid = global_indices_rid[device]\n out.append(Shard(device, index, rid, db))\n return out\n\n\n @pxla.maybe_cached_property\n def local_shards(self) -> Sequence[Shard]:\n for s in self._local_shards:\n # Ignore the type because mypy thinks data is None but local_shards\n # cannot have data=None which is checked in `_create_local_shards`.\n if s.data.aval is None: # type: ignore\n s.data.aval = core.ShapedArray(s.data.shape, s.data.dtype) # type: ignore\n return self._local_shards\n\n @pxla.maybe_cached_property\n def global_shards(self) -> Sequence[Shard]:\n # Populating global_shards lazily (i.e. when requested) because populating\n # sthem eagerly leads to a performance regression when training on large\n # models.\n # Also as this a cached property, once calculated, it should be cached. 
So\n # multiple accesses should be cheap.\n global_indices_rid = get_shard_indices_replica_ids(\n self._global_shape, self._global_mesh, self._mesh_axes)\n device_to_buffer = dict((db.device(), db) for db in self._device_buffers)\n global_shards = []\n for device, (index, rid) in global_indices_rid.items():\n local_shard = device.process_index == self._current_process\n buf = device_to_buffer[device] if local_shard else None\n if buf is not None and buf.aval is None:\n buf.aval = core.ShapedArray(buf.shape, buf.dtype)\n sh = Shard(device, index, rid, buf)\n global_shards.append(sh)\n return global_shards\n\n def local_data(self, index) -> DeviceArray:\n return self.local_shards[index].data\n\n @classmethod\n def from_callback(cls, global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes, data_callback: Callable[[Index],\n ArrayLike]):\n \"\"\"Constructs a GlobalDeviceArray via data fetched from ``data_callback``.\n\n ``data_callback`` is used to fetch the data for each local slice of the returned GlobalDeviceArray.\n\n Example::\n\n global_input_shape = (8, 2)\n global_input_data = np.arange(prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh, mesh_axes, cb)\n\n Args:\n global_shape : The global shape of the array\n global_mesh : The global mesh representing devices across multiple\n processes.\n mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.\n data_callback : Callback that takes indices into the global array value as input and\n returns the corresponding data of the global array value. The data can be returned\n as any array-like object, e.g. a ``numpy.ndarray``.\n \"\"\"\n global_indices_rid = get_shard_indices_replica_ids(\n global_shape, global_mesh, mesh_axes)\n local_devices = global_mesh.local_devices\n dbs = [\n device_put(data_callback(global_indices_rid[device][0]), device)\n for device in local_devices\n ]\n return cls(global_shape, global_mesh, mesh_axes, dbs,\n _gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))\n\n @classmethod\n def from_batched_callback(cls, global_shape: Shape,\n global_mesh: pxla.Mesh, mesh_axes: MeshAxes,\n data_callback: Callable[[Sequence[Index]],\n Sequence[ArrayLike]]):\n \"\"\"Constructs a GlobalDeviceArray via batched data fetched from ``data_callback``.\n\n Like ``from_callback``, except the callback function is called only once to fetch all data\n local to this process.\n\n Example::\n\n global_input_shape = (8, 2)\n global_input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def batched_cb(indices):\n self.assertEqual(len(indices),len(global_mesh.local_devices))\n return [global_input_data[index] for index in indices]\n gda = GlobalDeviceArray.from_batched_callback(global_input_shape, global_mesh, mesh_axes, batched_cb)\n\n Args:\n global_shape : The global shape of the array\n global_mesh : The global mesh representing devices across multiple\n processes.\n mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.\n data_callback : Callback that takes a batch of indices into the global array value with\n length equal to the number of local devices as input and returns the corresponding data for each index.\n The data can be returned as any array-like objects, e.g. 
``numpy.ndarray``\n\"\"\"\n global_indices_rid = get_shard_indices_replica_ids(\n global_shape, global_mesh, mesh_axes)\n local_devices = global_mesh.local_devices\n local_indices = [global_indices_rid[d][0] for d in local_devices]\n local_arrays = data_callback(local_indices)\n dbs = pxla.device_put(local_arrays, local_devices)\n return cls(global_shape, global_mesh, mesh_axes, dbs,\n _gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))\n\n @classmethod\n def from_batched_callback_with_devices(\n cls, global_shape: Shape, global_mesh: pxla.Mesh,\n mesh_axes: MeshAxes,\n data_callback: Callable[[Sequence[Tuple[Index, Tuple[Device, ...]]]],\n Sequence[DeviceArray]]):\n \"\"\"Constructs a GlobalDeviceArray via batched DeviceArrays fetched from ``data_callback``.\n\n Like ``from_batched_callback``, except the callback function is responsible for returning on-device data (e.g. by calling ``jax.device_put``).\n\n Example::\n\n global_input_shape = (8, 2)\n global_input_data = np.arange(prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)\n def cb(cb_inp):\n self.assertLen(cb_inp, len(global_mesh.local_devices))\n dbs = []\n for inp in cb_inp:\n index, devices = inp\n array = global_input_data[index]\n dbs.extend([jax.device_put(array, device) for device in devices])\n return dbs\n gda = GlobalDeviceArray.from_batched_callback_with_devices(global_input_shape, global_mesh, mesh_axes, cb)\n\n Args:\n global_shape : The global shape of the array\n global_mesh : The global mesh representing devices across multiple\n processes.\n mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.\n data_callback : Callback that takes agets batch of indices into the global array value with\n length equal to the number of local devices as input and returns the corresponding data for\n each index. 
The data must be returned as jax DeviceArrays.\n\"\"\"\n global_indices_rid = get_shard_indices_replica_ids(\n global_shape, global_mesh, mesh_axes)\n local_devices = global_mesh.local_devices\n\n index_to_device: Dict[int, Tuple[Index, List[Device]]] = {}\n for device in local_devices:\n index = global_indices_rid[device][0]\n h_index = _hashed_index(index)\n if h_index not in index_to_device:\n index_to_device[h_index] = (index, [device])\n else:\n index_to_device[h_index][1].append(device)\n\n cb_inp = [\n (index, tuple(devices)) for index, devices in index_to_device.values()\n ]\n dbs = data_callback(cb_inp)\n return cls(global_shape, global_mesh, mesh_axes, dbs,\n _gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))\n\n\ncore.pytype_aval_mappings[GlobalDeviceArray] = lambda x: core.ShapedArray(\n x.shape, x.dtype)\nxla.pytype_aval_mappings[GlobalDeviceArray] = lambda x: core.ShapedArray(\n x.shape, x.dtype)\nxla.canonicalize_dtype_handlers[GlobalDeviceArray] = pxla.identity\n\ndef _gda_shard_arg(x, devices, indices):\n return [s.data for s in x.local_shards]\npxla.shard_arg_handlers[GlobalDeviceArray] = _gda_shard_arg\n\n\ndef _gda_array_result_handler(global_aval, out_axis_resources, global_mesh):\n global_idx_rid = get_shard_indices_replica_ids(global_aval.shape, global_mesh,\n out_axis_resources)\n local_devices = global_mesh.local_devices\n fast_path_args = _GdaFastPathArgs(global_idx_rid, local_devices)\n return lambda bufs: GlobalDeviceArray(\n global_aval.shape, global_mesh, out_axis_resources, bufs, fast_path_args)\npxla.global_result_handlers[core.ShapedArray] = _gda_array_result_handler\npxla.global_result_handlers[core.ConcreteArray] = _gda_array_result_handler\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.math.floormod", "tensorflow.raw_ops.PreventGradient", "tensorflow.compiler.tf2xla.python.xla.dot_general", "tensorflow.math.greater_equal", "tensorflow.linalg.eigh", "tensorflow.dtypes.cast", "tensorflow.compiler.tf2xla.python.xla.variadic_reduce", "tensorflow.math.imag", "tensorflow.math.conj", "tensorflow.math.truediv", "tensorflow.switch_case", "tensorflow.compiler.tf2xla.python.xla.conv", "tensorflow.linalg.qr", "tensorflow.zeros_like", "tensorflow.function", "tensorflow.compiler.tf2xla.python.xla.dynamic_slice", "numpy.array", "tensorflow.compiler.xla.xla_data_pb2.PrecisionConfig", "tensorflow.linalg.eigvals", "tensorflow.ones", "numpy.shape", "tensorflow.concat", "tensorflow.math.sign", "tensorflow.stack", "tensorflow.compiler.xla.xla_data_pb2.ConvolutionDimensionNumbers", "tensorflow.where", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.replicate", "tensorflow.linalg.svd", "tensorflow.bitwise.invert", "tensorflow.math.real", "tensorflow.compiler.xla.xla_data_pb2.DotDimensionNumbers", "tensorflow.compiler.xla.xla_data_pb2.GatherDimensionNumbers", "tensorflow.compiler.tf2xla.python.xla.variadic_sort", "tensorflow.random.uniform", "tensorflow.identity", "tensorflow.math.floordiv", "tensorflow.dtypes.as_dtype", "tensorflow.math.subtract", "tensorflow.broadcast_to", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.Sharding", "tensorflow.compiler.xla.xla_data_pb2.OpMetadata", "tensorflow.zeros", "tensorflow.compiler.tf2xla.python.xla.gather", "tensorflow.compiler.tf2xla.python.xla.scatter", "numpy.issubdtype", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.tile", "tensorflow.cast", "tensorflow.math.equal", "tensorflow.raw_ops.PopulationCount", "tensorflow.rank", "tensorflow.compiler.xla.xla_data_pb2.ScatterDimensionNumbers", "tensorflow.while_loop", "tensorflow.stop_gradient", "tensorflow.complex", "tensorflow.logical_not", "tensorflow.reverse", "tensorflow.bitwise.left_shift", "tensorflow.linalg.eig", "tensorflow.shape", "tensorflow.compiler.tf2xla.python.xla.select_and_scatter", "tensorflow.bitwise.bitwise_or", "tensorflow.math.floor", "tensorflow.math.round", "tensorflow.raw_ops.AddV2", "tensorflow.clip_by_value", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reshape", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.Module", "tensorflow.compiler.tf2xla.python.xla.reduce_window", "tensorflow.nest.map_structure", "tensorflow.logical_and", "tensorflow.math.abs", "tensorflow.device", "tensorflow.math.reciprocal", "tensorflow.nest.flatten", "tensorflow.bitwise.right_shift", "numpy.arange", "tensorflow.name_scope", "tensorflow.linalg.triangular_solve", "tensorflow.math.atan2", "tensorflow.math.multiply", "tensorflow.linalg.adjoint", "tensorflow.math.less", "tensorflow.compiler.tf2xla.python.xla.pad", "tensorflow.math.sqrt", "tensorflow.math.negative", "numpy.prod", "tensorflow.math.top_k", "tensorflow.TensorSpec" ], [ "numpy.array" ], [ "numpy.array", "numpy.unravel_index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yichenj/yolov3-tf2
[ "17bcb67e765eeab648c201146856d4440c784240" ]
[ "yolov3_tf2/models.py" ]
[ "from absl import flags\nfrom absl.flags import FLAGS\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Add,\n Concatenate,\n Conv2D,\n Input,\n Lambda,\n LeakyReLU,\n MaxPool2D,\n UpSampling2D,\n ZeroPadding2D,\n BatchNormalization,\n)\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.losses import (\n binary_crossentropy,\n sparse_categorical_crossentropy\n)\nfrom .utils import broadcast_iou\n\nflags.DEFINE_integer('yolo_max_boxes', 100,\n 'maximum number of boxes per image')\nflags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')\nflags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')\n\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\n (59, 119), (116, 90), (156, 198), (373, 326)],\n np.float32) / 416\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\n\nyolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),\n (81, 82), (135, 169), (344, 319)],\n np.float32) / 416\nyolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])\n\n\ndef DarknetConv(x, filters, size, strides=1, batch_norm=True):\n if strides == 1:\n padding = 'same'\n else:\n x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding\n padding = 'valid'\n x = Conv2D(filters=filters, kernel_size=size,\n strides=strides, padding=padding,\n use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)\n if batch_norm:\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n return x\n\n\ndef DarknetResidual(x, filters):\n prev = x\n x = DarknetConv(x, filters // 2, 1)\n x = DarknetConv(x, filters, 3)\n x = Add()([prev, x])\n return x\n\n\ndef DarknetBlock(x, filters, blocks):\n x = DarknetConv(x, filters, 3, strides=2)\n for _ in range(blocks):\n x = DarknetResidual(x, filters)\n return x\n\n\ndef Darknet(name=None):\n x = inputs = Input([None, None, 3])\n x = DarknetConv(x, 32, 3)\n x = DarknetBlock(x, 64, 1)\n x = DarknetBlock(x, 128, 2) # skip connection\n x = x_36 = DarknetBlock(x, 256, 8) # skip connection\n x = x_61 = DarknetBlock(x, 512, 8)\n x = DarknetBlock(x, 1024, 4)\n return tf.keras.Model(inputs, (x_36, x_61, x), name=name)\n\n\ndef DarknetTiny(name=None):\n x = inputs = Input([None, None, 3])\n x = DarknetConv(x, 16, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 32, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 64, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 128, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = x_8 = DarknetConv(x, 256, 3) # skip connection\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 512, 3)\n x = MaxPool2D(2, 1, 'same')(x)\n x = DarknetConv(x, 1024, 3)\n return tf.keras.Model(inputs, (x_8, x), name=name)\n\n\ndef YoloConv(filters, name=None):\n def yolo_conv(x_in):\n if isinstance(x_in, tuple):\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\n x, x_skip = inputs\n\n # concat with skip connection\n x = DarknetConv(x, filters, 1)\n x = UpSampling2D(2)(x)\n x = Concatenate()([x, x_skip])\n else:\n x = inputs = Input(x_in.shape[1:])\n\n x = DarknetConv(x, filters, 1)\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, filters, 1)\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, filters, 1)\n return Model(inputs, x, name=name)(x_in)\n return yolo_conv\n\n\ndef YoloConvTiny(filters, name=None):\n def yolo_conv(x_in):\n if isinstance(x_in, tuple):\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\n x, x_skip = inputs\n\n # concat with skip connection\n x = 
DarknetConv(x, filters, 1)\n x = UpSampling2D(2)(x)\n x = Concatenate()([x, x_skip])\n else:\n x = inputs = Input(x_in.shape[1:])\n x = DarknetConv(x, filters, 1)\n\n return Model(inputs, x, name=name)(x_in)\n return yolo_conv\n\n\ndef YoloOutput(filters, anchors, classes, name=None):\n def yolo_output(x_in):\n x = inputs = Input(x_in.shape[1:])\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)\n x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],\n anchors, classes + 5)))(x)\n return tf.keras.Model(inputs, x, name=name)(x_in)\n return yolo_output\n\n\ndef yolo_boxes(pred, anchors, classes):\n # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))\n grid_size = tf.shape(pred)[1:3]\n box_xy, box_wh, objectness, class_probs = tf.split(\n pred, (2, 2, 1, classes), axis=-1)\n\n box_xy = tf.sigmoid(box_xy)\n objectness = tf.sigmoid(objectness)\n class_probs = tf.sigmoid(class_probs)\n pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss\n\n # !!! grid[x][y] == (y, x)\n grid = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n\n box_xy = (box_xy + tf.cast(grid, tf.float32)) / \\\n tf.cast(grid_size, tf.float32)\n box_wh = tf.exp(box_wh) * anchors\n\n box_x1y1 = box_xy - box_wh / 2\n box_x2y2 = box_xy + box_wh / 2\n bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)\n\n return bbox, objectness, class_probs, pred_box\n\n\ndef yolo_nms(outputs, anchors, masks, classes):\n # boxes, conf, type\n b, c, t = [], [], []\n\n for o in outputs:\n b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))\n c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))\n t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))\n\n bbox = tf.concat(b, axis=1)\n confidence = tf.concat(c, axis=1)\n class_probs = tf.concat(t, axis=1)\n\n scores = confidence * class_probs\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),\n scores=tf.reshape(\n scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),\n max_output_size_per_class=FLAGS.yolo_max_boxes,\n max_total_size=FLAGS.yolo_max_boxes,\n iou_threshold=FLAGS.yolo_iou_threshold,\n score_threshold=FLAGS.yolo_score_threshold\n )\n\n return boxes, scores, classes, valid_detections\n\n\ndef YoloV3(size=None, channels=3, anchors=yolo_anchors,\n masks=yolo_anchor_masks, classes=80, training=False):\n x = inputs = Input([size, size, channels], name='input')\n\n x_36, x_61, x = Darknet(name='yolo_darknet')(x)\n\n x = YoloConv(512, name='yolo_conv_0')(x)\n output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)\n\n x = YoloConv(256, name='yolo_conv_1')((x, x_61))\n output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)\n\n x = YoloConv(128, name='yolo_conv_2')((x, x_36))\n output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)\n\n if training:\n return Model(inputs, (output_0, output_1, output_2), name='yolov3')\n\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\n name='yolo_boxes_0')(output_0)\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\n name='yolo_boxes_1')(output_1)\n boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),\n name='yolo_boxes_2')(output_2)\n\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\n 
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))\n\n return Model(inputs, outputs, name='yolov3')\n\n\ndef YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,\n masks=yolo_tiny_anchor_masks, classes=80, training=False):\n x = inputs = Input([size, size, channels], name='input')\n\n x_8, x = DarknetTiny(name='yolo_darknet')(x)\n\n x = YoloConvTiny(256, name='yolo_conv_0')(x)\n output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)\n\n x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))\n output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)\n\n if training:\n return Model(inputs, (output_0, output_1), name='yolov3')\n\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\n name='yolo_boxes_0')(output_0)\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\n name='yolo_boxes_1')(output_1)\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\n name='yolo_nms')((boxes_0[:3], boxes_1[:3]))\n return Model(inputs, outputs, name='yolov3_tiny')\n\n\ndef YoloLoss(anchors, classes=80, ignore_thresh=0.5):\n def yolo_loss(y_true, y_pred):\n # 1. transform all pred outputs\n # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))\n pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(\n y_pred, anchors, classes)\n pred_xy = pred_xywh[..., 0:2]\n pred_wh = pred_xywh[..., 2:4]\n\n # 2. transform all true outputs\n # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))\n true_box, true_obj, true_class_idx = tf.split(\n y_true, (4, 1, 1), axis=-1)\n true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2\n true_wh = true_box[..., 2:4] - true_box[..., 0:2]\n\n # give higher weights to small boxes\n box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]\n\n # 3. inverting the pred box equations\n grid_size = tf.shape(y_true)[1]\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)\n true_xy = true_xy * tf.cast(grid_size, tf.float32) - \\\n tf.cast(grid, tf.float32)\n true_wh = tf.math.log(true_wh / anchors)\n true_wh = tf.where(tf.math.is_inf(true_wh),\n tf.zeros_like(true_wh), true_wh)\n\n # 4. calculate all masks\n obj_mask = tf.squeeze(true_obj, -1)\n # ignore false positive when iou is over threshold\n best_iou = tf.map_fn(\n lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(\n x[1], tf.cast(x[2], tf.bool))), axis=-1),\n (pred_box, true_box, obj_mask),\n tf.float32)\n ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)\n\n # 5. calculate all losses\n xy_loss = obj_mask * box_loss_scale * \\\n tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)\n wh_loss = obj_mask * box_loss_scale * \\\n tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)\n obj_loss = binary_crossentropy(true_obj, pred_obj)\n obj_loss = obj_mask * obj_loss + \\\n (1 - obj_mask) * ignore_mask * obj_loss\n # sparse_categorical_crossentropy will always output 0 when number of classes is 1,\n # so convert true_class into one hot label and use binary_crossentropy.\n true_class_one_hot = tf.one_hot(\n tf.cast(true_class_idx[..., 0], tf.int32), classes)\n class_loss = obj_mask * binary_crossentropy(\n true_class_one_hot, pred_class)\n\n # 6. 
sum over (batch, gridx, gridy, anchors) => (batch, 1)\n xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))\n wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))\n obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))\n class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))\n\n return xy_loss + wh_loss + obj_loss + class_loss\n return yolo_loss\n" ]
[ [ "tensorflow.concat", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.UpSampling2D", "tensorflow.squeeze", "tensorflow.square", "tensorflow.keras.layers.Add", "tensorflow.math.is_inf", "tensorflow.shape", "tensorflow.exp", "tensorflow.keras.Model", "tensorflow.zeros_like", "tensorflow.keras.losses.binary_crossentropy", "tensorflow.split", "numpy.array", "tensorflow.range", "tensorflow.sigmoid", "tensorflow.keras.layers.MaxPool2D", "tensorflow.math.log", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
P-Hidringer/tensorflow
[ "c8731009708d4694fc553562a267d75064fc5ab4" ]
[ "tensorflow/contrib/lite/testing/generate_examples.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Generate a series of TensorFlow graphs that become tflite test cases.\n\nUsage:\n\ngenerate_examples <output directory>\n\nbazel run //tensorflow/contrib/lite/testing:generate_examples\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport itertools\nimport os\nimport re\nimport sys\nimport tempfile\nimport traceback\nimport zipfile\nimport numpy as np\nfrom six import StringIO\nfrom six.moves import xrange\n\n# TODO(aselle): Disable GPU for now\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n# pylint: disable=g-import-not-at-top\nimport tensorflow as tf\nfrom google.protobuf import text_format\n# TODO(aselle): switch to TensorFlow's resource_loader\nfrom tensorflow.contrib.lite.testing import generate_examples_report as report_lib\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.ops import rnn\n\nparser = argparse.ArgumentParser(description=\"Script to generate TFLite tests.\")\nparser.add_argument(\"output_path\",\n help=\"Directory where the outputs will be go.\")\nparser.add_argument(\"--zip_to_output\",\n type=str,\n help=\"Particular zip to output.\",\n required=False)\nparser.add_argument(\"--toco\",\n type=str,\n help=\"Path to toco tool.\",\n required=True)\nparser.add_argument(\n \"--known_bugs_are_errors\",\n action=\"store_true\",\n help=(\"If a particular model is affected by a known bug,\"\n \" count it as a toco error.\"))\nparser.add_argument(\n \"--ignore_toco_errors\",\n action=\"store_true\",\n help=\"Raise an exception if any toco error is encountered.\")\nparser.add_argument(\n \"--save_graphdefs\",\n action=\"store_true\",\n help=\"Include intermediate graphdefs in the output zip files.\")\n\n\nRANDOM_SEED = 342\nTEST_INPUT_DEPTH = 3\n\n\n# A map from regular expression to bug number. 
Any test failure with label\n# matching the expression will be considered due to the corresponding bug.\nKNOWN_BUGS = {\n # TOCO doesn't support scalars as input.\n r\"relu.*input_shape=\\[\\]\": \"67587484\",\n r\"sigmoid.*input_shape=\\[\\]\": \"67645668\",\n # Concat doesn't work with a single input tensor\n r\"concat.*num_tensors=1\": \"67378344\",\n # Transposition in MatMul is not supported.\n r\"fully_connected.*transpose_.=True\": \"67586970\",\n # Softmax graphs are too complex.\n r\"softmax.*dim=0\": \"67749831\",\n r\"softmax.*input_shape=\\[1,3,4,3\\]\": \"67749831\",\n # SpaceToDepth only supports float32.\n r\"space_to_depth.*(float16|int32|uint8|int64)\": \"68018134\",\n # BatchToSpaceND only supports 4D tensors.\n r\"batch_to_space_nd.*input_shape=\\[8,2,2,2,1,1\\]\": \"70594733\",\n # Div will use floordiv.\n r\"div.*int32\": \"72051395\",\n # TOCO require matching dimensions in strided_slice.\n r\"strided_slice.*begin=\\[0\\].*end=\\[1\\].*\": \"73170889\",\n # No support for SplitV\n r\"split.*num_or_size_splits=\\[2,2\\]\": \"73377559\",\n # Needs support for dimensions other than the last one in argmax.\n r\"arg_max.*axis=0.*\": \"77546240\",\n r\"arg_max.*axis=1.*\": \"77546240\",\n r\"arg_max.*axis=2.*\": \"77546240\",\n}\n\n\nclass ExtraTocoOptions(object):\n \"\"\"Additional toco options besides input, output, shape.\"\"\"\n\n def __init__(self):\n # Whether to ignore control dependency nodes.\n self.drop_control_dependency = False\n # Allow custom ops in the toco conversion.\n self.allow_custom_ops = False\n # Rnn states that are used to support rnn / lstm cells.\n self.rnn_states = None\n\n\ndef toco_options(data_types,\n input_arrays,\n output_arrays,\n shapes,\n extra_toco_options=ExtraTocoOptions()):\n \"\"\"Create TOCO options to process a model.\n\n Args:\n data_types: input and inference types used by TOCO.\n input_arrays: names of the input tensors\n output_arrays: name of the output tensors\n shapes: shapes of the input tensors\n extra_toco_options: additional toco options\n Returns:\n the options in a string.\n \"\"\"\n shape_str = \":\".join([\",\".join(str(y) for y in x) for x in shapes])\n inference_type = \"FLOAT\"\n # TODO(ahentz): if we get multi-input quantization to work we need this\n # to change\n if data_types[0] == \"QUANTIZED_UINT8\":\n inference_type = \"QUANTIZED_UINT8\"\n s = (\" --input_data_types=%s\" % \",\".join(data_types) +\n \" --inference_type=%s\" % inference_type +\n \" --input_format=TENSORFLOW_GRAPHDEF\" + \" --output_format=TFLITE\" +\n \" --input_arrays=%s\" % \",\".join(input_arrays) +\n \" --input_shapes=%s\" % shape_str +\n \" --output_arrays=%s\" % \",\".join(output_arrays))\n if extra_toco_options.drop_control_dependency:\n s += \" --drop_control_dependency\"\n if extra_toco_options.allow_custom_ops:\n s += \" --allow_custom_ops\"\n if extra_toco_options.rnn_states:\n s += (\" --rnn_states='\" + extra_toco_options.rnn_states + \"'\")\n return s\n\n\ndef write_examples(fp, examples):\n \"\"\"Given a list `examples`, write a text format representation.\n\n The file format is csv like with a simple repeated pattern. 
We would ike\n to use proto here, but we can't yet due to interfacing with the Android\n team using this format.\n\n Args:\n fp: File-like object to write to.\n examples: Example dictionary consiting of keys \"inputs\" and \"outputs\"\n \"\"\"\n\n def write_tensor(fp, x):\n \"\"\"Write tensor in file format supported by TFLITE example.\"\"\"\n fp.write(\"dtype,%s\\n\" % x.dtype)\n fp.write(\"shape,\" + \",\".join(map(str, x.shape)) + \"\\n\")\n # Output 9 digits after the point to ensure the precision is good enough.\n values = [\"{:.9f}\".format(value) for value in list(x.flatten())]\n fp.write(\"values,\" + \",\".join(values) + \"\\n\")\n\n fp.write(\"test_cases,%d\\n\" % len(examples))\n for example in examples:\n fp.write(\"inputs,%d\\n\" % len(example[\"inputs\"]))\n for i in example[\"inputs\"]:\n write_tensor(fp, i)\n fp.write(\"outputs,%d\\n\" % len(example[\"outputs\"]))\n for i in example[\"outputs\"]:\n write_tensor(fp, i)\n\n\ndef write_test_cases(fp, model_name, examples):\n \"\"\"Given a dictionary of `examples`, write a text format representation.\n\n The file format is protocol-buffer-like, even though we don't use proto due\n to the needs of the Android team.\n\n Args:\n fp: File-like object to write to.\n model_name: Filename where the model was written to, relative to filename.\n examples: Example dictionary consiting of keys \"inputs\" and \"outputs\"\n \"\"\"\n\n fp.write(\"load_model: %s\\n\" % os.path.basename(model_name))\n for example in examples:\n fp.write(\"reshape {\\n\")\n for t in example[\"inputs\"]:\n fp.write(\" input: \\\"\" + \",\".join(map(str, t.shape)) + \"\\\"\\n\")\n fp.write(\"}\\n\")\n fp.write(\"invoke {\\n\")\n\n for t in example[\"inputs\"]:\n values = [\"{:.9f}\".format(value) for value in list(t.flatten())]\n fp.write(\" input: \\\"\" + \",\".join(values) + \"\\\"\\n\")\n for t in example[\"outputs\"]:\n values = [\"{:.9f}\".format(value) for value in list(t.flatten())]\n fp.write(\" output: \\\"\" + \",\".join(values) + \"\\\"\\n\")\n fp.write(\"}\\n\")\n\n\n_TF_TYPE_INFO = {\n tf.float32: (np.float32, \"FLOAT\"),\n tf.float16: (np.float16, \"FLOAT\"),\n tf.int32: (np.int32, \"INT32\"),\n tf.uint8: (np.uint8, \"QUANTIZED_UINT8\"),\n tf.int64: (np.int64, \"INT64\"),\n}\n\n\ndef create_tensor_data(dtype, shape, min_value=-100, max_value=100):\n \"\"\"Build tensor data spreading the range [min_value, max_value).\"\"\"\n\n if dtype in _TF_TYPE_INFO:\n dtype = _TF_TYPE_INFO[dtype][0]\n\n if dtype in (tf.float32, tf.float16):\n value = (max_value-min_value)*np.random.random_sample(shape)+min_value\n elif dtype in (tf.int32, tf.uint8, tf.int64):\n value = np.random.randint(min_value, max_value+1, shape)\n return value.astype(dtype)\n\n\ndef freeze_graph(session, outputs):\n \"\"\"Freeze the current graph.\n\n Args:\n session: Tensorflow sessions containing the graph\n outputs: List of output tensors\n\n Returns:\n The frozen graph_def.\n \"\"\"\n return tf_graph_util.convert_variables_to_constants(\n session, session.graph.as_graph_def(), [x.op.name for x in outputs])\n\n\ndef make_control_dep_tests(zip_path):\n \"\"\"Make a set of tests that use control dependencies.\"\"\"\n\n test_parameters = [{\n \"input_shape\": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)\n assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)\n with 
tf.control_dependencies([assert_op]):\n out = tf.nn.conv2d(input_tensor, filter_value,\n strides=(1, 1, 1, 1), padding=\"SAME\")\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(tf.float32, parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n extra_toco_options = ExtraTocoOptions()\n extra_toco_options.drop_control_dependency = True\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs,\n extra_toco_options)\n\n\ndef toco_convert(graph_def_str, input_tensors, output_tensors,\n extra_toco_options):\n \"\"\"Convert a model's graph def into a tflite model.\n\n NOTE: this currently shells out to the toco binary, but we would like\n convert to Python API tooling in the future.\n\n Args:\n graph_def_str: Graph def proto in serialized string format.\n input_tensors: List of input tensor tuples `(name, shape, type)`.\n output_tensors: List of output tensors (names).\n extra_toco_options: Additional toco options.\n\n Returns:\n output tflite model, log_txt from conversion\n or None, log_txt if it did not convert properly.\n \"\"\"\n data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]\n opts = toco_options(\n data_types=data_types,\n input_arrays=[x[0] for x in input_tensors],\n shapes=[x[1] for x in input_tensors],\n output_arrays=output_tensors,\n extra_toco_options=extra_toco_options)\n\n with tempfile.NamedTemporaryFile() as graphdef_file, \\\n tempfile.NamedTemporaryFile() as output_file, \\\n tempfile.NamedTemporaryFile(\"w+\") as stdout_file:\n graphdef_file.write(graph_def_str)\n graphdef_file.flush()\n\n # TODO(aselle): Switch this to subprocess at some point.\n cmd = (\"%s --input_file=%s --output_file=%s %s > %s 2>&1\" %\n (bin_path, graphdef_file.name, output_file.name, opts,\n stdout_file.name))\n exit_code = os.system(cmd)\n log = (\n cmd + \"exited with code %d\" % exit_code + \"\\n------------------\\n\" +\n stdout_file.read())\n return (None if exit_code != 0 else output_file.read()), log\n\n\ndef normalize_output_name(output_name):\n \"\"\"Remove :0 suffix from tensor names.\"\"\"\n return output_name.split(\":\")[0] if output_name.endswith(\n \":0\") else output_name\n\n\ndef make_zip_of_tests(zip_path,\n test_parameters,\n make_graph,\n make_test_inputs,\n extra_toco_options=ExtraTocoOptions(),\n use_frozen_graph=False):\n \"\"\"Helper to make a zip file of a bunch of TensorFlow models.\n\n This does a cartestian product of the dictionary of test_parameters and\n calls make_graph() for each item in the cartestian product set.\n If the graph is built successfully, then make_test_inputs() is called to\n build expected input/output value pairs. The model is then converted to tflite\n with toco, and the examples are serialized with the tflite model into a zip\n file (2 files per item in the cartesian product set).\n\n Args:\n zip_path: Path of zip file to write\n test_parameters: Dictionary mapping to lists for each parameter.\n e.g. 
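# Small self-contained sketch of the expansion described above: every
# combination of the parameter lists becomes one test-case label. It reuses the
# itertools import from the top of this file; the parameter values mirror the
# docstring example and are otherwise arbitrary.
_params = {"strides": [[1, 3, 3, 1], [1, 2, 2, 1]], "foo": [1.2, 1.3]}
_keys = _params.keys()
_labels = [
    ",".join("%s=%r" % z for z in sorted(zip(_keys, combo))).replace(" ", "")
    for combo in itertools.product(*_params.values())
]
# len(_labels) == 4, e.g. "foo=1.2,strides=[1,3,3,1]"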
`{\"strides\": [[1,3,3,1], [1,2,2,1]], \"foo\": [1.2, 1.3]}`\n make_graph: function that takes current parameters and returns tuple\n `[input1, input2, ...], [output1, output2, ...]`\n make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,\n `output_tensors` and returns tuple `(input_values, output_values)`.\n extra_toco_options: Additional toco options.\n use_frozen_graph: Whether or not freeze graph before toco converter.\n\n Raises:\n RuntimeError: if there are toco errors that can't be ignored.\n \"\"\"\n\n # TODO(aselle): Make this allow multiple inputs outputs.\n archive = zipfile.PyZipFile(zip_path, \"w\")\n zip_manifest = []\n convert_report = []\n toco_errors = 0\n for parameters in test_parameters:\n keys = parameters.keys()\n for curr in itertools.product(*parameters.values()):\n label = zip_path.replace(\".zip\", \"\") + (\",\".join(\n \"%s=%r\" % z for z in sorted(zip(keys, curr))).replace(\" \", \"\"))\n if label[0] == \"/\":\n label = label[1:]\n param_dict = dict(zip(keys, curr))\n\n def build_example(label, param_dict_real):\n \"\"\"Build the model with parameter values set in param_dict_real.\n\n Args:\n label: Label of the model (i.e. the filename in the zip).\n param_dict_real: Parameter dictionary (arguments to the factories\n make_graph and make_test_inputs)\n Returns:\n (tflite_model_binary, report) where tflite_model_binary is the\n serialized flatbuffer as a string and report is a dictionary with\n keys `toco_log` (log of toco conversion), `tf_log` (log of tf\n conversion), `toco` (a string of success status of the conversion),\n `tf` (a string success status of the conversion).\n \"\"\"\n\n np.random.seed(RANDOM_SEED)\n report = {\"toco\": report_lib.NOTRUN, \"tf\": report_lib.FAILED}\n\n # Build graph\n report[\"tf_log\"] = \"\"\n report[\"toco_log\"] = \"\"\n tf.reset_default_graph()\n\n with tf.device(\"/cpu:0\"):\n try:\n inputs, outputs = make_graph(param_dict_real)\n except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,\n ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n\n sess = tf.Session()\n try:\n baseline_inputs, baseline_outputs = (make_test_inputs(\n param_dict_real, sess, inputs, outputs))\n except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,\n ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n report[\"toco\"] = report_lib.FAILED\n report[\"tf\"] = report_lib.SUCCESS\n # Convert graph to toco\n input_tensors = [(input_tensor.name.split(\":\")[0],\n input_tensor.get_shape(), input_tensor.dtype)\n for input_tensor in inputs]\n output_tensors = [normalize_output_name(out.name) for out in outputs]\n graph_def = freeze_graph(\n sess,\n tf.global_variables() + inputs +\n outputs) if use_frozen_graph else sess.graph_def\n tflite_model_binary, toco_log = toco_convert(\n graph_def.SerializeToString(), input_tensors, output_tensors,\n extra_toco_options)\n report[\"toco\"] = (report_lib.SUCCESS if tflite_model_binary is not None\n else report_lib.FAILED)\n report[\"toco_log\"] = toco_log\n\n if FLAGS.save_graphdefs:\n archive.writestr(label + \".pb\",\n text_format.MessageToString(graph_def),\n zipfile.ZIP_DEFLATED)\n\n if tflite_model_binary:\n archive.writestr(label + \".bin\", tflite_model_binary,\n zipfile.ZIP_DEFLATED)\n example = {\"inputs\": baseline_inputs, \"outputs\": baseline_outputs}\n\n example_fp = StringIO()\n write_examples(example_fp, [example])\n archive.writestr(label + \".inputs\",\n example_fp.getvalue(), 
zipfile.ZIP_DEFLATED)\n\n example_fp2 = StringIO()\n write_test_cases(example_fp2, label + \".bin\", [example])\n archive.writestr(label + \"_tests.txt\",\n example_fp2.getvalue(), zipfile.ZIP_DEFLATED)\n\n zip_manifest.append(label + \"\\n\")\n\n return tflite_model_binary, report\n\n _, report = build_example(label, param_dict)\n\n if report[\"toco\"] == report_lib.FAILED:\n ignore_error = False\n if not FLAGS.known_bugs_are_errors:\n for pattern, bug_number in KNOWN_BUGS.items():\n if re.search(pattern, label):\n print(\"Ignored TOCO error due to bug %s\" % bug_number)\n ignore_error = True\n if not ignore_error:\n toco_errors += 1\n print(\"-----------------\\ntoco error!\\n%s\\n-----------------\\n\" %\n report[\"toco_log\"])\n\n convert_report.append((param_dict, report))\n report_io = StringIO()\n report_lib.make_report_table(report_io, zip_path, convert_report)\n archive.writestr(\"report.html\", report_io.getvalue())\n\n archive.writestr(\"manifest.txt\", \"\".join(zip_manifest), zipfile.ZIP_DEFLATED)\n\n # Log statistics of what succeeded\n total_conversions = len(convert_report)\n tf_success = sum(1 for x in convert_report\n if x[1][\"tf\"] == report_lib.SUCCESS)\n toco_success = sum(1 for x in convert_report\n if x[1][\"toco\"] == report_lib.SUCCESS)\n percent = 0\n if tf_success > 0:\n percent = float(toco_success) / float(tf_success) * 100.\n tf.logging.info((\"Archive %s Considered %d graphs, %d TF evaluated graphs \"\n \" and %d TOCO converted graphs (%.1f%%\"), zip_path,\n total_conversions, tf_success, toco_success, percent)\n\n if not FLAGS.ignore_toco_errors and toco_errors > 0:\n raise RuntimeError(\n \"Found %d errors while generating toco models\" % toco_errors)\n\n\ndef make_pool_tests(pool_op_in):\n \"\"\"Make a set of tests to do average pooling.\n\n Args:\n pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool`.\n\n Returns:\n A function representing the true generator (after curried pool_op_in).\n \"\"\"\n\n pool_op = pool_op_in\n\n def f(zip_path):\n \"\"\"Actual function that generates examples.\n\n Args:\n zip_path: path to write zip to.\n \"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"ksize\": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],\n \"strides\": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],\n # TODO(aselle): should add in a degenerate shape (e.g. 
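# Sketch of the known-bug lookup performed above: a failing test label is run
# through the KNOWN_BUGS regexes, and a matching TOCO failure is only counted
# as an error when --known_bugs_are_errors is set. The label string here is
# hypothetical.
_label = "relu/relu_input_shape=[]"
_matched_bugs = [bug for pattern, bug in KNOWN_BUGS.items()
                 if re.search(pattern, _label)]
# _matched_bugs == ["67587484"] for this label.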
[1, 0, 1, 1]).\n \"input_shape\": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = pool_op(\n input_tensor,\n ksize=parameters[\"ksize\"],\n strides=parameters[\"strides\"],\n data_format=parameters[\"data_format\"],\n padding=parameters[\"padding\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(tf.float32, parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n return f\n\n\ndef make_l2_pool_tests(zip_path):\n make_pool_tests(make_l2_pool)(zip_path)\n\n\ndef make_avg_pool_tests(zip_path):\n make_pool_tests(tf.nn.avg_pool)(zip_path)\n\n\ndef make_max_pool_tests(zip_path):\n make_pool_tests(tf.nn.max_pool)(zip_path)\n\n\ndef make_relu_tests(zip_path):\n \"\"\"Make a set of tests to do relu.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],\n [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.nn.relu(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], min_value=-4, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_relu1_tests(zip_path):\n \"\"\"Make a set of tests to do relu1.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n # Note that the following is not supported:\n # out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))\n out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], min_value=-3, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_relu6_tests(zip_path):\n \"\"\"Make a set of tests to do relu6.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.nn.relu(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], min_value=-3, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, 
test_parameters, build_graph, build_inputs)\n\n\n# This function tests various TensorFLow functions that generates Const op,\n# including `tf.ones`, `tf.zeros` and random functions.\ndef make_constant_tests(zip_path):\n \"\"\"Make a set of tests to do constant ops.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],\n }]\n\n def build_graph(parameters):\n # Since Toco & Tflite can't have a single constant op in the entire graph,\n # this test adds a zero tensor with a constant op tensor.\n input1 = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input1\",\n shape=parameters[\"input_shape\"])\n out = tf.ones(parameters[\"input_shape\"], dtype=parameters[\"dtype\"]) + input1\n return [input1], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input1 = np.zeros(parameters[\"input_shape\"],\n dtype=_TF_TYPE_INFO[parameters[\"dtype\"]][0])\n return [input1], sess.run(outputs, feed_dict={inputs[0]: input1})\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_binary_op_tests(zip_path, binary_operator):\n \"\"\"Make a set of tests to do add with and without broadcast.\"\"\"\n\n # These parameters are split because we don't support broadcasting.\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape_1\": [[1, 3, 4, 3]],\n \"input_shape_2\": [[1, 3, 4, 3]],\n \"activation\": [True]\n }, {\n \"dtype\": [tf.float32],\n \"input_shape_1\": [[5]],\n \"input_shape_2\": [[5]],\n \"activation\": [False, True]\n }, {\n \"dtype\": [tf.float32],\n \"input_shape_1\": [[1, 3, 4, 3]],\n \"input_shape_2\": [[3]],\n \"activation\": [True]\n }]\n\n def build_graph(parameters):\n \"\"\"Builds the graph given the current parameters.\"\"\"\n input1 = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_1\"])\n input2 = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_2\"])\n out = binary_operator(input1, input2)\n if parameters[\"activation\"]:\n out = tf.nn.relu(out)\n return [input1, input2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Builds operand inputs for op.\"\"\"\n input1 = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape_1\"])\n input2 = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape_2\"])\n return [input1, input2], sess.run(\n outputs, feed_dict={\n inputs[0]: input1,\n inputs[1]: input2\n })\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_mean_tests(zip_path):\n \"\"\"Make a set of tests to do mean.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape\": [[3, 2, 4]],\n \"axis\": [\n None, 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],\n [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0],\n [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]\n ],\n \"const_axis\": [True, False],\n \"keepdims\": [True, False],\n }, {\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape\": [[1, 224, 224, 3]],\n \"axis\": [\n None, 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],\n [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2,\n -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],\n [2, 2, 3], [-3, -3, -4], [-3, 2, 1]\n ],\n \"const_axis\": [True, False],\n \"keepdims\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the mean 
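# The binary-op generator above is parameterized over the TensorFlow operator,
# so a test for another elementwise op could be expressed the same way. This
# helper and the choice of tf.pow are purely hypothetical illustrations, not
# part of the generator set defined in this file.
def make_pow_tests_example(zip_path):
  """Hypothetical example: reuse the binary-op generator for tf.pow."""
  make_binary_op_tests(zip_path, tf.pow)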
op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n # Get axis as either a placeholder or constants.\n if parameters[\"const_axis\"]:\n axis = parameters[\"axis\"]\n input_tensors = [input_tensor]\n else:\n if isinstance(parameters[\"axis\"], list):\n shape = [len(parameters[\"axis\"])]\n else:\n shape = [0] # shape for None or integers.\n axis = tf.placeholder(dtype=tf.int32, name=\"axis\", shape=shape)\n input_tensors = [input_tensor, axis]\n\n out = tf.reduce_mean(\n input_tensor, axis=axis, keepdims=parameters[\"keepdims\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"const_axis\"]:\n if parameters[\"axis\"]:\n values.append(np.array(parameters[\"axis\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_exp_tests(zip_path):\n \"\"\"Make a set of tests to do exp.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the exp op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n out = tf.exp(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"], parameters[\"input_shape\"],\n min_value=-100, max_value=9)\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_log_softmax_tests(zip_path):\n \"\"\"Make a set of tests to do log_softmax.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape\": [[1, 100], [4, 2], [5, 224]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the log_softmax op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n out = tf.nn.log_softmax(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(\n parameters[\"input_dtype\"],\n parameters[\"input_shape\"],\n min_value=-100,\n max_value=9)\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_maximum_tests(zip_path):\n \"\"\"Make a set of tests to do maximum.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape_1\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n \"input_shape_2\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the maximum op testing graph.\"\"\"\n input_tensor_1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_1\",\n shape=parameters[\"input_shape_1\"])\n input_tensor_2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_2\",\n shape=parameters[\"input_shape_2\"])\n\n out = tf.maximum(input_tensor_1, input_tensor_2)\n return [input_tensor_1, input_tensor_2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n 
create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_1\"]),\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_2\"])\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_minimum_tests(zip_path):\n \"\"\"Make a set of tests to do minimum.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape_1\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n \"input_shape_2\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the minimum op testing graph.\"\"\"\n input_tensor_1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_1\",\n shape=parameters[\"input_shape_1\"])\n input_tensor_2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_2\",\n shape=parameters[\"input_shape_2\"])\n\n out = tf.minimum(input_tensor_1, input_tensor_2)\n return [input_tensor_1, input_tensor_2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_1\"]),\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_2\"])\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_binary_op_tests_func(binary_operator):\n \"\"\"Return a function that does a test on a binary operator.\"\"\"\n return lambda zip_path: make_binary_op_tests(zip_path, binary_operator)\n\n\ndef make_add_tests(zip_path):\n make_binary_op_tests(zip_path, tf.add)\n\n\ndef make_div_tests(zip_path):\n make_binary_op_tests(zip_path, tf.div)\n\n\ndef make_sub_tests(zip_path):\n make_binary_op_tests(zip_path, tf.subtract)\n\n\ndef make_mul_tests(zip_path):\n make_binary_op_tests(zip_path, tf.multiply)\n\n\ndef make_gather_tests(zip_path):\n \"\"\"Make a set of tests to do gather.\"\"\"\n\n test_parameters = [{\n # TODO(mgubin): add string tests when they are supported by Toco.\n # TODO(mgubin): add tests for Nd indices when they are supported by\n # TfLite.\n \"params_dtype\": [tf.float32, tf.int32],\n \"params_shape\": [[10], [1, 2, 20]],\n \"indices_dtype\": [tf.int32],\n \"indices_shape\": [[3], [5]],\n \"axis\": [0, 1],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the gather op testing graph.\"\"\"\n params = tf.placeholder(\n dtype=parameters[\"params_dtype\"],\n name=\"params\",\n shape=parameters[\"params_shape\"])\n indices = tf.placeholder(\n dtype=parameters[\"indices_dtype\"],\n name=\"indices\",\n shape=parameters[\"indices_shape\"])\n out = tf.gather(params, indices, axis=parameters[\"axis\"])\n return [params, indices], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n params = create_tensor_data(parameters[\"params_dtype\"],\n parameters[\"params_shape\"])\n indices = create_tensor_data(parameters[\"indices_dtype\"],\n parameters[\"indices_shape\"], 0,\n parameters[\"params_shape\"][0] - 1)\n return [params, indices], sess.run(\n outputs, feed_dict=dict(zip(inputs, [params, indices])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_global_batch_norm_tests(zip_path):\n \"\"\"Make a set of tests to do batch_norm_with_global_normalization.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 1, 6, 2], [3, 4, 5, 4]],\n \"epsilon\": [0.1, 0.0001],\n \"scale_after\": [True, 
False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the global batch norm testing graph.\"\"\"\n input_shape = parameters[\"input_shape\"]\n scale_shape = input_shape[3]\n\n scale = create_tensor_data(parameters[\"dtype\"], scale_shape)\n offset = create_tensor_data(parameters[\"dtype\"], scale_shape)\n mean = create_tensor_data(parameters[\"dtype\"], scale_shape)\n variance = create_tensor_data(parameters[\"dtype\"], scale_shape)\n\n x = create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n x_norm = tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, scale, offset,\n parameters[\"epsilon\"], parameters[\"scale_after\"])\n\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.add(input_tensor, x_norm)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_fused_batch_norm_tests(zip_path):\n \"\"\"Make a set of tests to do fused_batch_norm.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 1, 6, 2]],\n \"epsilon\": [0.001, 0.1],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the testing graph for fused batch normalization.\"\"\"\n input_shape = parameters[\"input_shape\"]\n scale_shape = input_shape[3]\n\n scale = create_tensor_data(parameters[\"dtype\"], scale_shape)\n offset = create_tensor_data(parameters[\"dtype\"], scale_shape)\n mean = create_tensor_data(parameters[\"dtype\"], scale_shape)\n variance = create_tensor_data(parameters[\"dtype\"], scale_shape)\n\n x = create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n [x_norm, _, _] = tf.nn.fused_batch_norm(\n x, scale, offset, mean, variance,\n parameters[\"epsilon\"], data_format=\"NHWC\", is_training=False)\n\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.add(input_tensor, x_norm)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_conv_tests(zip_path):\n \"\"\"Make a set of tests to do convolution.\"\"\"\n\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 3]],\n \"filter_shape\": [[1, 1, 3, 2]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True, False],\n },\n {\n \"input_shape\": [[2, 14, 14, 2]],\n \"filter_shape\": [[6, 6, 2, 2]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 2, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True, False],\n }\n ]\n\n def build_graph(parameters):\n \"\"\"Build a conv graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n\n # Get filter input either as a placeholder or 
constants. Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n filter_input = create_tensor_data(np.float32, parameters[\"filter_shape\"])\n input_tensors = [input_tensor]\n else:\n filter_input = tf.placeholder(\n dtype=tf.float32, name=\"filter\", shape=parameters[\"filter_shape\"])\n input_tensors = [input_tensor, filter_input]\n\n out = tf.nn.conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n dilations=parameters[\"dilations\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input) or 2 tensors\n # (input, filter) based on whether filter is constant or variable input.\n values = [create_tensor_data(np.float32, parameters[\"input_shape\"])]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, parameters[\"filter_shape\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_depthwiseconv_tests(zip_path):\n \"\"\"Make a set of tests to do convolution.\"\"\"\n\n # Tensorflow only supports equal strides\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 10, 3]],\n \"filter_size\": [[1, 1], [1, 2], [3, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 3, 3, 1]],\n \"channel_multiplier\": [1, 2],\n \"rate\": [[1, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"],\n \"constant_filter\": [True, False],\n },\n {\n \"input_shape\": [[1, 3, 4, 3]],\n \"filter_size\": [[1, 1]],\n \"strides\": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]\n \"channel_multiplier\": [2],\n \"rate\": [[2, 2]], # Only [1, 1] is supported\n \"padding\": [\"SAME\"],\n \"data_format\": [\"NHWC\"],\n \"constant_filter\": [True, False],\n }\n ]\n\n def get_tensor_shapes(parameters):\n input_shape = parameters[\"input_shape\"]\n filter_size = parameters[\"filter_size\"]\n filter_shape = filter_size + [\n input_shape[3], parameters[\"channel_multiplier\"]\n ]\n return [input_shape, filter_shape]\n\n def build_graph(parameters):\n \"\"\"Build a depthwise conv graph given `parameters`.\"\"\"\n input_shape, filter_shape = get_tensor_shapes(parameters)\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=input_shape)\n\n # Get filter input either as a placeholder or constants. 
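# Worked shape arithmetic for the depthwise case above, restated standalone:
# the filter shape is filter_size + [input_channels, channel_multiplier]. The
# sample values come from the parameter lists above.
_dw_input_shape = [1, 10, 10, 3]
_dw_filter_size = [3, 3]
_dw_channel_multiplier = 2
_dw_filter_shape = _dw_filter_size + [_dw_input_shape[3], _dw_channel_multiplier]
# _dw_filter_shape == [3, 3, 3, 2]: 3 input channels, each mapped to 2 outputs,
# i.e. 6 output channels in total.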
Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n filter_input = create_tensor_data(np.float32, filter_shape)\n input_tensors = [input_tensor]\n else:\n filter_input = tf.placeholder(\n dtype=tf.float32, name=\"filter\", shape=filter_shape)\n input_tensors = [input_tensor, filter_input]\n\n out = tf.nn.depthwise_conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n rate=parameters[\"rate\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input) or 2 tensors\n # (input, filter) based on whether filter is constant or variable input.\n input_shape, filter_shape = get_tensor_shapes(parameters)\n values = [create_tensor_data(np.float32, input_shape)]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, filter_shape))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_split_tests(zip_path):\n \"\"\"Make a set of tests to do tf.split.\"\"\"\n\n test_parameters = [{\n \"input_shape\": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],\n \"num_or_size_splits\": [1, 2, 3, 4, 5, [2, 2]],\n \"axis\": [0, 1, 2, 3, -4, -3, -2, -1],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.split(\n input_tensor, parameters[\"num_or_size_splits\"], parameters[\"axis\"])\n return [input_tensor], out\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [create_tensor_data(np.float32, parameters[\"input_shape\"])]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_concat_tests(zip_path):\n \"\"\"Make a set of tests to do concatenation.\"\"\"\n\n test_parameters = [{\n \"base_shape\": [[1, 3, 4, 3], [3, 4]],\n \"num_tensors\": [1, 2, 3, 4, 5, 6],\n \"axis\": [0, 1, 2, 3, -3, -2, -1],\n }]\n\n def get_shape(parameters, delta):\n \"\"\"Return a tweaked version of 'base_shape'.\"\"\"\n axis = parameters[\"axis\"]\n shape = parameters[\"base_shape\"][:]\n if axis < 0:\n axis += len(shape)\n if axis < len(shape):\n shape[axis] += delta\n return shape\n\n def build_graph(parameters):\n all_tensors = []\n for n in range(0, parameters[\"num_tensors\"]):\n input_tensor = tf.placeholder(dtype=tf.float32, name=(\"input%d\" % n),\n shape=get_shape(parameters, n))\n all_tensors.append(input_tensor)\n out = tf.concat(all_tensors, parameters[\"axis\"])\n return all_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n all_values = []\n for n in range(0, parameters[\"num_tensors\"]):\n input_values = create_tensor_data(np.float32,\n get_shape(parameters, n))\n all_values.append(input_values)\n return all_values, sess.run(\n outputs, feed_dict=dict(zip(inputs, all_values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_fully_connected_tests(zip_path):\n \"\"\"Make a set of tests to do fully_connected.\"\"\"\n\n test_parameters = [{\n \"shape1\": [[3, 3]],\n \"shape2\": [[3, 3]],\n \"transpose_a\": [True, False],\n \"transpose_b\": [True, False],\n \"constant_filter\": [True, False],\n }, {\n \"shape1\": [[4, 4], [1, 4], [4]],\n \"shape2\": [[4, 4], 
[4, 1], [4]],\n \"transpose_a\": [False],\n \"transpose_b\": [False],\n \"constant_filter\": [True, False],\n }, {\n \"shape1\": [[40, 37]],\n \"shape2\": [[37, 40]],\n \"transpose_a\": [False],\n \"transpose_b\": [False],\n \"constant_filter\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build a matmul graph given `parameters`.\"\"\"\n input_tensor1 = tf.placeholder(dtype=tf.float32, name=\"input1\",\n shape=parameters[\"shape1\"])\n\n # Get input_tensor2 either as a placeholder or constants. Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n input_tensor2 = create_tensor_data(np.float32, parameters[\"shape2\"])\n input_tensors = [input_tensor1]\n else:\n input_tensor2 = tf.placeholder(\n dtype=tf.float32, name=\"input2\", shape=parameters[\"shape2\"])\n input_tensors = [input_tensor1, input_tensor2]\n\n out = tf.matmul(input_tensor1, input_tensor2,\n transpose_a=parameters[\"transpose_a\"],\n transpose_b=parameters[\"transpose_b\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input_values1) or 2\n # tensors (input_values1, input_values2) based on whether the second input\n # is a constant or variable input.\n values = [create_tensor_data(np.float32, shape=parameters[\"shape1\"])]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, parameters[\"shape2\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_l2norm_tests(zip_path):\n \"\"\"Make a set of tests to do l2norm.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n \"dim\": [0, 1, 2, 3, [2, 3], -2],\n \"epsilon\": [None, 1e-12, 1e-3],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n if parameters[\"epsilon\"]:\n out = tf.nn.l2_normalize(\n input_tensor, parameters[\"dim\"], epsilon=parameters[\"epsilon\"])\n else:\n out = tf.nn.l2_normalize(input_tensor, parameters[\"dim\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], min_value=-4, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_local_response_norm_tests(zip_path):\n \"\"\"Make a set of tests to do local_response_norm.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],\n \"depth_radius\": [None, 0, 1, 3, 4, 5],\n \"bias\": [None, 0.1, 0.3, -0.1],\n \"alpha\": [None, 1, 2, -3],\n \"beta\": [None, 0.5, 0.25, 2],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.nn.local_response_normalization(\n input_tensor, depth_radius=parameters[\"depth_radius\"],\n bias=parameters[\"bias\"], alpha=parameters[\"alpha\"],\n beta=parameters[\"beta\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], 
min_value=-4, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_pad_tests(zip_path):\n \"\"\"Make a set of tests to do pad.\"\"\"\n\n # TODO(nupurgarg): Add test for tf.uint8.\n test_parameters = [\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 1, 2, 1], [2, 1, 1, 1]],\n \"paddings\": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],\n [0, 0], [2, 3]]],\n \"constant_paddings\": [True, False],\n },\n # Non-4D use case.\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 2], [0, 1, 2]],\n \"paddings\": [[[0, 1], [2, 3]]],\n \"constant_paddings\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a pad graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n # Get paddings as either a placeholder or constants.\n if parameters[\"constant_paddings\"]:\n paddings = parameters[\"paddings\"]\n input_tensors = [input_tensor]\n else:\n shape = [len(parameters[\"paddings\"]), 2]\n paddings = tf.placeholder(dtype=tf.int32, name=\"padding\", shape=shape)\n input_tensors = [input_tensor, paddings]\n\n out = tf.pad(input_tensor, paddings=paddings)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_paddings\"]:\n values.append(np.array(parameters[\"paddings\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_padv2_tests(zip_path):\n \"\"\"Make a set of tests to do padv2.\"\"\"\n\n # TODO(nupurgarg): Add test for tf.uint8.\n test_parameters = [\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 1, 2, 1], [2, 1, 1, 1]],\n \"paddings\": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],\n [0, 0], [2, 3]]],\n \"constant_paddings\": [True, False],\n \"constant_values\": [0, 2],\n },\n # Non-4D use case.\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 2], [0, 1, 2]],\n \"paddings\": [[[0, 1], [2, 3]]],\n \"constant_paddings\": [True, False],\n \"constant_values\": [0, 2],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a pad graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n # Get paddings as either a placeholder or constants.\n if parameters[\"constant_paddings\"]:\n paddings = parameters[\"paddings\"]\n input_tensors = [input_tensor]\n else:\n shape = [len(parameters[\"paddings\"]), 2]\n paddings = tf.placeholder(dtype=tf.int32, name=\"padding\", shape=shape)\n input_tensors = [input_tensor, paddings]\n\n out = tf.pad(input_tensor, paddings=paddings,\n constant_values=parameters[\"constant_values\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_paddings\"]:\n values.append(np.array(parameters[\"paddings\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_reshape_tests(zip_path):\n \"\"\"Make a set 
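# Worked example for the pad generators above, using the numpy import from the
# top of this file: paddings [[0, 1], [2, 3]] on an input of shape [1, 2] add
# 0 rows before / 1 row after and 2 columns before / 3 columns after.
_padded = np.pad(np.zeros([1, 2]), [[0, 1], [2, 3]], mode="constant")
# _padded.shape == (2, 7)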
of tests to do reshape.\"\"\"\n\n # All shapes below are suitable for tensors with 420 elements.\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],\n \"output_shape\": [[15, 28], [420], [1, -1, 5, 7], [-1]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.reshape(input_tensor, shape=parameters[\"output_shape\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_resize_bilinear_tests(zip_path):\n \"\"\"Make a set of tests to do resize_bilinear.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [None, True, False],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.image.resize_bilinear(input_tensor, size=parameters[\"size\"],\n align_corners=parameters[\"align_corners\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_sigmoid_tests(zip_path):\n \"\"\"Make a set of tests to do sigmoid.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.sigmoid(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_softmax_tests(zip_path):\n \"\"\"Make a set of tests to do softmax.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [2, 3]],\n \"dim\": [-1, 0],\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[4, 7]],\n \"dim\": [-1, 1],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.nn.softmax(input_tensor, dim=parameters[\"dim\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_space_to_depth_tests(zip_path):\n \"\"\"Make a set of tests to do space_to_depth.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.float16, tf.int32, tf.uint8, tf.int64],\n 
\"input_shape\": [[2, 12, 24, 1]],\n \"block_size\": [2, 3, 4],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.space_to_depth(input_tensor, block_size=parameters[\"block_size\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_space_to_batch_nd_tests(zip_path):\n \"\"\"Make a set of tests to do space_to_batch_nd.\"\"\"\n\n # TODO(nupurgarg): Add test for uint8.\n test_parameters = [\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 2, 2, 3], [2, 2, 4, 1]],\n \"block_shape\": [[1, 3], [2, 2]],\n \"paddings\": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[2, 3, 7, 3]],\n \"block_shape\": [[1, 3], [2, 2]],\n \"paddings\": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n # Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others.\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 4, 4, 4, 1, 1]],\n \"block_shape\": [[2, 2, 2]],\n \"paddings\": [[[0, 0], [0, 0], [0, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a space_to_batch graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n input_tensors = [input_tensor]\n\n # Get block_shape either as a const or as a placeholder (tensor).\n if parameters[\"constant_block_shape\"]:\n block_shape = parameters[\"block_shape\"]\n else:\n shape = [len(parameters[\"block_shape\"])]\n block_shape = tf.placeholder(dtype=tf.int32, name=\"shape\", shape=shape)\n input_tensors.append(block_shape)\n\n # Get paddings either as a const or as a placeholder (tensor).\n if parameters[\"constant_paddings\"]:\n paddings = parameters[\"paddings\"]\n else:\n shape = [len(parameters[\"paddings\"]), 2]\n paddings = tf.placeholder(dtype=tf.int32, name=\"paddings\", shape=shape)\n input_tensors.append(paddings)\n\n out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_block_shape\"]:\n values.append(np.array(parameters[\"block_shape\"]))\n if not parameters[\"constant_paddings\"]:\n values.append(np.array(parameters[\"paddings\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_batch_to_space_nd_tests(zip_path):\n \"\"\"Make a set of tests to do batch_to_space_nd.\"\"\"\n\n test_parameters = [\n {\n \"dtype\": [tf.float32, tf.int64, tf.int32],\n \"input_shape\": [[12, 3, 3, 1]],\n \"block_shape\": [[1, 4], [2, 2], [3, 4]],\n \"crops\": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],\n \"constant_block_shape\": [True, False],\n \"constant_crops\": [True, False],\n },\n # Non-4D use case: 
1 bath dimension, 3 spatial dimensions, 2 others.\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[8, 2, 2, 2, 1, 1]],\n \"block_shape\": [[2, 2, 2]],\n \"crops\": [[[0, 0], [0, 0], [0, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_crops\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a batch_to_space graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n input_tensors = [input_tensor]\n\n # Get block_shape either as a const or as a placeholder (tensor).\n if parameters[\"constant_block_shape\"]:\n block_shape = parameters[\"block_shape\"]\n else:\n shape = [len(parameters[\"block_shape\"])]\n block_shape = tf.placeholder(dtype=tf.int32, name=\"shape\", shape=shape)\n input_tensors.append(block_shape)\n\n # Get crops either as a const or as a placeholder (tensor).\n if parameters[\"constant_crops\"]:\n crops = parameters[\"crops\"]\n else:\n shape = [len(parameters[\"crops\"]), 2]\n crops = tf.placeholder(dtype=tf.int32, name=\"crops\", shape=shape)\n input_tensors.append(crops)\n\n out = tf.batch_to_space_nd(input_tensor, block_shape, crops)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_block_shape\"]:\n values.append(np.array(parameters[\"block_shape\"]))\n if not parameters[\"constant_crops\"]:\n values.append(np.array(parameters[\"crops\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_transpose_tests(zip_path):\n \"\"\"Make a set of tests to do transpose.\"\"\"\n\n # TODO(nupurgarg): Add test for uint8.\n test_parameters = [{\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[2, 2, 3]],\n \"perm\": [[0, 1, 2], [0, 2, 1]],\n \"constant_perm\": [True, False],\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 2, 3, 4]],\n \"perm\": [[0, 1, 2, 3], [3, 0, 1, 2]],\n \"constant_perm\": [True, False],\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 2, 3, 4, 5]],\n \"perm\": [[4, 3, 2, 1, 0]],\n \"constant_perm\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build a transpose graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n if parameters[\"constant_perm\"]:\n perm = parameters[\"perm\"]\n input_tensors = [input_tensor]\n else:\n shape = [len(parameters[\"perm\"]), 2]\n perm = tf.placeholder(dtype=tf.int32, name=\"perm\", shape=shape)\n input_tensors = [input_tensor, perm]\n\n out = tf.transpose(input_tensor, perm=perm)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_perm\"]:\n values.append(np.array(parameters[\"perm\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_squeeze_tests(zip_path):\n \"\"\"Make a set of tests to do squeeze.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.int32, tf.float32, tf.int64],\n \"input_shape\": [[1, 2, 1, 3, 1, 4, 1, 1]],\n \"axis\": [\n None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],\n [-1, -2, -4, -6, 
-8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],\n [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]\n ],\n }, {\n \"dtype\": [tf.int32, tf.float32, tf.int64],\n \"input_shape\": [[1]],\n \"axis\": [None, [], [0], [-1]],\n }, {\n \"dtype\": [tf.int32, tf.float32, tf.int64],\n \"input_shape\": [[1, 1, 1, 1, 1]],\n \"axis\": [None, [], [0], [3, 0], [-2, 0, 3, 2]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.squeeze(input_tensor, axis=parameters[\"axis\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_strided_slice_tests(zip_path):\n \"\"\"Make a set of tests to do strided_slice.\"\"\"\n\n # TODO(soroosh): add test/support for uint8.\n test_parameters = [\n # 4-D\n {\n \"dtype\": [tf.float32, tf.int32, tf.int64],\n \"index_type\": [tf.int32],\n \"input_shape\": [[12, 2, 2, 5]],\n \"begin\": [[0, 0, 0, 0], [1, 0, 1, 0]],\n \"end\": [[8, 2, 2, 3], [12, 2, 2, 5]],\n \"strides\": [None, [2, 1, 3, 1]],\n \"begin_mask\": [None, 1, 8],\n \"end_mask\": [None, 1, 8],\n \"shrink_axis_mask\": [None, 1, 8, 11, 15, -1],\n \"constant_indices\": [False, True],\n },\n # TODO(b/73170889) Restore test parameters removed in cl/191608113.\n # 2-D\n {\n \"dtype\": [tf.float32, tf.int32, tf.int64],\n \"index_type\": [tf.int32],\n \"input_shape\": [[2, 3]],\n \"begin\": [[0, 0], [1, 0]],\n \"end\": [[2, 3], [2, 2]],\n \"strides\": [None, [2, 2]],\n \"begin_mask\": [None, 1, 2],\n \"end_mask\": [None, 1, 2],\n \"shrink_axis_mask\": [None, 1, 2, 3, -1],\n \"constant_indices\": [False, True],\n },\n # 1-D Exhaustive\n {\n \"dtype\": [tf.float32],\n \"index_type\": [tf.int32],\n \"input_shape\": [[4]],\n \"begin\": [[-100], [-3], [-2], [-1], [0], [1], [2], [3], [100]],\n \"end\": [[-100], [-3], [-2], [-1], [0], [1], [2], [3], [100]],\n \"strides\": [-2, -1, 1, 2],\n \"begin_mask\": [0, 1],\n \"end_mask\": [0, 1],\n \"shrink_axis_mask\": [0],\n \"constant_indices\": [False],\n },\n # Negative strides\n {\n \"dtype\": [tf.float32],\n \"index_type\": [tf.int32],\n \"input_shape\": [[2, 3]],\n \"begin\": [[0, -1]],\n \"end\": [[2, -3]],\n \"strides\": [[1, -1]],\n \"begin_mask\": [None, 1, 2],\n \"end_mask\": [None, 1, 2],\n \"shrink_axis_mask\": [None, 1, 2, 3, -1],\n \"constant_indices\": [False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build graph for stride_slice test.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n if parameters[\"constant_indices\"]:\n begin = parameters[\"begin\"]\n end = parameters[\"end\"]\n strides = parameters[\"strides\"]\n tensors = [input_tensor]\n else:\n begin = tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"begin\",\n shape=[len(parameters[\"input_shape\"])])\n end = tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"end\",\n shape=[len(parameters[\"input_shape\"])])\n strides = (\n tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"strides\",\n shape=[len(parameters[\"input_shape\"])])\n if parameters[\"strides\"] is not None else None)\n tensors = [input_tensor, begin, end]\n if strides is not None:\n tensors.append(strides)\n out = 
tf.strided_slice(\n input_tensor,\n begin,\n end,\n strides,\n begin_mask=parameters[\"begin_mask\"],\n end_mask=parameters[\"end_mask\"])\n return tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Build inputs for stride_slice test.\"\"\"\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n index_type = _TF_TYPE_INFO[parameters[\"index_type\"]][0]\n values = [input_values]\n if not parameters[\"constant_indices\"]:\n begin_values = np.array(parameters[\"begin\"]).astype(index_type)\n end_values = np.array(parameters[\"end\"]).astype(index_type)\n stride_values = (\n np.array(parameters[\"strides\"]).astype(index_type)\n if parameters[\"strides\"] is not None else None)\n values.append(begin_values)\n values.append(end_values)\n if stride_values is not None:\n values.append(stride_values)\n\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_lstm_tests(zip_path):\n \"\"\"Make a set of tests to do basic Lstm cell.\"\"\"\n\n test_parameters = [\n {\n \"dtype\": [tf.float32],\n \"num_batchs\": [1],\n \"time_step_size\": [1],\n \"input_vec_size\": [3],\n \"num_cells\": [4],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a simple graph with BasicLSTMCell.\"\"\"\n\n num_batchs = parameters[\"num_batchs\"]\n time_step_size = parameters[\"time_step_size\"]\n input_vec_size = parameters[\"input_vec_size\"]\n num_cells = parameters[\"num_cells\"]\n inputs_after_split = []\n for i in xrange(time_step_size):\n one_timestamp_input = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"split_{}\".format(i),\n shape=[num_batchs, input_vec_size])\n inputs_after_split.append(one_timestamp_input)\n # Currently lstm identifier has a few limitations: only supports\n # forget_bias == 0, inner state activiation == tanh.\n # TODO(zhixianyan): Add another test with forget_bias == 1.\n # TODO(zhixianyan): Add another test with relu as activation.\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(\n num_cells, forget_bias=0.0, state_is_tuple=True)\n cell_outputs, _ = rnn.static_rnn(\n lstm_cell, inputs_after_split, dtype=tf.float32)\n out = cell_outputs[-1]\n return inputs_after_split, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Feed inputs, assign variables, and freeze graph.\"\"\"\n\n with tf.variable_scope(\"\", reuse=True):\n kernel = tf.get_variable(\"rnn/basic_lstm_cell/kernel\")\n bias = tf.get_variable(\"rnn/basic_lstm_cell/bias\")\n kernel_values = create_tensor_data(\n parameters[\"dtype\"], [kernel.shape[0], kernel.shape[1]], -1, 1)\n bias_values = create_tensor_data(parameters[\"dtype\"], [bias.shape[0]], 0,\n 1)\n sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))\n\n num_batchs = parameters[\"num_batchs\"]\n time_step_size = parameters[\"time_step_size\"]\n input_vec_size = parameters[\"input_vec_size\"]\n input_values = []\n for _ in xrange(time_step_size):\n tensor_data = create_tensor_data(parameters[\"dtype\"],\n [num_batchs, input_vec_size], 0, 1)\n input_values.append(tensor_data)\n out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))\n return input_values, out\n\n # TODO(zhixianyan): Automatically generate rnn_states for lstm cell.\n extra_toco_options = ExtraTocoOptions()\n extra_toco_options.rnn_states = (\n \"{state_array:rnn/BasicLSTMCellZeroState/zeros,\"\n \"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},\"\n 
\"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,\"\n \"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}\")\n\n make_zip_of_tests(\n zip_path,\n test_parameters,\n build_graph,\n build_inputs,\n extra_toco_options,\n use_frozen_graph=True)\n\n\ndef make_l2_pool(input_tensor, ksize, strides, padding, data_format):\n \"\"\"Given an input perform a sequence of TensorFlow ops to produce l2pool.\"\"\"\n return tf.sqrt(tf.nn.avg_pool(\n tf.square(input_tensor), ksize=ksize, strides=strides,\n padding=padding, data_format=data_format))\n\n\ndef make_topk_tests(zip_path):\n \"\"\"Make a set of tests to do topk.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[10], [5, 20]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the topk op testing graph.\"\"\"\n input_value = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n k = tf.constant(3, name=\"k\")\n out = tf.nn.top_k(input_value, k)\n return [input_value], [out[1]]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_arg_max_tests(zip_path):\n \"\"\"Make a set of tests to do arg_max.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],\n \"axis\": [0, 1, 2, 3],\n \"output_type\": [tf.int32, tf.int64],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the topk op testing graph.\"\"\"\n input_value = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n axis = tf.constant(parameters[\"axis\"], name=\"axis\")\n out = tf.arg_max(input_value, axis, output_type=parameters[\"output_type\"])\n return [input_value], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_greater_tests(zip_path):\n \"\"\"Make a set of tests to do greater.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the greater op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.greater(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, 
build_inputs)\n\n\ndef make_greater_equal_tests(zip_path):\n \"\"\"Make a set of tests to do greater_equal.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the greater_equal op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.greater_equal(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_less_tests(zip_path):\n \"\"\"Make a set of tests to do less.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the less op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.less(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_less_equal_tests(zip_path):\n \"\"\"Make a set of tests to do less_equal.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the less_equal op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.less_equal(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], sess.run(\n 
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_floor_tests(zip_path):\n \"\"\"Make a set of tests to do floor.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape\": [[1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the floor op testing graph.\"\"\"\n input_value = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape\"])\n out = tf.floor(input_value)\n return [input_value], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict={inputs[0]: input_value})\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_neg_tests(zip_path):\n \"\"\"Make a set of tests to do neg.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [5]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the neg op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.negative(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_where_tests(zip_path):\n \"\"\"Make a set of tests to do where.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape_set\": [([1, 2, 3, 4], [1, 2, 3, 4]),],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the where op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_set\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input3\",\n shape=parameters[\"input_shape_set\"][1])\n less = tf.less(input_value1, input_value2)\n out = tf.where(less, input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_set\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_set\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n# Toco binary path provided by the generate rule.\nbin_path = None\n\ndef main(unused_args):\n global bin_path\n def mkdir_if_not_exist(x):\n if not os.path.isdir(x):\n os.mkdir(x)\n if not os.path.isdir(x):\n raise RuntimeError(\"Failed to create dir %r\" % x)\n\n opstest_path = os.path.join(FLAGS.output_path)\n mkdir_if_not_exist(opstest_path)\n\n out = FLAGS.zip_to_output\n bin_path = FLAGS.toco\n test_function = (\"make_%s_tests\" % out.replace(\".zip\", \"\"))\n if test_function not in globals():\n raise RuntimeError(\"Can't find a test function to create %r. Tried %r\" %\n (out, test_function))\n\n # TODO(ahentz): accessing globals() is not very elegant. 
We should either\n # break this file into multiple tests or use decorator-based registration to\n # avoid using globals().\n globals()[test_function](os.path.join(opstest_path, out))\n\n\nif __name__ == \"__main__\":\n FLAGS, unparsed = parser.parse_known_args()\n\n if unparsed:\n print(\"Usage: %s <path out> <zip file to generate>\")\n else:\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.get_variable", "tensorflow.device", "tensorflow.nn.batch_norm_with_global_normalization", "tensorflow.nn.log_softmax", "tensorflow.concat", "tensorflow.zeros", "tensorflow.control_dependencies", "tensorflow.minimum", "tensorflow.global_variables", "tensorflow.space_to_batch_nd", "numpy.random.random_sample", "tensorflow.pad", "tensorflow.where", "tensorflow.nn.depthwise_conv2d", "tensorflow.nn.conv2d", "tensorflow.strided_slice", "numpy.random.randint", "tensorflow.greater", "tensorflow.batch_to_space_nd", "tensorflow.floor", "tensorflow.squeeze", "tensorflow.gather", "tensorflow.nn.top_k", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.square", "tensorflow.Session", "tensorflow.assert_greater_equal", "numpy.zeros", "tensorflow.app.run", "tensorflow.contrib.lite.testing.generate_examples_report.make_report_table", "tensorflow.nn.fused_batch_norm", "tensorflow.matmul", "tensorflow.image.resize_bilinear", "tensorflow.negative", "tensorflow.nn.l2_normalize", "tensorflow.less", "tensorflow.python.ops.rnn.static_rnn", "tensorflow.less_equal", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.logging.info", "tensorflow.split", "numpy.array", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.constant", "tensorflow.arg_max", "tensorflow.reduce_mean", "tensorflow.space_to_depth", "tensorflow.maximum", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.ones", "numpy.random.seed", "tensorflow.variable_scope", "tensorflow.greater_equal", "tensorflow.nn.local_response_normalization" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Chenguang-Zhu/relancer
[ "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "bf1a175b77b7da4cff12fbc5de17dd55246d264d" ]
[ "relancer-exp/original_notebooks/mirichoi0218_insurance/medical-insurance-cost-linear-regression.py", "relancer-exp/original_notebooks/shivam2503_diamonds/predicting-diamond-prices-using-linear-svm.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../../../input/mirichoi0218_insurance/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../../../input/mirichoi0218_insurance\"))\n\n# Any results you write to the current directory are saved as output.\n\n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd\ndata_train_file = \"../../../input/mirichoi0218_insurance/insurance.csv\"\ndf =pd.read_csv(data_train_file)\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\nplt.scatter(\"age\",\"charges\",data=df)\n\n\n# In[ ]:\n\n\nimport seaborn as sns\nsns.countplot(\"children\",data=df)\n\n\n# In[ ]:\n\n\nsns.countplot(\"smoker\",data=df)\n\n\n# In[ ]:\n\n\nsns.countplot(\"region\",data = df)\n\n\n# In[ ]:\n\n\nplt.scatter(\"smoker\",\"charges\",data =df)\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\ndef smoker(yes):\n if yes ==\"yes\":\n return 1\n else:\n return 0\ndf[\"smoker\"]=df[\"smoker\"].apply(smoker)\ndef sex(s):\n if s ==\"male\":\n return 1\n else:\n return 0\ndf[\"sex\"]=df[\"sex\"].apply(sex)\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\nx = df.drop([\"charges\",\"region\"],axis =1)\n\n\n# In[ ]:\n\n\ny =df[\"charges\"]\n\n\n# In[ ]:\n\n\nfrom sklearn.cross_validation import train_test_split\n#split data into training and testing sets\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[ ]:\n\n\nx1,x2,y1,y2 = train_test_split(x,y,test_size = 0.3)\nmodel = LinearRegression()\nmodel.fit(x1,y1)\npre = model.predict(x2)\nprint ('acc : ',model.score(x2,y2))\nprint ('intercept : ',model.intercept_)\nprint ('coefficient : ',model.coef_)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../../../input/shivam2503_diamonds/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\nimport matplotlib.pyplot as plt\nimport cufflinks as cf\nimport sklearn\nfrom sklearn import svm, preprocessing \n\nimport plotly.graph_objs as go\nimport plotly.plotly as py\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\ninit_notebook_mode(connected=True)\nimport os\nprint(os.listdir(\"../../../input/shivam2503_diamonds\"))\n\n# Any results you write to the current directory are saved as output.\n\n\n# ## **[1] Loading Data **\n\n# In[ ]:\n\n\ndf = pd.read_csv(\"../../../input/shivam2503_diamonds/diamonds.csv\")\ndf.info()\n\n\n# ## **[2] Data Preprocessing **\n\n# ### **[2.1] Removing additional Index :** since we have the in built index from pandas, we dont need the \"Unnamed: 0\" attribute. 
\n\n# In[ ]:\n\n\ndf.head()\ndf = df.drop('Unnamed: 0', axis = 1)\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\ndf['clarity'].unique()\n\n\n# In[ ]:\n\n\ndf.groupby('cut').count()['carat'].plot.bar()\n\n\n# ### **[2.2] Converting Strings into Numbers:** For training models, we should convert the text based values into appropriate number representation. \n\n# In[ ]:\n\n\ncut_dict = {'Fair' : 1, 'Good' : 2, 'Very Good' : 3, 'Premium' : 4, 'Ideal' : 5}\nclarity_dict ={ 'I1' : 1, 'SI2' : 2, 'SI1' : 3, 'VS2' : 4, 'VS1' : 5, 'VVS2' : 6, 'VVS1' : 7 , 'IF' : 8}\ncolor_dict = {'D':7, 'E':6, 'F':5, 'G':4, 'H':3, 'I':2, 'J':1}\n\n\n# In[ ]:\n\n\ndf['cut'] = df['cut'].map(cut_dict)\ndf['clarity'] = df['clarity'].map(clarity_dict)\ndf['color'] = df['color'].map(color_dict)\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\ndf.isnull().any()\n\n\n# ## **[3] Splitting the Dataset:**\n\n# In[ ]:\n\n\ndf = sklearn.utils.shuffle(df)\nX = df.drop(['price'], axis = 1).values\nX = preprocessing.scale(X)\ny = df['price'].values\ny = preprocessing.scale(y)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\ntest_size = 200\nX_train = X[: -test_size]\ny_train = y[: -test_size]\nX_test = X[-test_size :]\ny_test = y[-test_size :]\n\n\n# ## **[4] SVM Model:**\n\n# In[ ]:\n\n\nclf = svm.SVR(kernel = 'linear')\nclf.fit(X_train, y_train)\n\n\n# In[ ]:\n\n\nclf.score(X_test, y_test)\n\n\n# We are getting the Accuray of 88% using the Linear SVM model. \n\n# In[ ]:\n\n\ny_pred = clf.predict(X_test)\n\n\n# In[ ]:\n\n\n\ntrace0 = go.Scatter(y = y_test,x = np.arange(200),mode = 'lines',name = 'Actual Price',marker = dict(color = 'rgb(10, 150, 50)'))\n\ntrace1 = go.Scatter(y = y_pred,x = np.arange(200),mode = 'lines',name = 'Predicted Price',line = dict(color = 'rgb(110, 50, 140)',dash = 'dot'))\n\n\nlayout = go.Layout(xaxis = dict(title = 'Index'),yaxis = dict(title = 'Normalized Price'))\n\nfigure = go.Figure(data = [trace0, trace1], layout = layout)\niplot(figure)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n" ]
[ [ "sklearn.cross_validation.train_test_split", "pandas.read_csv", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.scatter" ], [ "pandas.read_csv", "sklearn.utils.shuffle", "numpy.arange", "sklearn.svm.SVR", "sklearn.preprocessing.scale" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
JohannesSeidel/pyNastran
[ "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf", "91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf" ]
[ "pyNastran/utils/test/test_dict_to_h5py.py", "pyNastran/bdf/cards/loads/dloads.py", "pyNastran/converters/abaqus/abaqus_cards.py", "pyNastran/op2/tables/oes_stressStrain/oes_nonlinear_rod.py", "pyNastran/converters/openfoam/test_openfoam.py", "pyNastran/dev/bdf_vectorized/cards/elements/spring/celas1.py", "pyNastran/converters/tetgen/tetgen.py", "pyNastran/op2/tables/oes_stressStrain/real/oes_bush.py", "h5Nastran/h5Nastran/post_process/grid_point_force_summation/grid_point_force_summation_data.py" ]
[ "# coding: utf-8\n\"\"\"tests for dict_to_h5py\"\"\"\nimport os\nimport unittest\nimport numpy as np\nfrom cpylog import get_logger\n\ntry:\n import h5py # pylint: disable=unused-import\n IS_H5PY = True\nexcept ImportError: # pragma: no cover\n IS_H5PY = False\n\nif IS_H5PY:\n from pyNastran.utils.dict_to_h5py import load_obj_from_hdf5, export_obj_to_hdf5\nfrom pyNastran.bdf.bdf import BDF\n\n\nclass TestDictToH5(unittest.TestCase):\n\n @unittest.skipIf(not IS_H5PY, \"No h5py\")\n def test_dict_to_h5py(self):\n model = BDF()\n log = get_logger(log=None, level='warning', encoding='utf-8')\n obj = {\n 'bdf' : model,\n 'key' : 'value',\n #1 : 2,\n #3.0 : 4.0,\n 'int_list' : [1, 2, 3],\n 'float_list' : [1.1, 2.2, 3.3],\n 'mydict' : {'one' : 1},\n 'five' : np.zeros(5),\n 'None' : None,\n 'nan' : np.nan,\n 'variable_type_list' : [1, 2.2, b'3.14s', u'4.4u'],\n 'variable_type_tuple' : (1, 2.2, b'3.14s', u'4.4u'),\n 'str_key_unicode_value' : u'helló wörld from two',\n 'helló wörld from two' : b'cat',\n }\n\n custom_types = {\n 'BDF' : BDF,\n }\n export_obj_to_hdf5('test.h5', obj, log=log)\n #export_obj_to_hdf5_file('test.h5', ap, log)\n new_dict = load_obj_from_hdf5('test.h5', custom_types, log=log)\n #print('new_dict[ap]', new_dict['ap'])\n #print('variable_type_list', new_dict['variable_type_list'])\n #print('variable_type_tuple', new_dict['variable_type_tuple'])\n export_obj_to_hdf5('test_new.h5', new_dict, log=log)\n\n\n os.remove('test.h5')\n os.remove('test_new.h5')\n\n #obj = {\n #'key' : 'value',\n ##1 : 2,\n ##3.0 : 4.0,\n #'mydict' : {'one' : 1},\n #'five' : np.zeros(5),\n #'ap' : analytic_predictor,\n #'None' : None,\n #'nan' : np.nan,\n #'variable_type_list' : [1, 2.2, '3.14s'],\n #'variable_type_tuple' : (1, 2.2, '3.14s'),\n #'breaking my code' : u'helló wörld from two',\n #'helló wörld from two' : 'cat',\n #}\n\n #print('new_dict[ap]', new_dict['ap'])\n assert isinstance(new_dict['variable_type_list'], list)\n assert isinstance(new_dict['variable_type_tuple'], tuple)\n assert isinstance(new_dict['five'], np.ndarray)\n assert len(new_dict['five']) == 5\n assert isinstance(new_dict['str_key_unicode_value'], str)\n assert isinstance(new_dict[u'helló wörld from two'], bytes), type(new_dict[u'helló wörld from two'])\n assert new_dict['None'] is None, new_dict['None']\n assert np.isnan(new_dict['nan']), new_dict['nan']\n #str_key_unicode_value\n\nif __name__ == '__main__':\n unittest.main()\n", "# coding: utf-8\n\"\"\"\nAll dynamic loads are defined in this file. This includes:\n\n * ACSRCE\n * DLOAD\n * TLOAD1\n * TLOAD2\n * RLOAD1\n * RLOAD2\n\n\"\"\"\nimport numpy as np\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, double_or_blank, integer_string_or_blank,\n integer_double_or_blank, double)\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nfrom pyNastran.bdf.field_writer_double import print_card_double\nfrom pyNastran.bdf.cards.loads.loads import DynamicLoad, LoadCombination, BaseCard\n\n\nclass ACSRCE(BaseCard):\n r\"\"\"\n Defines acoustic source as a function of power vs. 
frequency.\n\n +--------+-----+----------+---------------+-----------------+-------+-----+---+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+==========+===============+=================+=======+=====+===+\n | ACSRCE | SID | EXCITEID | DELAYI/DELAYR | DPHASEI/DPHASER | TP/RP | RHO | B |\n +--------+-----+----------+---------------+-----------------+-------+-----+---+\n\n ..math ::\n C = \\sqrt(B ⁄ ρ)\n Source Strength = {A} * 1/(2πf) * \\sqrt( 8πC P(f) / ρ) ^ (ei(θ + 2πfτ))\n\n \"\"\"\n type = 'ACSRCE'\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n excite_id = 2\n rho = 3.\n b = 5.\n return ACSRCE(sid, excite_id, rho, b,\n delay=0, dphase=0, power=0, comment='')\n\n def __init__(self, sid, excite_id, rho, b,\n delay=0, dphase=0, power=0, comment=''):\n \"\"\"\n Creates an ACSRCE card\n\n Parameters\n ----------\n sid : int\n load set id number (referenced by DLOAD)\n excite_id : int\n Identification number of a DAREA or SLOAD entry that lists\n each degree of freedom to apply the excitation and the\n corresponding scale factor, A, for the excitation\n rho : float\n Density of the fluid\n b : float\n Bulk modulus of the fluid\n delay : int; default=0\n Time delay, τ.\n dphase : int / float; default=0\n the dphase; if it's 0/blank there is no phase lag\n float : delay in units of time\n int : delay id\n power : int; default=0\n Power as a function of frequency, P(f).\n float : value of P(f) used over all frequencies for all\n degrees of freedom in EXCITEID entry.\n int : TABLEDi entry that defines P(f) for all degrees of\n freedom in EXCITEID entry.\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n self.excite_id = excite_id\n self.delay = delay\n self.dphase = dphase\n self.power = power\n self.rho = rho\n self.b = b\n self.power_ref = None\n self.sloads_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n #self.dphases_ref = None\n #self.delays_ref = None\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a ACSRCE card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n excite_id = integer(card, 2, 'excite_id') # DAREA, FBALOAD, SLOAD\n delay = integer_double_or_blank(card, 3, 'delay', 0) # DELAY, FBADLAY\n dphase = integer_double_or_blank(card, 4, 'dphase', 0) # DPHASE, FBAPHAS\n power = integer_double_or_blank(card, 5, 'power/tp/rp', 0) # TABLEDi/power\n rho = double(card, 6, 'rho')\n b = double(card, 7, 'bulk modulus')\n\n assert len(card) <= 8, 'len(ACSRCE card) = %i\\n%s' % (len(card), card)\n return ACSRCE(sid, excite_id, rho, b,\n delay=delay, dphase=dphase, power=power, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n cmsg = ', which is required by ACSRCE=%s' % (self.sid)\n\n # TODO: excite_id = DAREA, FBALOAD, SLOAD\n sloads_ref = {}\n lseqs_ref = {}\n for load_id, loads in model.loads.items():\n for load in loads:\n if load.type == 'SLOAD':\n #if load_id not in sloads_ref:\n #sloads_ref[load_id] = []\n for nid in load.node_ids:\n sloads_ref[(load_id, nid, 0)] = load\n elif load.type == 'LSEQ':\n load_idi = load.lid_ref[0].sid\n #print(load)\n #print(load.lid)\n excite_idi = load.excite_id\n #print('load_idi = %s' % load_idi)\n #print('excite_id = %s' % excite_idi)\n assert 
load_idi not in lseqs_ref\n lseqs_ref[load_idi] = load\n if sloads_ref:\n self.sloads_ref = sloads_ref\n sload_keys = list(sloads_ref.keys())\n #print('sload_keys =', sload_keys)\n else:\n sload_keys = []\n\n if self.excite_id not in model.dareas and self.excite_id not in lseqs_ref:\n darea_keys = list(model.dareas.keys())\n dphase_keys = list(model.dphases.keys())\n delay_keys = list(model.delays.keys())\n msg = 'excite_id=%s delay=%s dphase=%s\\n' % (\n self.excite_id, self.delay, self.dphase)\n msg += ' darea_keys=%s\\n' % darea_keys\n msg += ' sloads(load_id, nid, comp)=%s\\n' % sload_keys\n msg += ' dphases(sid)=%s\\n' % dphase_keys\n msg += ' delays(delay_id)=%s\\n' % delay_keys\n #raise RuntimeError(msg)\n #print(msg)\n\n if isinstance(self.delay, integer_types) and self.delay > 0:\n delays_ref = {}\n for sload_key in sload_keys:\n nid = sload_key[1]\n delay_key = (self.delay, nid, 0)\n delays_ref[sload_key] = model.DELAY(self.delay, msg=cmsg)\n if delays_ref:\n self.delay_ref = delays_ref\n\n if isinstance(self.dphase, integer_types) and self.dphase > 0:\n dphases_ref = {}\n for sload_key in sload_keys:\n nid = sload_key[1]\n dphase_key = (self.dphase, nid, 0)\n dphases_ref[sload_key] = model.DPHASE(self.dphase, msg=cmsg)\n if dphases_ref:\n self.dphase_ref = dphases_ref\n\n if isinstance(self.power, integer_types) and self.power > 0:\n self.power_ref = model.TableD(self.power, msg=cmsg)\n\n #load_ids2 = []\n #for load_id in self.load_ids:\n #load_id2 = model.DLoad(load_id, consider_dload_combinations=False, msg=msg)\n #load_ids2.append(load_id2)\n #self.load_ids = load_ids2\n #self.load_ids_ref = self.load_ids\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.power = self.Power()\n self.dphase = self.DPhase()\n self.delay = self.Delay()\n #self.sloads = self.\n\n #self.tb = self.Tb()\n #self.tp = self.Tp()\n #self.delay = self.delay_id\n #if self.tb > 0:\n #del self.tb_ref\n #if self.tp > 0:\n #del self.tp_ref\n self.power_ref = None\n self.sloads_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n #self.dphases_ref = None\n #self.delays_ref = None\n\n def safe_cross_reference(self, model, xref_errors):\n return self.cross_reference(model)\n\n #def uncross_reference(self) -> None:\n #self.load_ids = [self.LoadID(load) for load in self.load_ids]\n #del self.load_ids_ref\n\n def Delay(self):\n if self.delay_ref is not None:\n return next(self.delay_ref.values()).sid\n elif self.delay in [0, 0.0]:\n return 0\n else:\n return self.delay\n\n def DPhase(self):\n if self.dphase_ref is not None:\n return next(self.delay_ref.values()).tid\n elif self.dphase in [0, 0.0]:\n return 0\n else:\n return self.dphase\n\n def Power(self):\n if self.power_ref is not None:\n return self.power_ref.tid\n return self.power\n\n def get_load_at_freq(self, freq):\n r\"\"\"\n ..math ::\n C = \\sqrt(B ⁄ ρ)\n Source_strength = {A} * 1/(2πf) * \\sqrt( 8πC P(f) / ρ) ^ (ei(θ + 2πfτ))\n \"\"\"\n C = np.sqrt(self.b / self.rho)\n ei = np.exp(1) * 1.j\n A = 0.0\n pi = np.pi\n if self.delay in [0, 0.]:\n tau = 0.\n else:\n #print('delay\\n', self.delay_ref)\n tau = self.delay_ref.value\n Pf = self.power_ref.interpolate(freq)\n if self.dphase in [0, 0.]:\n theta = 0.\n else:\n #print('dphase\\n', self.dphase_ref)\n theta = self.dphase_ref.interpolate(freq)\n strength = A / (2.* pi * freq) * np.sqrt(8*pi*C*Pf / self.rho) ** (ei*(theta + 2*pi*freq*tau))\n\n return 0.0\n\n def raw_fields(self):\n list_fields = ['ACSRCE', self.sid, self.excite_id, self.Delay(), 
self.DPhase(),\n self.Power(), self.rho, self.b]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.raw_fields()\n if size == 16:\n return self.comment + print_card_16(card)\n return self.comment + print_card_8(card)\n\n\nclass DLOAD(LoadCombination):\n \"\"\"\n +-------+-----+----+------+----+----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+====+======+====+====+====+====+====+\n | DLOAD | SID | S | S1 | L1 | S2 | L2 | S3 | L3 |\n +-------+-----+----+------+----+----+----+----+----+\n | | S4 | L4 | etc. | | | | | |\n +-------+-----+----+------+----+----+----+----+----+\n \"\"\"\n type = 'DLOAD'\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n scale = 1.\n scale_factors = [1., 2.]\n load_ids = [1, 2]\n return DLOAD(sid, scale, scale_factors, load_ids, comment='')\n\n def __init__(self, sid, scale, scale_factors, load_ids, comment=''):\n \"\"\"\n Creates a DLOAD card\n\n Parameters\n ----------\n sid : int\n Load set identification number. See Remarks 1. and 4. (Integer > 0)\n scale : float\n Scale factor. See Remarks 2. and 8. (Real)\n Si : List[float]\n Scale factors. See Remarks 2., 7. and 8. (Real)\n load_ids : List[int]\n Load set identification numbers of RLOAD1, RLOAD2, TLOAD1,\n TLOAD2, and ACSRCE entries. See Remarks 3 and 7. (Integer > 0)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n LoadCombination.__init__(self, sid, scale, scale_factors, load_ids,\n comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n\n \"\"\"\n dload_ids2 = []\n msg = ', which is required by DLOAD=%s' % (self.sid)\n for dload_id in self.load_ids:\n dload_id2 = model.DLoad(dload_id, consider_dload_combinations=False, msg=msg)\n dload_ids2.append(dload_id2)\n self.load_ids_ref = dload_ids2\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n dload_ids2 = []\n msg = ', which is required by DLOAD=%s' % (self.sid)\n for dload_id in self.load_ids:\n try:\n dload_id2 = model.DLoad(dload_id, consider_dload_combinations=False, msg=msg)\n except KeyError:\n if debug:\n msg = 'Couldnt find dload_id=%i, which is required by %s=%s' % (\n dload_id, self.type, self.sid)\n model.log.warning(msg)\n continue\n dload_ids2.append(dload_id2)\n self.load_ids_ref = dload_ids2\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.load_ids = [self.LoadID(dload) for dload in self.get_load_ids()]\n self.load_ids_ref = None\n\n def raw_fields(self):\n list_fields = ['DLOAD', self.sid, self.scale]\n for (scale_factor, load_id) in zip(self.scale_factors, self.get_load_ids()):\n list_fields += [scale_factor, self.LoadID(load_id)]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.raw_fields()\n if size == 16:\n return self.comment + print_card_16(card)\n return self.comment + print_card_8(card)\n\n\nclass RLOAD1(DynamicLoad):\n r\"\"\"\n Defines a frequency-dependent dynamic load of the form\n for use in frequency response problems.\n\n .. 
math::\n \\left\\{ P(f) \\right\\} = \\left\\{A\\right\\} [ C(f)+iD(f)]\n e^{ i \\left\\{\\theta - 2 \\pi f \\tau \\right\\} }\n\n +--------+-----+----------+-------+--------+----+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+==========+=======+========+====+====+======+\n | RLOAD1 | SID | EXCITEID | DELAY | DPHASE | TC | TD | TYPE |\n +--------+-----+----------+-------+--------+----+----+------+\n | RLOAD1 | 5 | 3 | | | 1 | | |\n +--------+-----+----------+-------+--------+----+----+------+\n\n NX allows DELAY and DPHASE to be floats\n \"\"\"\n type = 'RLOAD1'\n _properties = ['delay_id', 'dphase_id']\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n excite_id = 1\n return RLOAD1(sid, excite_id, delay=0, dphase=0, tc=0, td=0, Type='LOAD', comment='')\n\n def __init__(self, sid, excite_id, delay=0, dphase=0, tc=0, td=0, Type='LOAD', comment=''):\n \"\"\"\n Creates an RLOAD1 card, which defienes a frequency-dependent load\n based on TABLEDs.\n\n Parameters\n ----------\n sid : int\n load id\n excite_id : int\n node id where the load is applied\n delay : int/float; default=None\n the delay; if it's 0/blank there is no delay\n float : delay in units of time\n int : delay id\n dphase : int/float; default=None\n the dphase; if it's 0/blank there is no phase lag\n float : delay in units of time\n int : delay id\n tc : int/float; default=0\n TABLEDi id that defines C(f) for all degrees of freedom in\n EXCITEID entry\n td : int/float; default=0\n TABLEDi id that defines D(f) for all degrees of freedom in\n EXCITEID entry\n Type : int/str; default='LOAD'\n the type of load\n 0/LOAD\n 1/DISP\n 2/VELO\n 3/ACCE\n 4, 5, 6, 7, 12, 13 - MSC only\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n DynamicLoad.__init__(self)\n if comment:\n self.comment = comment\n Type = update_loadtype(Type)\n\n self.sid = sid\n self.excite_id = excite_id\n self.delay = delay\n self.dphase = dphase\n self.tc = tc\n self.td = td\n self.Type = Type\n assert sid > 0, self\n self.tc_ref = None\n self.td_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n\n def validate(self):\n msg = ''\n is_failed = False\n if self.tc > 0 or self.td > 0:\n msg += 'either RLOAD1 TC or TD > 0; tc=%s td=%s\\n' % (self.tc, self.td)\n\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\n self.Type = 'LOAD'\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\n self.Type = 'DISP'\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\n self.Type = 'VELO'\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\n self.Type = 'ACCE'\n else:\n msg += 'invalid RLOAD1 type Type=%r\\n' % self.Type\n is_failed = True\n\n if is_failed:\n msg += str(self)\n raise RuntimeError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a RLOAD1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n excite_id = integer(card, 2, 'excite_id')\n delay = integer_double_or_blank(card, 3, 'delay', 0)\n dphase = integer_double_or_blank(card, 4, 'dphase', 0)\n tc = integer_double_or_blank(card, 5, 'tc', 0)\n td = integer_double_or_blank(card, 6, 'td', 0)\n Type = integer_string_or_blank(card, 7, 'Type', 'LOAD')\n\n assert len(card) <= 8, 'len(RLOAD1 card) = %i\\ncard=%s' % (len(card), card)\n return RLOAD1(sid, excite_id, delay, dphase, tc, td, Type, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted 
directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by RLOAD1 sid=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.tc, integer_types) and self.tc:\n self.tc_ref = model.TableD(self.tc, msg=msg)\n if isinstance(self.td, integer_types) and self.td:\n self.td_ref = model.TableD(self.td, msg=msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay_id, msg=msg)\n if isinstance(self.dphase, integer_types) and self.dphase > 0:\n self.dphase_ref = model.DPHASE(self.dphase, msg=msg)\n\n def safe_cross_reference(self, model, xref_errors, ):\n msg = ', which is required by RLOAD1 sid=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.tc, integer_types) and self.tc:\n self.tc_ref = model.TableD(self.tc, msg=msg)\n if isinstance(self.td, integer_types) and self.td:\n self.td_ref = model.TableD(self.td, msg=msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay_id, msg=msg)\n if isinstance(self.dphase, integer_types) and self.dphase > 0:\n self.dphase_ref = model.DPHASE(self.dphase, msg=msg)\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.tc = self.Tc()\n self.td = self.Td()\n self.delay = self.delay_id\n self.dphase = self.dphase_id\n self.tc_ref = None\n self.td_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n\n def get_loads(self):\n return [self]\n\n def Tc(self):\n if self.tc_ref is not None:\n return self.tc_ref.tid\n elif self.tc in [0, 0.0]:\n return 0\n return self.tc\n\n def Td(self):\n if self.td_ref is not None:\n return self.td_ref.tid\n elif self.td in [0, 0.0]:\n return 0\n return self.td\n\n @property\n def delay_id(self):\n if self.delay_ref is not None:\n return self.delay_ref.sid\n elif self.delay in [0, 0.]:\n return 0\n return self.delay\n\n @property\n def dphase_id(self):\n if self.dphase_ref is not None:\n return self.dphase_ref.sid\n elif self.dphase in [0, 0.0]:\n return 0\n return self.dphase\n\n def get_load_at_freq(self, freq, scale=1.):\n # A = 1. 
# points to DAREA or SPCD\n if isinstance(freq, float):\n freq = np.array([freq])\n else:\n freq = np.asarray(freq)\n\n if isinstance(self.tc, float):\n c = float(self.tc)\n elif self.tc == 0:\n c = 0.\n else:\n c = self.tc_ref.interpolate(freq)\n\n if isinstance(self.td, float):\n d = float(self.td)\n elif self.td == 0:\n d = 0.\n else:\n d = self.td_ref.interpolate(freq)\n\n if isinstance(self.dphase, float):\n dphase = self.dphase\n elif self.dphase == 0:\n dphase = 0.0\n else:\n nids, comps, dphases = self.dphase_ref.get_dphase_at_freq(freq)\n assert len(dphases) == 1, 'dphases=%s\\n%s' % (dphases, self.dphase_ref)\n dphase = dphases[0]\n\n if isinstance(self.delay, float):\n tau = self.delay\n elif self.delay == 0:\n tau = 0.0\n else:\n nids, comps, taus = self.delay_ref.get_delay_at_freq(freq)\n assert len(taus) == 1, 'taus=%s\\n%s' % (taus, self.delay_ref)\n tau = taus[0]\n\n out = (c + 1.j * d) * np.exp(dphase - 2 * np.pi * freq * tau)\n return out\n\n def raw_fields(self):\n list_fields = ['RLOAD1', self.sid, self.excite_id, self.delay_id, self.dphase_id,\n self.Tc(), self.Td(), self.Type]\n return list_fields\n\n def repr_fields(self):\n Type = set_blank_if_default(self.Type, 'LOAD')\n list_fields = ['RLOAD1', self.sid, self.excite_id, self.delay_id, self.dphase_id,\n self.Tc(), self.Td(), Type]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\ndef _cross_reference_excite_id_backup(self, model, msg): # pragma: no cover\n \"\"\"not quite done...not sure how to handle the very odd xref\n\n EXCITEID may refer to one or more static load entries (FORCE, PLOADi, GRAV, etc.).\n \"\"\"\n excite_id_ref = []\n case_control = model.case_control_deck\n if case_control is not None:\n #print('cc = %r' % case_control)\n for key, subcase in sorted(model.case_control_deck.subcases.items()):\n #print(subcase, type(subcase))\n #if 'LOADSET' in subcase:\n #lseq_id = subcase['LOADSET'][0]\n #lseq = model.Load(lseq_id, consider_load_combinations=False, msg=msg)[0]\n #self.excite_id_ref = lseq\n ##self.dload_id = lseq.\n #if 'DLOAD' in subcase:\n if self.excite_id in model.loads:\n # FORCE, FORCE1, FORCE2, PLOAD4, GRAV\n # changes the magnitudes of the load, not the direction\n model.log.debug('excite_id load = %s' % self.excite_id)\n #print(' dloads =', list(model.dloads.keys()))\n #print(' dareas =', list(model.dareas.keys()))\n excite_id_ref += model.loads[self.excite_id]\n if self.excite_id in model.dareas:\n model.log.debug('excite_id darea = %s' % self.excite_id)\n darea_ref = model.DAREA(self.excite_id, msg=msg)\n excite_id_ref.append(darea_ref)\n if self.excite_id in model.dload_entries:\n # this is probably wrong...\n # it was added to pass TestLoads.test_loads_nonlinear_thermal1, but\n # I think QVECT should be in self.loads, not self.dload_entries...\n model.log.debug('excite_id dload_entries = %s' % self.excite_id)\n excite_id_ref += model.dload_entries\n # what about TEMPBC?\n #else:\n #msg = ('LOADSET and DLOAD are not found in the case control deck\\n%s' %\n #str(model.case_control_deck))\n #raise RuntimeError(msg)\n #else:\n #model.log.warning('could not find excite_id=%i for\\n%s' % (self.excite_id, str(self)))\n #self.excite_id_ref = model.DAREA(self.excite_id, msg=msg)\n if len(excite_id_ref) == 0:\n print('excite_id = %s' % self.excite_id)\n 
print(' loads =', list(model.loads.keys()))\n print(' dareas =', list(model.dareas.keys()))\n print(' dloads =', list(model.dloads.keys()))\n print(' dload_entries =', list(model.dload_entries.keys()))\n model.log.warning('could not find excite_id=%i for\\n%s' % (self.excite_id, str(self)))\n raise RuntimeError('could not find excite_id=%i for\\n%s' % (self.excite_id, str(self)))\n\ndef get_lseqs_by_excite_id(model, excite_id):\n from collections import defaultdict\n\n # get the lseqs that correspond to the correct EXCITE_ID id\n lseq_sids = defaultdict(list)\n for sid, loads in model.load_combinations.items():\n for load in loads:\n if load.type == 'LSEQ':\n if excite_id == load.excite_id:\n #print(load)\n lseq_sids[sid].append(load)\n #for sid, loads in lseqs.items():\n #print(sid, loads)\n return lseq_sids\n\ndef _cross_reference_excite_id(self, model, msg):\n \"\"\"not quite done...not sure how to handle the very odd xref\n\n EXCITEID may refer to one or more static load entries (FORCE, PLOADi, GRAV, etc.).\n \"\"\"\n #print('*' * 80)\n lseq_sids = get_lseqs_by_excite_id(model, self.excite_id)\n\n # find all the LOADSETs in the model\n # LOADSETs reference LSEQs by sid\n valid_lseqs = []\n if lseq_sids:\n # get the sid for the LSEQ\n case_control = model.case_control_deck\n if case_control is not None:\n #print('cc = %r' % case_control)\n for key, subcase in sorted(model.case_control_deck.subcases.items()):\n if 'LOADSET' in subcase:\n lseq_sid = subcase['LOADSET'][0]\n if lseq_sid in lseq_sids:\n model.log.debug('adding LOADSET = %i' % lseq_sid)\n valid_lseqs.append(lseq_sid)\n if valid_lseqs:\n valid_lseqs = list(set(valid_lseqs))\n valid_lseqs.sort()\n #assert len(valid_lseqs) == 1, 'valid_lseqs=%s' % valid_lseqs\n #print('valid_lseqs =', valid_lseqs)\n # can Case Control LOADSET be substituded for Case Control DLOAD id?\n\n excite_id_ref = []\n if self.excite_id in model.loads:\n # FORCE, FORCE1, FORCE2, PLOAD4, GRAV\n # changes the magnitudes of the load, not the direction\n model.log.debug('excite_id load = %s' % self.excite_id)\n #print(' dloads =', list(model.dloads.keys()))\n #print(' dareas =', list(model.dareas.keys()))\n excite_id_ref += model.loads[self.excite_id]\n\n if self.excite_id in model.dareas:\n model.log.debug('excite_id darea = %s' % self.excite_id)\n darea_ref = model.DAREA(self.excite_id, msg=msg)\n excite_id_ref.append(darea_ref)\n\n if self.excite_id in model.bcs:\n # CONV, TEMPBC\n model.log.debug('excite_id bcs = %s' % self.excite_id)\n excite_id_ref = model.bcs[self.excite_id]\n\n if self.excite_id in model.dload_entries: # this is probably wrong...\n # this is probably wrong...\n # it was added to pass TestLoads.test_loads_nonlinear_thermal1, but\n # I think QVECT should be in self.loads, not self.dload_entries...\n model.log.debug('excite_id dload_entries = %s' % self.excite_id)\n excite_id_ref += model.dload_entries\n\n if self.excite_id in model.load_combinations: # this should be right...\n # C:\\NASA\\m4\\formats\\git\\examples\\move_tpl\\nlstrs2.op2\n model.log.debug('excite_id load_combinations = %s' % self.excite_id)\n excite_id_ref = model.load_combinations[self.excite_id]\n\n # handles LSEQ\n if valid_lseqs:\n for lseq_sid in valid_lseqs:\n excite_id_ref += lseq_sids[lseq_sid]\n\n # what about SPCD?\n\n if len(excite_id_ref) == 0:\n print(model.get_bdf_stats())\n print('excite_id = %s' % self.excite_id)\n print(' loads =', list(model.loads.keys()))\n print(' dareas =', list(model.dareas.keys()))\n print(' bcs =', list(model.bcs.keys()))\n 
print(' dloads =', list(model.dloads.keys()))\n print(' dload_entries =', list(model.dload_entries.keys()))\n print(' load_combinations =', list(model.load_combinations.keys())) # what about LSEQ\n if lseq_sids:\n sids = list(lseq_sids.keys())\n print(' lseq_excite_ids=%s; lseq_sids=%s; valid_lseqs=%s' % (\n self.excite_id, sids, valid_lseqs))\n else:\n print(' lseq_sids = []')\n model.log.warning('could not find excite_id=%i for\\n%s' % (self.excite_id, str(self)))\n raise RuntimeError('could not find excite_id=%i for\\n%s' % (self.excite_id, str(self)))\n\n\nclass RLOAD2(DynamicLoad):\n r\"\"\"\n Defines a frequency-dependent dynamic load of the form\n for use in frequency response problems.\n\n .. math:: \\left\\{ P(f) \\right\\} = \\left\\{A\\right\\} * B(f)\n e^{ i \\left\\{ \\phi(f) + \\theta - 2 \\pi f \\tau \\right\\} }\n\n +--------+-----+----------+-------+--------+----+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+==========+=======+========+====+====+======+\n | RLOAD2 | SID | EXCITEID | DELAY | DPHASE | TB | TP | TYPE |\n +--------+-----+----------+-------+--------+----+----+------+\n | RLOAD2 | 5 | 3 | | | 1 | | |\n +--------+-----+----------+-------+--------+----+----+------+\n\n NX allows DELAY and DPHASE to be floats\n \"\"\"\n type = 'RLOAD2'\n _properties = ['delay_id', 'dphase_id']\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n excite_id = 1\n return RLOAD2(sid, excite_id, delay=0, dphase=0, tb=0, tp=0, Type='LOAD', comment='')\n\n # P(f) = {A} * B(f) * e^(i*phi(f), + theta - 2*pi*f*tau)\n def __init__(self, sid, excite_id, delay=0, dphase=0, tb=0, tp=0, Type='LOAD', comment=''):\n \"\"\"\n Creates a nRLOAD2 card, which defienes a frequency-dependent load\n based on TABLEDs.\n\n Parameters\n ----------\n sid : int\n load id\n excite_id : int\n node id where the load is applied\n delay : int/float; default=None\n the delay; if it's 0/blank there is no delay\n float : delay in units of time\n int : delay id\n dphase : int/float; default=None\n the dphase; if it's 0/blank there is no phase lag\n float : delay in units of time\n int : delay id\n tb : int/float; default=0\n TABLEDi id that defines B(f) for all degrees of freedom in\n EXCITEID entry\n tc : int/float; default=0\n TABLEDi id that defines C(f) for all degrees of freedom in\n EXCITEID entry\n td : int/float; default=0\n TABLEDi id that defines D(f) for all degrees of freedom in\n EXCITEID entry\n tp : int/float; default=0\n TABLEDi id that defines phi(f) for all degrees of freedom in\n EXCITEID entry\n Type : int/str; default='LOAD'\n the type of load\n 0/LOAD\n 1/DISP\n 2/VELO\n 3/ACCE\n 4, 5, 6, 7, 12, 13 - MSC only\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n DynamicLoad.__init__(self)\n if comment:\n self.comment = comment\n Type = update_loadtype(Type)\n\n self.sid = sid\n self.excite_id = excite_id\n self.delay = delay\n self.dphase = dphase\n self.tb = tb\n self.tp = tp\n self.Type = Type\n self.tb_ref = None\n self.tp_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n\n #@property\n #def Type(self):\n #\"\"\"gets the load_type\"\"\"\n #return self.load_type\n #@Type.setter\n #def Type(self, load_type):\n #\"\"\"sets the load_type\"\"\"\n #self.load_type = load_type\n\n def validate(self):\n msg = ''\n is_failed = False\n if self.tb > 0 or self.tp > 0:\n msg += 'either RLOAD2 TB or TP > 0; tb=%s tp=%s\\n' % (self.tb, self.tp)\n\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\n self.Type = 'LOAD'\n elif self.Type in [1, 'D', 'DI', 'DIS', 
'DISP']:\n self.Type = 'DISP'\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\n self.Type = 'VELO'\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\n self.Type = 'ACCE'\n else:\n msg += 'invalid RLOAD2 type Type=%r\\n' % self.Type\n is_failed = True\n\n if is_failed:\n msg += str(self)\n raise RuntimeError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a RLOAD2 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n excite_id = integer(card, 2, 'excite_id')\n delay = integer_double_or_blank(card, 3, 'delay', 0)\n dphase = integer_double_or_blank(card, 4, 'dphase', 0)\n tb = integer_double_or_blank(card, 5, 'tb', 0)\n tp = integer_double_or_blank(card, 6, 'tp', 0)\n Type = integer_string_or_blank(card, 7, 'Type', 'LOAD')\n\n assert len(card) <= 8, 'len(RLOAD2 card) = %i\\ncard=%s' % (len(card), card)\n return RLOAD2(sid, excite_id, delay, dphase, tb, tp, Type, comment=comment)\n\n def get_load_at_freq(self, freq, scale=1.):\n # A = 1. # points to DAREA or SPCD\n if isinstance(self.tb, float):\n b = self.tb\n elif self.tb == 0:\n b = 0.0\n else:\n b = self.tb_ref.interpolate(freq)\n\n if isinstance(self.tp, float):\n p = self.tp\n elif self.tp == 0:\n p = 0.0\n else:\n p = self.tp_ref.interpolate(freq)\n\n if isinstance(self.dphase, float):\n dphase = self.dphase\n elif self.dphase == 0 or self.dphase is None:\n dphase = 0.0\n else:\n nids, comps, dphases = self.dphase_ref.get_dphase_at_freq(freq)\n assert len(dphases) == 1, dphases\n dphase = dphases[0]\n\n if isinstance(self.delay, float):\n tau = self.delay\n elif self.delay == 0:\n tau = 0.0\n else:\n nids, comps, taus = self.delay_ref.get_delay_at_freq(freq)\n assert len(taus) == 1, taus\n tau = taus[0]\n\n try:\n out = b * np.exp(1.j * p + dphase - 2 * np.pi * freq * tau)\n except TypeError:\n print('b =', b)\n print('p =', p)\n print('dphase =', dphase)\n print('freq =', freq)\n print('tau =', tau)\n raise\n return out\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by RLOAD2=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.tb, integer_types) and self.tb:\n self.tb_ref = model.TableD(self.tb, msg=msg)\n if isinstance(self.tp, integer_types) and self.tp:\n self.tp_ref = model.TableD(self.tp, msg=msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay, msg=msg)\n if isinstance(self.dphase, integer_types) and self.dphase > 0:\n self.dphase_ref = model.DPHASE(self.dphase, msg=msg)\n\n def safe_cross_reference(self, model, xref_errors, ):\n msg = ', which is required by RLOAD2=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.tb, integer_types) and self.tb:\n self.tb_ref = model.TableD(self.tb, msg=msg)\n if isinstance(self.tp, integer_types) and self.tp:\n self.tp_ref = model.TableD(self.tp, msg=msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay, msg=msg)\n if isinstance(self.dphase, integer_types) and self.dphase > 0:\n self.dphase_ref = model.DPHASE(self.dphase, msg=msg)\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.tb = self.Tb()\n self.tp = self.Tp()\n self.delay = 
self.delay_id\n self.dphase = self.dphase_id\n self.tb_ref = None\n self.tp_ref = None\n self.delay_ref = None\n self.dphase_ref = None\n\n def get_loads(self):\n return [self]\n\n def LoadID(self):\n return self.sid\n\n def Tb(self):\n if self.tb_ref is not None:\n return self.tb_ref.tid\n elif self.tb == 0:\n return 0\n return self.tb\n\n def Tp(self):\n if self.tp_ref is not None:\n return self.tp_ref.tid\n elif self.tp == 0:\n return 0\n return self.tp\n\n @property\n def delay_id(self):\n if self.delay_ref is not None:\n return self.delay_ref.sid\n elif self.delay == 0:\n return 0\n return self.delay\n\n @property\n def dphase_id(self):\n if self.dphase_ref is not None:\n return self.dphase_ref.sid\n elif self.dphase == 0:\n return 0\n return self.dphase\n\n def raw_fields(self):\n list_fields = ['RLOAD2', self.sid, self.excite_id, self.delay_id, self.dphase_id,\n self.Tb(), self.Tp(), self.Type]\n return list_fields\n\n def repr_fields(self):\n Type = set_blank_if_default(self.Type, 0.0)\n list_fields = ['RLOAD2', self.sid, self.excite_id, self.delay_id, self.dphase_id,\n self.Tb(), self.Tp(), Type]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass TLOAD1(DynamicLoad):\n r\"\"\"\n Transient Response Dynamic Excitation, Form 1\n\n Defines a time-dependent dynamic load or enforced motion of the form:\n\n .. math::\n \\left\\{ P(t) \\right\\} = \\left\\{ A \\right\\} \\cdot F(t-\\tau)\n\n for use in transient response analysis.\n\n +--------+-----+----------+-------+------+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+==========+=======+======+=====+=====+=====+\n | TLOAD1 | SID | EXCITEID | DELAY | TYPE | TID | US0 | VS0 |\n +--------+-----+----------+-------+------+-----+-----+-----+\n\n MSC 2016.1\n\n +--------+-----+----------+-------+------+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 |\n +========+=====+==========+=======+======+=====+\n | TLOAD1 | SID | EXCITEID | DELAY | TYPE | TID |\n +--------+-----+----------+-------+------+-----+\n\n NX 11\n \"\"\"\n type = 'TLOAD1'\n _properties = ['delay_id']\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n excite_id = 1\n tid = 1\n return TLOAD1(sid, excite_id, tid, delay=0, Type='LOAD', us0=0.0, vs0=0.0, comment='')\n\n def __init__(self, sid, excite_id, tid, delay=0, Type='LOAD',\n us0=0.0, vs0=0.0, comment=''):\n \"\"\"\n Creates a TLOAD1 card, which defienes a time-dependent load\n based on a DTABLE.\n\n Parameters\n ----------\n sid : int\n load id\n excite_id : int\n node id where the load is applied\n tid : int\n TABLEDi id that defines F(t) for all degrees of freedom in\n EXCITEID entry\n float : MSC not supported\n delay : int/float; default=None\n the delay; if it's 0/blank there is no delay\n float : delay in units of time\n int : delay id\n Type : int/str; default='LOAD'\n the type of load\n 0/LOAD\n 1/DISP\n 2/VELO\n 3/ACCE\n 4, 5, 6, 7, 12, 13 - MSC only\n us0 : float; default=0.\n Factor for initial displacements of the enforced degrees-of-freedom\n MSC only\n vs0 : float; default=0.\n Factor for initial velocities of the enforced degrees-of-freedom\n MSC only\n comment : str; default=''\n a comment for the card\n \"\"\"\n DynamicLoad.__init__(self)\n if delay is None:\n delay = 0\n Type = update_loadtype(Type)\n\n if comment:\n self.comment = comment\n\n #: 
load ID\n self.sid = sid\n\n #: Identification number of DAREA or SPCD entry set or a thermal load\n #: set (in heat transfer analysis) that defines {A}. (Integer > 0)\n self.excite_id = excite_id\n\n #: If it is a non-zero integer, it represents the\n #: identification number of DELAY Bulk Data entry that defines .\n #: If it is real, then it directly defines the value of that will\n #: be used for all degrees-of-freedom that are excited by this\n #: dynamic load entry. See also Remark 9. (Integer >= 0,\n #: real or blank)\n self.delay = delay\n\n #: Defines the type of the dynamic excitation. (LOAD,DISP, VELO, ACCE)\n self.Type = Type\n\n #: Identification number of TABLEDi entry that gives F(t). (Integer > 0)\n self.tid = tid\n\n #: Factor for initial displacements of the enforced degrees-of-freedom.\n #: (Real; Default = 0.0)\n self.us0 = us0\n #: Factor for initial velocities of the enforced degrees-of-freedom.\n #: (Real; Default = 0.0)\n self.vs0 = vs0\n\n self.tid_ref = None\n self.delay_ref = None\n\n def validate(self):\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\n self.Type = 'LOAD'\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\n self.Type = 'DISP'\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\n self.Type = 'VELO'\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\n self.Type = 'ACCE'\n elif self.Type in [4, 5, 6, 7, 12, 13]: # MSC-only\n pass\n else:\n msg = 'invalid TLOAD1 type Type=%r' % self.Type\n raise RuntimeError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TLOAD1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n excite_id = integer(card, 2, 'excite_id')\n delay = integer_double_or_blank(card, 3, 'delay', 0)\n Type = integer_string_or_blank(card, 4, 'Type', 'LOAD')\n tid = integer(card, 5, 'tid')\n us0 = double_or_blank(card, 6, 'us0', 0.0)\n vs0 = double_or_blank(card, 7, 'vs0', 0.0)\n\n assert len(card) <= 8, 'len(TLOAD1 card) = %i\\ncard=%s' % (len(card), card)\n return TLOAD1(sid, excite_id, tid, delay=delay, Type=Type, us0=us0, vs0=vs0, comment=comment)\n\n def get_loads(self):\n return [self]\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by TLOAD1=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if self.tid:\n self.tid_ref = model.TableD(self.tid, msg=msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay, msg=msg)\n\n def safe_cross_reference(self, model, debug=True):\n msg = ', which is required by TLOAD1=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if self.tid:\n #try:\n self.tid_ref = model.TableD(self.tid, msg=msg)\n #except\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay_id, msg=msg)\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.tid = self.Tid()\n self.delay = self.delay_id\n self.tid_ref = None\n self.delay_ref = None\n\n def Tid(self):\n if self.tid_ref is not None:\n return self.tid_ref.tid\n elif self.tid == 0:\n return 0\n else:\n return self.tid\n\n @property\n def delay_id(self):\n if self.delay_ref is not None:\n return self.delay_ref.sid\n elif self.delay == 0:\n return 0\n return self.delay\n\n def 
get_load_at_time(self, time, scale=1.):\n # A = 1. # points to DAREA or SPCD\n if isinstance(time, float):\n time = np.array([time])\n else:\n time = np.asarray(time)\n\n if isinstance(self.delay, float):\n tau = self.delay\n elif self.delay == 0 or self.delay is None:\n tau = 0.0\n else:\n tau = self.delay_ref.get_delay_at_time(time)\n\n i = np.where(time - tau > 0)\n time2 = time[i]\n resp = self.tid_ref.interpolate(time2)\n is_spcd = False\n if self.Type == 'VELO' and is_spcd:\n resp[0] = self.us0\n if self.Type == 'ACCE' and is_spcd:\n resp[0] = self.vs0\n return resp * scale\n\n def raw_fields(self):\n list_fields = ['TLOAD1', self.sid, self.excite_id, self.delay_id, self.Type,\n self.Tid(), self.us0, self.vs0]\n return list_fields\n\n def repr_fields(self):\n us0 = set_blank_if_default(self.us0, 0.0)\n vs0 = set_blank_if_default(self.vs0, 0.0)\n list_fields = ['TLOAD1', self.sid, self.excite_id, self.delay_id, self.Type,\n self.Tid(), us0, vs0]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass TLOAD2(DynamicLoad):\n r\"\"\"\n Transient Response Dynamic Excitation, Form 1\n\n Defines a time-dependent dynamic load or enforced motion of the form:\n\n .. math::\n \\left\\{ P(t) \\right\\} = \\left\\{ A \\right\\} e^(C*t) cos(2 \\pi f t + \\phi)\n\n P(t) = 0 (t<T1+tau or t > T2+tau)\n P(t) = {A} * t^b * e^(C*t) * cos(2*pi*f*t + phase) (T1+tau <= t <= T2+tau)\n\n for use in transient response analysis.\n\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+==========+=======+======+=====+=====+========+=========+\n | TLOAD2 | SID | EXCITEID | DELAY | TYPE | T1 | T2 | FREQ | PHASE |\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n | | C | B | US0 | VS0 | | | | |\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n\n MSC 2016.1\n\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+==========+=======+======+=====+=====+========+=========+\n | TLOAD2 | SID | EXCITEID | DELAY | TYPE | T1 | T2 | FREQ | PHASE |\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n | | C | B | | | | | | |\n +--------+-----+----------+-------+------+-----+-----+--------+---------+\n\n NX 11\n \"\"\"\n type = 'TLOAD2'\n _properties = ['delay_id']\n\n @classmethod\n def _init_from_empty(cls):\n sid = 1\n excite_id = 1\n return TLOAD2(sid, excite_id, delay=0, Type='LOAD', T1=0., T2=None,\n frequency=0., phase=0., c=0., b=0., us0=0., vs0=0., comment='')\n\n def __init__(self, sid, excite_id, delay=0, Type='LOAD', T1=0., T2=None,\n frequency=0., phase=0., c=0., b=0., us0=0., vs0=0., comment=''):\n \"\"\"\n Creates a TLOAD2 card, which defines a exponential time dependent\n load based on constants.\n\n Parameters\n ----------\n sid : int\n load id\n excite_id : int\n node id where the load is applied\n delay : int/float; default=None\n the delay; if it's 0/blank there is no delay\n float : delay in units of time\n int : delay id\n Type : int/str; default='LOAD'\n the type of load\n 0/LOAD\n 1/DISP\n 2/VELO\n 3/ACCE\n 4, 5, 6, 7, 12, 13 - MSC only\n T1 : float; default=0.\n time constant (t1 > 0.0)\n times below this are ignored\n T2 : float; 
default=None\n time constant (t2 > t1)\n times above this are ignored\n frequency : float; default=0.\n Frequency in cycles per unit time.\n phase : float; default=0.\n Phase angle in degrees.\n c : float; default=0.\n Exponential coefficient.\n b : float; default=0.\n Growth coefficient.\n us0 : float; default=0.\n Factor for initial displacements of the enforced degrees-of-freedom\n MSC only\n vs0 : float; default=0.\n Factor for initial velocities of the enforced degrees-of-freedom\n MSC only\n comment : str; default=''\n a comment for the card\n \"\"\"\n DynamicLoad.__init__(self)\n if comment:\n self.comment = comment\n if T2 is None:\n T2 = T1\n\n Type = update_loadtype(Type)\n\n #: load ID\n #: SID must be unique for all TLOAD1, TLOAD2, RLOAD1, RLOAD2, and ACSRCE entries.\n self.sid = sid\n self.excite_id = excite_id\n self.delay = delay\n\n #: Defines the type of the dynamic excitation. (Integer; character\n #: or blank; Default = 0)\n self.Type = Type\n\n #: Time constant. (Real >= 0.0)\n self.T1 = T1\n #: Time constant. (Real; T2 > T1)\n self.T2 = T2\n\n #: Frequency in cycles per unit time. (Real >= 0.0; Default = 0.0)\n self.frequency = frequency\n\n #: Phase angle in degrees. (Real; Default = 0.0)\n self.phase = phase\n\n #: Exponential coefficient. (Real; Default = 0.0)\n self.c = c\n\n #: Growth coefficient. (Real; Default = 0.0)\n self.b = b\n\n #: Factor for initial displacements of the enforced degrees-of-freedom.\n #: (Real; Default = 0.0)\n self.us0 = us0\n\n #: Factor for initial velocities of the enforced degrees-of-freedom\n #: (Real; Default = 0.0)\n self.vs0 = vs0\n\n self.delay_ref = None\n\n def validate(self):\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\n self.Type = 'LOAD'\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\n self.Type = 'DISP'\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\n self.Type = 'VELO'\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\n self.Type = 'ACCE'\n elif self.Type in [5, 6, 7, 12, 13]: # MSC only\n pass\n else:\n msg = 'invalid TLOAD2 type Type=%r' % self.Type\n raise RuntimeError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TLOAD2 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n excite_id = integer(card, 2, 'excite_id')\n delay = integer_double_or_blank(card, 3, 'delay', 0)\n Type = integer_string_or_blank(card, 4, 'Type', 'LOAD')\n\n T1 = double_or_blank(card, 5, 'T1', 0.0)\n T2 = double_or_blank(card, 6, 'T2', T1)\n frequency = double_or_blank(card, 7, 'frequency', 0.)\n phase = double_or_blank(card, 8, 'phase', 0.)\n c = double_or_blank(card, 9, 'c', 0.)\n b = double_or_blank(card, 10, 'b', 0.)\n us0 = double_or_blank(card, 11, 'us0', 0.)\n vs0 = double_or_blank(card, 12, 'vs0', 0.)\n\n assert len(card) <= 13, 'len(TLOAD2 card) = %i\\ncard=%s' % (len(card), card)\n return TLOAD2(sid, excite_id, delay, Type, T1, T2, frequency, phase,\n c, b, us0, vs0, comment=comment)\n\n def get_load_at_time(self, time, scale=1.):\n if isinstance(time, float):\n time = np.array([time])\n else:\n time = np.asarray(time)\n\n # A = 1. 
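# Per the TLOAD2 class docstring above, this method evaluates the piecewise
# excitation
#     P(t) = {A} * t**b * exp(c*t) * cos(2*pi*frequency*t + phase)   for T1+tau <= t <= T2+tau
#     P(t) = 0                                                       otherwise
# where tau is resolved from the optional DELAY entry and {A} is supplied by
# the DAREA/SPCD data referenced through EXCITEID.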
# points to DAREA or SPCD\n #xy = array(self.tid.table.table)\n #x = xy[:, 0]\n #y = xy[:, 1]\n #assert x.shape == y.shape, 'x.shape=%s y.shape=%s' % (str(x.shape), str(y.shape))\n #f = interp1d(x, y)\n\n if isinstance(self.delay, float):\n tau = self.delay\n elif self.delay == 0 or self.delay is None:\n tau = 0.0\n else:\n tau = self.delay_ref.get_delay_at_time(time)\n\n t1 = self.T1 + tau\n t2 = self.T2 + tau\n f = self.frequency\n p = self.phase\n f = np.zeros(time.shape, dtype=time.dtype)\n\n i = np.where(t1 <= time)[0]\n j = np.where(time[i] <= t2)[0]\n i = i[j]\n f[i] = scale * time[i] ** self.b * np.exp(self.c * time[i]) * np.cos(2 * np.pi * f * time[i] + p)\n\n is_spcd = False\n #resp = f\n if self.Type == 'VELO' and is_spcd:\n f[0] = self.us0\n if self.Type == 'ACCE' and is_spcd:\n f[0] = self.vs0\n return f\n\n def get_loads(self):\n return [self]\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by TLOAD2 sid=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay_id, msg=msg)\n # TODO: excite_id\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n msg = ', which is required by TLOAD2 sid=%s' % (self.sid)\n _cross_reference_excite_id(self, model, msg)\n if isinstance(self.delay, integer_types) and self.delay > 0:\n self.delay_ref = model.DELAY(self.delay_id, msg=msg)\n # TODO: excite_id\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.delay = self.delay_id\n self.delay_ref = None\n\n @property\n def delay_id(self):\n if self.delay_ref is not None:\n return self.delay_ref.sid\n elif self.delay == 0:\n return 0\n return self.delay\n\n def raw_fields(self):\n list_fields = ['TLOAD2', self.sid, self.excite_id, self.delay_id, self.Type,\n self.T1, self.T2, self.frequency, self.phase, self.c, self.b,\n self.us0, self.vs0]\n return list_fields\n\n def repr_fields(self):\n frequency = set_blank_if_default(self.frequency, 0.0)\n phase = set_blank_if_default(self.phase, 0.0)\n c = set_blank_if_default(self.c, 0.0)\n b = set_blank_if_default(self.b, 0.0)\n\n us0 = set_blank_if_default(self.us0, 0.0)\n vs0 = set_blank_if_default(self.vs0, 0.0)\n list_fields = ['TLOAD2', self.sid, self.excite_id, self.delay_id, self.Type,\n self.T1, self.T2, frequency, phase, c, b, us0, vs0]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\ndef update_loadtype(load_type):\n if load_type in [0, 'L', 'LO', 'LOA', 'LOAD']:\n load_type = 'LOAD'\n elif load_type in [1, 'D', 'DI', 'DIS', 'DISP']:\n load_type = 'DISP'\n elif load_type in [2, 'V', 'VE', 'VEL', 'VELO']:\n load_type = 'VELO'\n elif load_type in [3, 'A', 'AC', 'ACC', 'ACCE']:\n load_type = 'ACCE'\n return load_type\n", "\"\"\"\ndefines:\n - SolidSection\n - Material\n - Part\n\n\"\"\"\nimport numpy as np\n\nallowed_element_types = [\n 'r2d2', 'conn2d2',\n 'cpe3', 'cpe4', 'cpe4r',\n 'cps3', 'cps4', 'cps4r',\n\n 'coh2d4', 'c3d10h', 'cohax4',\n 'cax3', 'cax4r', 'mass', 'rotaryi', 't2d2', 'c3d8r',\n]\n\nclass SolidSection:\n \"\"\"a SolidSection defines depth and a material\"\"\"\n def 
__init__(self, param_map, data_lines, log):\n self.param_map = param_map\n self.data_lines = data_lines\n self.material = param_map['material']\n if len(data_lines) == 0:\n pass\n elif len(data_lines) == 1:\n assert len(data_lines) == 1, data_lines\n line0 = data_lines[0]\n assert len(line0) == 1, data_lines\n\n try:\n self.thickness = float(line0[0])\n except ValueError:\n self.thickness = 0.\n\n for line in data_lines:\n log.info('solid - %r' % line)\n\n def __repr__(self):\n \"\"\"prints a summary for the solid section\"\"\"\n msg = 'SolidSection(\\n'\n msg += ' param_map = %r,\\n' % self.param_map\n msg += ' thickness = %s,\\n' % self.thickness\n msg += ')\\n'\n return msg\n\n\nclass Material:\n \"\"\"a Material object is a series of nodes & elements (of various types)\"\"\"\n def __init__(self, name, sections, density=None, ndepvars=None, ndelete=None):\n self.name = name\n self.density = density\n\n #self.depvar = None\n self.ndelete = ndelete\n self.ndepvars = ndepvars\n\n self.user_material = None\n #print(sections)\n #if 'density' in sections:\n #self.density = sections['density']\n #if 'depvar' in sections:\n #self.depvar = sections['depvar']\n #if 'user_material' in sections:\n #self.user_material = sections['user_material']\n self.sections = sections\n\n def __repr__(self):\n \"\"\"prints a summary for the material\"\"\"\n msg = 'Material(\\n'\n msg += ' name=%r,\\n' % self.name\n for key, value in self.sections.items():\n msg += ' %r : %r,\\n' % (key, value)\n msg += ')\\n'\n return msg\n\n def write(self, abq_file):\n #*Material, name=Glassy828DEA\n #*Density\n #1180.,\n #*Elastic\n #2.14078e+09, 0.42718\n #*Material, name=MAT1_828DEA_Dam\n #*Density\n #1180.,\n #*Depvar, delete=4\n #20,\n #*User Material, constants=16\n #** K CTELIN C10 C01 DAM_FORM FUNC_FORM EVOLF EVMF\n #3.2e+09, 5.667e-05, 3.75e+08, 0., 2., 1., 50000., 0.05\n #**EVM0ISO EVM0VOL EVM0VM DAM_METHOD ALPHA A B C\n #0., 0.5, 0.5, 1., 0., 0., 0.5, 0.6\n #*Material, name=Steel\n #*Density\n #7800.,\n #*Elastic\n #2e+11, 0.3\n abq_file.write('*Material, name=%s\\n' % write_name(self.name))\n if self.density:\n abq_file.write('*Density\\n %s,\\n' % self.density)\n if self.ndepvars:\n ndelete = '' if self.ndelete is None else ', delete=%s' % self.ndelete\n abq_file.write('*Depvar%s\\n %s,\\n' % (ndelete, self.ndepvars))\n if self.user_material:\n nconstants = ''\n abq_file.write('*User Material%s\\n %s,\\n' % (nconstants, self.user_material))\n #abq_file.write('** skipping Material %s\\n' % self.name)\n\nclass Assembly:\n def __init__(self, element_types, node_sets, element_sets):\n self.element_types = element_types\n self.node_sets = node_sets\n self.element_sets = element_sets\n\n def write(self, abq_file):\n abq_file.write('** skipping Assembly\\n')\n\n def __repr__(self):\n \"\"\"summary for the Assembly\"\"\"\n etypes = list(self.element_types.keys())\n nsets = list(self.node_sets.keys())\n esets = list(self.element_sets.keys())\n msg = (\n 'Assembly:\\n'\n ' element_types = %s\\n'\n ' node_sets = %s\\n'\n ' element_sets = %s\\n' % (etypes, nsets, esets)\n )\n return msg\n\nclass Part:\n \"\"\"a Part object is a series of nodes & elements (of various types)\"\"\"\n def __init__(self, name, nids, nodes, element_types, node_sets, element_sets,\n solid_sections, log):\n \"\"\"\n creates a Part object\n\n Parameters\n ----------\n name : str\n the name\n element_types : Dict[element_type] : node_ids\n element_type : str\n the element type\n bars:\n r2d2 : (nelements, 2) int ndarray\n shells:\n cpe3 : (nelements, 
3) int ndarray\n cpe4 : (nelements, 4) int ndarray\n cpe4r : (nelements, 4) int ndarray\n cps3 : (nelements, 3) int ndarray\n cps4 : (nelements, 4) int ndarray\n cps4r : (nelements, 4) int ndarray\n coh2d4 : (nelements, 4) int ndarray\n cohax4 : (nelements, 4) int ndarray\n cax3 : (nelements, 3) int ndarray\n cax4r : (nelements, 4) int ndarray\n solids:\n c3d10h : (nelements, 10) int ndarray\n\n \"\"\"\n self.name = name\n self.log = log\n self.node_sets = node_sets\n self.element_sets = element_sets\n self.solid_sections = solid_sections\n\n try:\n self.nids = np.array(nids, dtype='int32')\n except ValueError:\n msg = 'nids=%s is not integers' % nids\n raise ValueError(msg)\n nnodes = len(self.nids)\n\n node0 = nodes[0]\n node_shape = len(node0)\n\n if node_shape == 3:\n self.nodes = np.array(nodes, dtype='float32')\n elif node_shape == 2:\n # abaqus can have only x/y coordinates, so we fake the z coordinate\n self.nodes = np.zeros((nnodes, 3), dtype='float32')\n nodes2 = np.array(nodes, dtype='float32')\n #print(nodes2.shape, self.nodes.shape)\n self.nodes[:, :2] = nodes2\n else:\n raise NotImplementedError(node0)\n\n # bars\n self.r2d2 = None\n\n # ---shells---\n # plane strain\n self.cpe3 = None\n self.cpe4 = None\n self.cpe4r = None\n\n # plane stress\n self.cps3 = None\n self.cps4 = None\n self.cps4r = None\n\n # other\n self.coh2d4 = None\n self.cohax4 = None\n self.cax3 = None\n self.cax4r = None\n\n # solids\n self.c3d10h = None\n self.c3d8r = None\n #-----------------------------------\n # eids\n self.r2d2_eids = None\n\n self.cpe3_eids = None\n self.cpe4_eids = None\n self.cpe4r_eids = None\n\n self.cps3_eids = None\n self.cps4_eids = None\n self.cps4r_eids = None\n\n self.coh2d4_eids = None\n self.cohax4_eids = None\n self.cax3_eids = None\n self.cax4r_eids = None\n\n # rigid elements\n self.c3d10h_eids = None\n self.c3d8r_eids = None\n self._store_elements(element_types)\n\n def _etypes_nnodes(self):\n \"\"\"internal helper method\"\"\"\n etypes_nnodes = [\n ('r2d2', 2), # similar to a CBAR\n\n # shells\n ('cpe3', 3),\n ('cpe4', 4),\n ('cpe4r', 4),\n\n ('cps3', 3),\n ('cps4', 4),\n ('cps4r', 4), # quad, plane stress, reduced\n\n ('coh2d4', 4), # cohesive zone\n ('cohax4', 4), # cohesive zone\n ('cax3', 3),\n ('cax4r', 4),\n\n # solids\n ('c3d10h', 10), # tet10\n ('c3d8r', 8), # hexa8\n ]\n return etypes_nnodes\n\n def _store_elements(self, element_types):\n \"\"\"helper method for the init\"\"\"\n etypes_nnodes = self._etypes_nnodes()\n for etype, nnodes in etypes_nnodes:\n if etype in element_types:\n etype_eids = '%s_eids' % etype\n elements = element_types[etype]\n eids_elements = np.array(elements, dtype='int32')\n setattr(self, etype, eids_elements) # r2d2\n setattr(self, etype_eids, eids_elements[:, 0]) # r2d2_eids\n assert eids_elements.shape[1] == nnodes + 1, eids_elements.shape\n\n\n def element(self, eid):\n \"\"\"gets a specific element of the part\"\"\"\n elem = None\n etypes_nnodes = self._etypes_nnodes()\n for etype, nnodes in etypes_nnodes:\n etype_eids = '%s_eids' % etype\n eids = getattr(self, etype_eids) # r2d2_eids\n if eids is not None:\n ieid = np.where(eid == eids)[0]\n if len(ieid):\n ieidi = ieid[0]\n elems = getattr(self, etype) # r2d2\n elem = elems[ieid, :]\n return etype, ieid, elem\n return None, None, None\n\n def check_materials(self, materials):\n \"\"\"validates the materials\"\"\"\n for section in self.solid_sections:\n key = section.material\n if key in materials:\n self.log.debug('material=%r for part=%r exists' % (key, self.name))\n else:\n 
self.log.warning('key=%r is an invalid material' % key)\n\n @property\n def nelements(self):\n \"\"\"Gets the total number of elements\"\"\"\n n_r2d2 = self.r2d2.shape[0] if self.r2d2 is not None else 0\n\n # plane strain\n n_cpe3 = self.cpe3.shape[0] if self.cpe3 is not None else 0\n n_cpe4 = self.cpe4.shape[0] if self.cpe4 is not None else 0\n n_cpe4r = self.cpe4r.shape[0] if self.cpe4r is not None else 0\n\n # plane stress\n n_cps3 = self.cps3.shape[0] if self.cps3 is not None else 0\n n_cps4 = self.cps4.shape[0] if self.cps4 is not None else 0\n n_cps4r = self.cps4r.shape[0] if self.cps4r is not None else 0\n\n n_coh2d4 = self.coh2d4.shape[0] if self.coh2d4 is not None else 0\n n_c3d10h = self.c3d10h.shape[0] if self.c3d10h is not None else 0\n\n n_cohax4 = self.cohax4.shape[0] if self.cohax4 is not None else 0\n n_cax3 = self.cax3.shape[0] if self.cax3 is not None else 0\n n_cax4r = self.cax4r.shape[0] if self.cax4r is not None else 0\n\n n_c3d8r = self.c3d8r.shape[0] if self.c3d8r is not None else 0\n\n neids = (n_r2d2 +\n n_cpe3 + n_cpe4 + n_cpe4r + # plane strain\n n_cps3 + n_cps4 + n_cps4r + # plane stress\n n_coh2d4 +\n n_c3d10h + n_cohax4 + n_cax3 + n_cax4r +\n n_c3d8r)\n assert neids > 0, neids\n return neids\n\n def __repr__(self):\n \"\"\"prints a summary for the part\"\"\"\n nnodes = self.nodes.shape[0]\n n_r2d2 = self.r2d2.shape[0] if self.r2d2 is not None else 0\n\n # plane strain\n n_cpe3 = self.cpe3.shape[0] if self.cpe3 is not None else 0\n n_cpe4 = self.cpe4.shape[0] if self.cpe4 is not None else 0\n n_cpe4r = self.cpe4r.shape[0] if self.cpe4r is not None else 0\n\n # plane stress\n n_cps3 = self.cps3.shape[0] if self.cps3 is not None else 0\n n_cps4 = self.cps4.shape[0] if self.cps4 is not None else 0\n n_cps4r = self.cps4r.shape[0] if self.cps4r is not None else 0\n\n n_coh2d4 = self.coh2d4.shape[0] if self.coh2d4 is not None else 0\n n_c3d10h = self.c3d10h.shape[0] if self.c3d10h is not None else 0\n\n n_cohax4 = self.cohax4.shape[0] if self.cohax4 is not None else 0\n n_cax3 = self.cax3.shape[0] if self.cax3 is not None else 0\n n_cax4r = self.cax4r.shape[0] if self.r2d2 is not None else 0\n\n n_c3d8r = self.c3d8r.shape[0] if self.c3d8r is not None else 0\n\n neids = (n_r2d2 +\n n_cpe3 + n_cpe4 + n_cpe4r + # plane strain\n n_cps3 + n_cps4 + n_cps4r + # plane stress\n n_coh2d4 +\n n_c3d10h + n_cohax4 + n_cax3 + n_cax4r +\n n_c3d8r)\n assert neids == self.nelements, 'something is out of date...'\n msg = (\n f'Part(name={self.name}, nnodes={nnodes:d}, neids={neids:d},\\n'\n f' n_r2d2={n_r2d2}, n_cps3={n_cps3}, n_cpe3={n_cpe3}, '\n f'n_cpe4={n_cpe4}, n_cpe4r={n_cpe4r}, n_coh2d4={n_coh2d4},\\n'\n f' n_cohax4={n_cohax4}, n_cax3={n_cax3}, n_cax4r={n_cax4r},'\n f' n_cps4r={n_cps4r},\\n'\n f' n_c3d10h={n_c3d10h}, n_c3d8r=n_c3d8r)\\n'\n )\n nsets = list(self.node_sets.keys())\n esets = list(self.element_sets.keys())\n msg += ' Node Sets: %s\\n' % nsets\n msg += ' Element Sets: %s\\n' % esets\n for section in self.solid_sections:\n msg += str(section) + '\\n'\n return msg\n\n @property\n def element_types(self):\n \"\"\"simplified way to access all the elements as a dictionary\"\"\"\n element_types = {}\n element_types['r2d2'] = (self.r2d2_eids, self.r2d2)\n\n # plane strain element_types['cpe3'] = (self.cpe3_eids, self.cpe3)\n element_types['cpe4'] = (self.cpe4_eids, self.cpe4)\n element_types['cpe4r'] = (self.cpe4r_eids, self.cpe4r)\n\n # plane stress\n element_types['cps3'] = (self.cps3_eids, self.cps3)\n element_types['cps4'] = (self.cps4_eids, self.cps4)\n 
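# Each entry pairs the element-type name with an (eids, connectivity) tuple;
# element types that are absent from this part keep their (None, None)
# defaults, which Part.write() skips.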
element_types['cps4r'] = (self.cps4r_eids, self.cps4r)\n\n element_types['cohax4'] = (self.cohax4_eids, self.cohax4)\n element_types['coh2d4'] = (self.coh2d4_eids, self.coh2d4)\n element_types['cax3'] = (self.cax3_eids, self.cax3)\n element_types['cax4r'] = (self.cax4r_eids, self.cax4r)\n #element_types['cps4r'] = (self.cps4r_eids, self.cps4r)\n element_types['c3d10h'] = (self.c3d10h_eids, self.c3d10h)\n return element_types\n\n def write(self, abq_file, is_2d=False):\n \"\"\"writes a Part\"\"\"\n #name, nids, nodes, element_types, node_sets, element_sets,\n # solid_sections, log\n abq_file.write('*Part,name=%s\\n' % write_name(self.name))\n\n abq_file.write('*Node\\n')\n if is_2d:\n for nid, node in zip(self.nids, self.nodes):\n abq_file.write('%i,\\t%s,\\t%s,\\t%s\\n' % (nid, node[0], node[1], node[2]))\n else:\n for nid, node in zip(self.nids, self.nodes):\n abq_file.write('%i,\\t%s,\\t%s\\n' % (nid, node[0], node[1]))\n\n for set_name, values in sorted(self.node_sets.items()):\n write_node_set_to_file(abq_file, set_name, values)\n\n for elem_type, (eids, elems) in self.element_types.items():\n if eids is None:\n continue\n abq_file.write('*Element,type=%s\\n' % elem_type)\n nnodes = elems.shape[1]\n fmt = '%s,\\t' * (nnodes - 1) + '%s\\n'\n for eid, elem in zip(eids, elems):\n abq_file.write(fmt % tuple(elem))\n\n for set_name, values in sorted(self.element_sets.items()):\n write_element_set_to_file(abq_file, set_name, values)\n abq_file.write('*endpart\\n')\n\ndef write_name(name):\n \"\"\"Abaqus has odd rules for writing words without spaces vs. with spaces\"\"\"\n return '%r' % name if ' ' in name else '%s' % name\n\ndef write_element_set_to_file(abq_file, set_name, values_array):\n \"\"\"writes an element set\"\"\"\n abq_file.write('*Elset, elset=%s\\n' % write_name(set_name))\n write_set_to_file(abq_file, values_array)\n\ndef write_node_set_to_file(abq_file, set_name, values_array):\n \"\"\"writes a node set\"\"\"\n abq_file.write('*Nset, nset=%s\\n' % write_name(set_name))\n write_set_to_file(abq_file, values_array)\n\ndef write_set_to_file(abq_file, values_array):\n \"\"\"writes 16 integer values per line to a set card\"\"\"\n assert isinstance(values_array, np.ndarray), type(values_array)\n nvalues = len(values_array)\n nrows = nvalues // 16\n nleftover = nvalues % 16\n if nrows:\n values_array_square = values_array[:nrows*16].reshape(nrows, 16)\n fmt = '%i,\\t' * 16 + '\\n'\n fmt2 = '%i,\\t' * 15 + '%i\\n'\n for row in values_array_square[:-1, :]:\n abq_file.write(fmt % tuple(row))\n abq_file.write(fmt2 % tuple(values_array_square[-1, :]))\n\n if nleftover:\n fmt = '%i,\\t' * (nleftover - 1) + '%i\\n'\n leftover = values_array[nrows*16:]\n abq_file.write(fmt % tuple(leftover))\n", "import numpy as np\nfrom numpy import zeros\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import OES_Object\nfrom pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header\n\n\nclass RealNonlinearRodArray(OES_Object): # 89-CRODNL, 92-CONRODNL\n \"\"\"\n ::\n\n ELEMENT-ID = 102\n N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. 
TORSIONAL\n STRESS PLASTIC/NLELAST STRAIN STRESS\n 2.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0\n 3.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0\n \"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n \"\"\"tested by elements/loadstep_elements.op2\"\"\"\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n self.nelements = 0 # result specific\n\n @property\n def is_real(self):\n return True\n\n @property\n def is_complex(self):\n return False\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def _get_msgs(self):\n raise NotImplementedError()\n\n def get_headers(self):\n headers = ['axial_stress', 'equiv_stress', 'total_strain',\n 'effective_plastic_creep_strain', 'effective_creep_strain',\n 'linear_torsional_stress']\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealNonlinearRodArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = zeros(self.ntimes, dtype=dtype)\n self.element = zeros(self.nelements, dtype='int32')\n\n #[axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,\n # effective_creep_strain, linear_torsional_stress]\n self.data = zeros((self.ntimes, self.nelements, 6), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.02 0.04\n #ElementID Item\n #102 axial_stress 19.413668 76.139496\n # equiv_stress 19.413668 76.139496\n # total_strain 0.000194 0.000761\n # effective_plastic_creep_strain 0.000000 0.000000\n # effective_creep_strain 0.000000 0.000000\n # linear_torsional_stress 0.000000 0.000000\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n df1 = pd.DataFrame(self.element).T\n df1.columns = ['ElementID']\n df2 = pd.DataFrame(self.data[0])\n df2.columns = headers\n self.data_frame = df1.join([df2])\n #print(self.data_frame)\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid, in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1) = t1\n (axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2) = t2\n if not 
np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s)\\n' % (\n eid,\n axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1,\n axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, axial_stress, equiv_stress, total_strain,\n effective_plastic_creep_strain, effective_creep_strain, linear_torsional_stress):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element[self.ielement] = eid\n self.data[self.itime, self.ielement, :] = [\n axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,\n effective_creep_strain, linear_torsional_stress\n ]\n self.ielement += 1\n\n def get_stats(self, short=False):\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n ntimes, nelements, _ = self.data.shape\n assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)\n assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n msg.append(' eType\\n')\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n if is_sort1:\n msg = [\n ' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\\n',\n ' \\n',\n ' ELEMENT-ID AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\\n',\n ' STRESS PLASTIC/NLELAST STRAIN STRESS\\n'\n ]\n else:\n msg = [\n ' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\\n',\n ' \\n',\n ' TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. 
TORSIONAL\\n',\n ' STRESS PLASTIC/NLELAST STRAIN STRESS\\n'\n ]\n\n if self.is_sort1:\n page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg)\n else:\n raise NotImplementedError('RealNonlinearRodArray')\n return page_num\n\n def _write_sort1_as_sort1(self, header, page_stamp, page_num, f06_file, msg_temp):\n ntimes = self.data.shape[0]\n\n eids = self.element\n #is_odd = False\n #nwrite = len(eids)\n\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n #print(\"self.data.shape=%s itime=%s ieids=%s\" % (str(self.data.shape), itime, str(ieids)))\n axial = self.data[itime, :, 0]\n eqs = self.data[itime, :, 1]\n total = self.data[itime, :, 2]\n epcs = self.data[itime, :, 3]\n ecs = self.data[itime, :, 4]\n lts = self.data[itime, :, 5]\n\n #print \"dt=%s axials=%s eqs=%s ts=%s epcs=%s ecs=%s lts=%s\" %(dt,axial,eqs,ts,epcs,ecs,lts)\n #msgE[eid] = ' ELEMENT-ID = %8i\\n' % (eid)\n #if eid not in msgT:\n #msgT[eid] = []\n #msgT[eid].append(' %9.3E %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\\n' % (dt, axial, eqs, ts, epcs, ecs, lts))\n\n for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids, axial, eqs, total, epcs, ecs, lts):\n ([saxial, seqs, stotal, sepcs, secs, slts]) = write_floats_13e(\n [axiali, eqsi, totali, epcsi, ecsi, ltsi])\n f06_file.write(\n ' %8i %-13s %-13s %-13s %-13s %-13s %s\\n' % (\n eid, saxial, seqs, stotal, sepcs, secs, slts))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n def write_op2(self, op2, op2_ascii, itable, new_result, date,\n is_mag_phase=False, endian='>'):\n \"\"\"writes an OP2\"\"\"\n import inspect\n from struct import Struct, pack\n frame = inspect.currentframe()\n call_frame = inspect.getouterframes(frame, 2)\n op2_ascii.write('%s.write_op2: %s\\n' % (self.__class__.__name__, call_frame[1][3]))\n\n if itable == -1:\n self._write_table_header(op2, op2_ascii, date)\n itable = -3\n\n #if isinstance(self.nonlinear_factor, float):\n #op2_format = '%sif' % (7 * self.ntimes)\n #raise NotImplementedError()\n #else:\n #op2_format = 'i21f'\n #s = Struct(op2_format)\n\n eids = self.element\n\n # table 4 info\n #ntimes = self.data.shape[0]\n #nnodes = self.data.shape[1]\n nelements = self.data.shape[1]\n\n # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm\n #ntotal = ((nnodes * 21) + 1) + (nelements * 4)\n\n ntotali = self.num_wide\n ntotal = ntotali * nelements\n\n #print('shape = %s' % str(self.data.shape))\n #assert self.ntimes == 1, self.ntimes\n\n device_code = self.device_code\n op2_ascii.write(' ntimes = %s\\n' % self.ntimes)\n\n eids_device = self.element * 10 + self.device_code\n\n #fmt = '%2i %6f'\n #print('ntotal=%s' % (ntotal))\n #assert ntotal == 193, ntotal\n\n if self.is_sort1:\n struct1 = Struct(endian + b'i6f')\n else:\n raise NotImplementedError('SORT2')\n\n op2_ascii.write('nelements=%i\\n' % nelements)\n\n for itime in range(self.ntimes):\n #print('3, %s' % itable)\n self._write_table_3(op2, op2_ascii, new_result, itable, itime)\n\n # record 4\n #print('stress itable = %s' % itable)\n itable -= 1\n #print('4, %s' % itable)\n header = [4, itable, 4,\n 4, 1, 4,\n 4, 0, 4,\n 4, ntotal, 4,\n 4 * ntotal]\n op2.write(pack('%ii' % len(header), *header))\n op2_ascii.write('r4 [4, 0, 4]\\n')\n op2_ascii.write('r4 [4, %s, 4]\\n' % (itable))\n op2_ascii.write('r4 [4, %i, 4]\\n' % (4 * ntotal))\n\n axial = self.data[itime, :, 0]\n eqs = self.data[itime, :, 1]\n total = 
self.data[itime, :, 2]\n epcs = self.data[itime, :, 3]\n ecs = self.data[itime, :, 4]\n lts = self.data[itime, :, 5]\n\n for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids_device, axial, eqs, total, epcs, ecs, lts):\n data = [eid, axiali, eqsi, totali, epcsi, ecsi, ltsi]\n op2_ascii.write(' eid=%s data=%s\\n' % (eids_device, str(data)))\n op2.write(struct1.pack(*data))\n\n itable -= 1\n header = [4 * ntotal,]\n op2.write(pack('i', *header))\n op2_ascii.write('footer = %s\\n' % header)\n new_result = False\n return itable\n", "import os\nimport unittest\nimport numpy as np\nfrom cpylog import get_logger\n\nfrom pyNastran.converters.openfoam.block_mesh import BlockMesh, read_block_mesh, mirror_block_mesh\nfrom pyNastran.converters.openfoam.points_file import read_points_file\nfrom pyNastran.converters.openfoam.face_file import FaceFile\nfrom pyNastran.converters.openfoam.boundary_file import read_boundary, read_boundary_file\n\nfrom pyNastran.utils import check_path\n\n\nclass TestOpenFOAM(unittest.TestCase):\n def test_boundary_1(self):\n \"\"\"tests the PointsFile, FaceFile, Boundary class using the Boundary\"\"\"\n #points_filename = 'points.foam'\n boundary_filename = 'boundary.foam'\n #with open(points_filename, 'w') as points_file:\n #points_file.write(\n #'4\\n\\n'\n #'(0. 0. 0.)\\n'\n #'(1. 0. 0.)\\n'\n #'(2. 0. 0.)\\n'\n #'(3. 0. 0.)\\n'\n #)\n #nfaces = 2\n boundary_msg = (\n '6\\n'\n '(\\n'\n ' inlet\\n'\n ' {\\n'\n ' type patch;\\n'\n ' nFaces 50;\\n'\n ' startFace 10325;\\n'\n ' }\\n'\n ' outlet\\n'\n ' {\\n'\n ' type patch;\\n'\n ' nFaces 40;\\n'\n ' startFace 10375;\\n'\n ' }\\n'\n ' bottom\\n'\n ' {\\n'\n ' type symmetryPlane;\\n'\n ' inGroups 1(symmetryPlane);\\n'\n ' nFaces 25;\\n'\n ' startFace 10415;\\n'\n ' }\\n'\n ' top\\n'\n ' {\\n'\n ' type symmetryPlane;\\n'\n ' inGroups 1(symmetryPlane);\\n'\n ' nFaces 125;\\n'\n ' startFace 10440;\\n'\n ' }\\n'\n ' obstacle\\n'\n ' {\\n'\n ' type patch;\\n'\n ' nFaces 110;\\n'\n ' startFace 10565;\\n'\n ' }\\n'\n ' defaultFaces\\n'\n ' {\\n'\n ' type empty;\\n'\n ' inGroups 1(empty);\\n'\n ' nFaces 10500;\\n'\n ' startFace 10675;\\n'\n ' }\\n'\n ')\\n'\n '\\n'\n '// *************************************** //\\n'\n )\n with open(boundary_filename, 'w') as boundary_file_obj:\n boundary_file_obj.write(boundary_msg)\n\n log = get_logger(level='warning', encoding='utf-8')\n boundary_file = read_boundary_file(\n boundary_filename, log=log, debug=False)\n #boundary = read_boundary(\n #point_filename, face_filename, boundary_filename,\n #log=None, debug=False)\n\n\n def test_points_1(self):\n \"\"\"tests the PointsFile class\"\"\"\n points_filename = 'points.foam'\n with open(points_filename, 'w') as points_File:\n points_File.write(\n '4\\n\\n'\n '(0. 0. 0.)\\n'\n '(1. 0. 0.)\\n'\n '(2. 0. 0.)\\n'\n '(3. 0. 
0.)\\n'\n )\n points = read_points_file(points_filename, ipoints_to_read=None, log=None,\n debug=None)\n #print(points)\n assert points.shape == (4, 3), points.shape\n\n points = read_points_file(points_filename, ipoints_to_read=[0, 3], log=None,\n debug=None)\n assert points.shape == (2, 3), points.shape\n #print(points)\n\n points = read_points_file(points_filename, ipoints_to_read=[3], log=None,\n debug=None)\n #print(points)\n assert points.shape == (1, 3), points.shape\n os.remove(points_filename)\n\n def test_blockmesh_1(self):\n \"\"\"tests the BlockMesh class\"\"\"\n block = BlockMesh(log=None, debug=True)\n\n block.grading = [\n [4, 2, 6, None],\n ]\n block.nodes = np.array([\n [0., 0., 0.],\n [0., 1., 0.],\n [1., 1., 0.],\n [1., 0., 0.],\n\n [0., 0., 1.],\n [0., 1., 1.],\n [1., 1., 1.],\n [1., 0., 1.],\n ])\n block.hexas = [\n [1, 2, 3, 4, 5, 6, 7, 8],\n ]\n hex_id = 1\n block.make_hex_bars(hex_id)\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()", "from numpy import dot, array, zeros, unique, searchsorted, transpose, where, arange\nfrom numpy.linalg import norm # type: ignore\n\nfrom pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_element import SpringElement\n\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nfrom pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n\n\nclass CELAS1(SpringElement):\n type = 'CELAS1'\n def __init__(self, model):\n \"\"\"\n Defines the CELAS1 object.\n\n Parameters\n ----------\n model : BDF\n the BDF object\n \"\"\"\n SpringElement.__init__(self, model)\n\n def allocate(self, card_count):\n ncards = card_count[self.type]\n if ncards:\n self.n = ncards\n #: Element ID\n self.element_id = zeros(ncards, 'int32')\n #: Property ID\n self.property_id = zeros(ncards, 'int32')\n #: Node IDs\n self.node_ids = zeros((ncards, 2), 'int32')\n #: component number\n self.components = zeros((ncards, 2), 'int32')\n\n def add_card(self, card, comment=''):\n i = self.i\n self.element_id[i] = integer(card, 1, 'eid')\n self.property_id[i] = integer_or_blank(card, 2, 'pid', self.element_id[i])\n self.node_ids[i, :] = [integer(card, 3, 'n1'),\n integer(card, 5, 'n2')]\n self.components[i, :] = [integer_or_blank(card, 4, 'c1', 0),\n integer_or_blank(card, 6, 'c2', 0)]\n assert len(card) <= 7, 'len(CELAS1 card) = %i\\ncard=%s' % (len(card), card)\n self.i += 1\n\n def build(self):\n if self.n:\n i = self.element_id.argsort()\n self.element_id = self.element_id[i]\n self.property_id = self.property_id[i]\n self.node_ids = self.node_ids[i, :]\n self.components = self.components[i, :]\n\n unique_eids = unique(self.element_id)\n if len(unique_eids) != len(self.element_id):\n raise RuntimeError('There are duplicate CELAS1 IDs...')\n self._cards = []\n else:\n self.element_id = array([], dtype='int32')\n self.property_id = array([], dtype='int32')\n\n def update(self, maps):\n \"\"\"\n maps = {\n 'node_id' : nid_map,\n 'property' : pid_map,\n }\n \"\"\"\n if self.n:\n eid_map = maps['element']\n pid_map = maps['property']\n nid_map = maps['node']\n for i, eid, pid, nids in enumerate(zip(self.element_id, self.property_id, self.node_ids)):\n self.element_id[i] = eid_map[eid]\n self.property_id[i] = pid_map[pid]\n self.node_ids[i, 0] = nid_map[nids[0]]\n self.node_ids[i, 1] = nid_map[nids[1]]\n\n def write_card(self, bdf_file, size=8, eids=None):\n if self.n:\n if eids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.element_id, self.eid)\n\n for (eid, 
pid, n, c) in zip(self.element_id[i], self.property_id[i], self.node_ids[i], self.components[i]):\n card = ['CELAS1', eid, pid, n[0], n[1], c[0], c[1]]\n if size == 8:\n bdf_file.write(print_card_8(card))\n else:\n bdf_file.write(print_card_16(card))\n\n def get_stiffness_matrix(self, i, model, positions, index0s, fnorm=1.0):\n \"\"\"gets the stiffness matrix for CELAS1\"\"\"\n #print(\"----------------\")\n ipid = where(self.model.pelas.property_id==self.property_id[i])[0][0]\n prop = self.model.pelas\n ki = prop.K[ipid]\n k = ki * array([[1, -1,],\n [-1, 1]])\n\n #========================\n n1, n2 = self.node_ids[i, :]\n c1, c2 = self.components[i, :]\n #i0, i1 = index0s\n\n delta1 = 0 if c1 in [0, 1, 2, 3] else 3\n delta2 = 0 if c2 in [0, 1, 2, 3] else 3\n\n c1b = c1-1 if c1 > 0 else c1\n c2b = c2-1 if c2 > 0 else c2\n\n i1 = index0s[n1]\n i2 = index0s[n2]\n dofs = [\n i1 + c1b,\n i2 + c1b,\n ]\n\n n_ijv = [\n (n1, 1 + delta1),\n (n2, 1 + delta2),\n ]\n return (k, dofs, n_ijv)\n\n def displacement_stress(self, model, positions, q, dofs,\n ni, o1, e1, f1):\n n = self.n\n\n du_axial = zeros(n, 'float64')\n for i in range(self.n):\n (n1, n2) = self.node_ids[i, :]\n\n n11 = dofs[(n1, 1)]\n n21 = dofs[(n2, 1)]\n\n q_axial = array([\n q[n11],\n q[n21],\n ])\n u_axial = q_axial\n du_axial[i] = u_axial[0] - u_axial[1]\n\n self.model.log.debug(\"len(pelas) = %s\" % self.model.pelas.n)\n i = searchsorted(self.model.pelas.property_id, self.property_id)\n k = self.model.pelas.K[i]\n s = self.model.pelas.s[i]\n self.model.log.debug(\"k=%s s=%s du_axial=%s\" % (k, s, du_axial))\n\n e1[ni: ni+n] = du_axial * s\n f1[ni: ni+n] = k * du_axial\n o1[ni: ni+n] = f1[ni: ni+n] * s\n #return (axial_strain, axial_stress, axial_force)\n", "\"\"\"\ndefines:\n - read_tetgen(base, dimension_flag=2, log=None, debug=False)\n - Tetgen(log=None, debug=False):\n - write_nastran(self, bdf_filename)\n - read_tetgen(self, node_filename, smesh_filename, ele_filename, dimension_flag)\n - read_smesh(self, smesh_filename)\n - read_nodes(self, node_filename)\n - read_ele(self, ele_filename, form_flag='1')\n\n\"\"\"\nfrom numpy import array, zeros\nfrom cpylog import get_logger2\nfrom pyNastran.bdf.field_writer_8 import print_card_8\n\n\ndef read_tetgen(base, dimension_flag=2, log=None, debug=False):\n \"\"\"simplified interface to Tetgen files\"\"\"\n model = Tetgen(log=log, debug=debug)\n model.read_tetgen(base + '.node', base + '.smesh', base + '.ele', dimension_flag)\n return model\n\nclass Tetgen:\n \"\"\"\n http://www.wias-berlin.de/preprint/1762/wias_preprints_1762.pdf\n \"\"\"\n def __init__(self, log=None, debug=False):\n \"\"\"\n Initializes the Tetgen object\n\n Parameters\n ----------\n debug : bool/None; default=True\n used to set the logger if no logger is passed in\n True: logs debug/info/error messages\n False: logs info/error messages\n None: logs error messages\n log : logging module object / None\n if log is set, debug is ignored and uses the\n settings the logging object has\n \"\"\"\n self.log = get_logger2(log, debug=debug)\n self.nodes = None\n self.tris = None\n self.tets = None\n\n def write_nastran(self, bdf_filename):\n \"\"\"writes a nastran bdf\"\"\"\n with open(bdf_filename, 'w') as bdf_file:\n msg = 'CEND\\n'\n msg += 'BEGIN BULK\\n'\n\n nid = 1\n cid = None\n for (x, y, z) in self.nodes:\n card = ['GRID', nid, cid, x, y, z]\n msg += print_card_8(card)\n nid += 1\n bdf_file.write(msg)\n\n eid = 1\n mid = 100\n if self.tris is not None:\n pid = 1\n thickness = 0.1\n pshell = ['PSHELL', pid, mid, 
thickness]\n msg = print_card_8(pshell)\n\n for n0, n1, n2 in self.tris + 1:\n card = ['CTRIA3', eid, pid, n0, n1, n2]\n msg += print_card_8(card)\n eid += 1\n if eid % 1000 == 0:\n bdf_file.write(msg)\n msg = ''\n bdf_file.write(msg)\n\n if self.tets is not None:\n pid = 2\n psolid = ['PSOLID', pid, mid]\n msg = print_card_8(psolid)\n for n0, n1, n2, n3 in self.tets + 1:\n card = ['CTETRA', eid, pid, n0, n1, n2, n3]\n msg += print_card_8(card)\n eid += 1\n if eid % 1000 == 0:\n bdf_file.write(msg)\n msg = ''\n bdf_file.write(msg)\n\n E = 1e7\n G = None\n nu = 0.3\n rho = 0.1\n mat1 = ['MAT1', mid, E, G, nu, rho]\n msg = print_card_8(mat1)\n bdf_file.write(msg)\n bdf_file.write('ENDDATA\\n')\n\n def read_tetgen(self, node_filename, smesh_filename, ele_filename, dimension_flag):\n \"\"\"reads a tetgen file\"\"\"\n self.nodes = read_nodes(node_filename)\n if dimension_flag == 2:\n self.log.info('reading the *.smesh')\n self.tris = self.read_smesh(smesh_filename)\n elif dimension_flag == 3:\n self.log.info('reading the *.ele')\n self.tets = read_ele(ele_filename)\n else:\n raise RuntimeError('dimension_flag = %r and must be 2 or 3.' % dimension_flag)\n\n def read_smesh(self, smesh_filename):\n \"\"\"reads the *.smesh file\"\"\"\n with open(smesh_filename, 'r') as smesh_file:\n lines = smesh_file.readlines()\n lines = clean_lines(lines)\n #iline = 0 # 0 3 0 0 # node list is found in .node file.\n\n iline = 1\n nelements, unused_zero = lines[iline].split() # nelements, 0\n nelements = int(nelements)\n self.log.debug('nelements = %s' % nelements)\n\n # facet section\n tri_list = []\n iline += 1\n for unused_ielement in range(nelements):\n sline = lines[iline].split()\n try:\n nnodes = sline[0]\n except IndexError:\n print(sline)\n raise\n element_nodes = sline[1:]\n if nnodes == '3':\n tri_list.append(element_nodes)\n else:\n raise NotImplementedError('nnodes = %s' % nnodes)\n iline += 1\n tri = array(tri_list, 'int32') - 1 # subtract 1 so the node ids start at 0\n return tri\n\n\ndef read_nodes(node_filename):\n \"\"\"reads the *.node file\"\"\"\n with open(node_filename, 'r') as node_file:\n nnodes, three, zero1, zero2 = node_file.readline().strip().split()\n assert three == '3', three\n assert zero1 == '0', zero1\n assert zero2 == '0', zero2\n nnodes = int(nnodes)\n nodes = zeros((nnodes, 3), 'float64')\n for inode in range(nnodes):\n nodes[inode] = node_file.readline().strip().split()[1:]\n return nodes\n\ndef read_ele(ele_filename, form_flag='1'):\n \"\"\"reads the *.ele file\"\"\"\n #print(\"ele_filename =\", ele_filename)\n with open(ele_filename, 'r') as ele_file:\n nelements, four, form_flag_enabled = ele_file.readline().strip().split()\n form_flag_enabled = int(form_flag_enabled)\n\n assert four == '4', four\n nelements = int(nelements)\n #print(\"nelements =\", nelements)\n\n if not form_flag_enabled:\n tets = zeros((nelements, 4), 'int32')\n for ielement in range(nelements):\n # eid n1 n2 n3 n4 flip_flag???\n # 1 13260 15506 16059 16065 -1\n tets[ielement] = ele_file.readline().strip().split()[1:]\n else:\n tets = []\n for ielement in range(nelements):\n # eid n1 n2 n3 n4 flip_flag???\n # 1 13260 15506 16059 16065 -1\n n0, n1, n2, n3, flag = ele_file.readline().strip().split()[1:]\n if flag == form_flag:\n tets.append((n0, n1, n2, n3))\n tets = array(tets, 'int32')\n\n #print(\"nodes =\", nodes)\n return tets - 1\n #self.tet = self.read_ele(ele_filename)\n\n\ndef clean_lines(lines):\n \"\"\"removes blank lines and commented lines\"\"\"\n lines2 = []\n for line in lines:\n line2 = 
line.split('#')[0].strip()\n if line2:\n lines2.append(line2)\n return lines2\n\n\ndef main(): # pragma: no cover\n import os\n\n #base = 'gear'\n #read_tetgen(base, dimension_flag=2)\n #return\n\n from pyNastran.converters.stl.stl import STL\n m1 = STL()\n m1.read_stl('tetgen_test.stl')\n m1.flip_normals()\n m1.write_stl('tetgen_test_flipped.stl')\n del m1\n\n os.system('tetgen.exe -pqcvVqY tetgen_test_flipped.stl')\n\n m = Tetgen()\n base = 'tetgen_test_flipped.1'\n m.read_tetgen(base + '.node', base + '.smesh', base + '.ele', dimension_flag=3)\n m.write_nastran(base + '.bdf')\n\nif __name__ == '__main__': # pragma: no cover\n main()\n", "import numpy as np\nfrom numpy import zeros\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object\nfrom pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header\n\n\nclass RealBushArray(OES_Object):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.ielement = 0\n self.nelements = 0 # result specific\n #print('RealBushArray.nonlinear_factor =', self.nonlinear_factor)\n\n @property\n def is_real(self):\n return True\n\n @property\n def is_complex(self):\n return False\n\n def _reset_indices(self):\n self.itotal = 0\n if self.table_name not in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:\n self.ielement = 0\n\n def _get_msgs(self):\n raise NotImplementedError('%s needs to implement _get_msgs' % self.__class__.__name__)\n\n def get_headers(self):\n raise NotImplementedError('%s needs to implement get_headers' % self.__class__.__name__)\n #return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealBushArray\"\"\"\n #print(\"self.ielement =\", self.ielement)\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n\n if self.element_type == 102:\n #nnodes_per_element = 1\n pass\n else:\n raise NotImplementedError(self.element_type)\n\n # buggy MSC 2005 (was this ever fixed?)\n # NX doesn't have this bug\n if self.table_name in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:\n self.ntotal = self.nelements\n\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s\" % (\n #self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n _times = zeros(self.ntimes, dtype=dtype)\n element = zeros(self.ntotal, dtype='int32')\n\n # [tx, ty, tz, rx, ry, rz]\n data = zeros((self.ntimes, self.ntotal, 6), dtype='float32')\n\n if self.load_as_h5:\n #for key, value in sorted(self.data_code.items()):\n #print(key, value)\n group = self._get_result_group()\n self._times = group.create_dataset('_times', data=_times)\n self.element = group.create_dataset('element', data=element)\n self.data = group.create_dataset('data', data=data)\n else:\n self._times = _times\n self.element = element\n self.data = data\n\n def build_dataframe(self):\n 
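# Builds the frame from the arrays sized in build(): self._times (ntimes,),
# self.element (ntotal,) and self.data with shape (ntimes, ntotal, 6)
# holding [tx, ty, tz, rx, ry, rz] for each element.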
\"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.00 0.10\n #ElementID Item\n #11 tx 0.0 0.0\n # ty 0.0 0.0\n # tz 0.0 0.0\n # rx 0.0 0.0\n # ry 0.0 0.0\n # rz 0.0 0.0\n #21 tx 0.0 0.0\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n # >25.0\n #Static tx ty tz rx ry rz\n #ElementID\n #1 1000.0 0.0 0.0 0.0 0.0 0.0\n #\n # <=24.2\n #Static 0\n #ElementID Item\n #1 tx 1000.0\n # ty 0.0\n # tz 0.0\n # rx 0.0\n # ry 0.0\n # rz 0.0\n data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)\n data_frame.index.name = 'ElementID'\n data_frame.columns.names = ['Static']\n #data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()\n #data_frame.columns.names = ['Static']\n #data_frame.index.names = ['ElementID', 'Item']\n self.data_frame = data_frame\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid, in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (fx1, fy1, fz1, unused_mx1, unused_my1, unused_mz1) = t1\n (fx2, fy2, fz2, unused_mx2, unused_my2, unused_mz2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s)\\n (%s, %s, %s)\\n' % (\n eid,\n fx1, fy1, fz1, #mx1, my1, mz1\n fx2, fy2, fz2) #mx2, my2, mz2\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, tx, ty, tz, rx, ry, rz):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element[self.itotal] = eid\n self.data[self.itime, self.itotal, :] = [tx, ty, tz, rx, ry, rz]\n self.itotal += 1\n self.ielement += 1\n\n def get_stats(self, short=False):\n if not self.is_built:\n return ['<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.ntotal\n ntimes = self.ntimes\n #ntotal = self.ntotal\n nelements = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n\n n = len(headers)\n assert n == self.data.shape[2], 'nheaders=%s shape=%s' % (n, str(self.data.shape))\n msg.append(' data: [%s, ntotal, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' element.shape = %s\\n' % str(self.element.shape).replace('L', ''))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def get_element_index(self, eids):\n # elements 
are always sorted; nodes are not\n itot = np.searchsorted(eids, self.element) #[0]\n return itot\n\n def eid_to_element_node_index(self, eids):\n ind = np.ravel([np.searchsorted(self.element == eid) for eid in eids])\n return ind\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n msg = self._get_msgs()\n (ntimes, unused_ntotal) = self.data.shape[:2]\n eids = self.element\n\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg))\n #[tx, ty, tz, rx, ry, rz]\n tx = self.data[itime, :, 0]\n ty = self.data[itime, :, 1]\n tz = self.data[itime, :, 2]\n rx = self.data[itime, :, 3]\n ry = self.data[itime, :, 4]\n rz = self.data[itime, :, 5]\n\n for eid, txi, tyi, tzi, rxi, ryi, rzi in zip(\n eids, tx, ty, tz, rx, ry, rz):\n vals = [txi, tyi, tzi, rxi, ryi, rzi]\n vals2 = write_floats_13e(vals)\n [txi, tyi, tzi, rxi, ryi, rzi] = vals2\n f06_file.write('0 %8i %-13s %-13s %-13s %-13s %-13s %s\\n' % (\n eid, txi, tyi, tzi, rxi, ryi, rzi))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n if self.nonlinear_factor in (None, np.nan):\n page_num -= 1\n return page_num\n\n def write_op2(self, op2, op2_ascii, itable, new_result, date,\n is_mag_phase=False, endian='>'):\n \"\"\"writes an OP2\"\"\"\n import inspect\n from struct import Struct, pack\n frame = inspect.currentframe()\n call_frame = inspect.getouterframes(frame, 2)\n op2_ascii.write('%s.write_op2: %s\\n' % (self.__class__.__name__, call_frame[1][3]))\n\n if itable == -1:\n self._write_table_header(op2, op2_ascii, date)\n itable = -3\n\n #if isinstance(self.nonlinear_factor, float):\n #op2_format = '%sif' % (7 * self.ntimes)\n #raise NotImplementedError()\n #else:\n #op2_format = 'i21f'\n #s = Struct(op2_format)\n\n eids = self.element\n\n # table 4 info\n #ntimes = self.data.shape[0]\n #nnodes = self.data.shape[1]\n nelements = self.data.shape[1]\n\n # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm\n #ntotal = ((nnodes * 21) + 1) + (nelements * 4)\n\n ntotali = self.num_wide\n ntotal = ntotali * nelements\n\n #print('shape = %s' % str(self.data.shape))\n #assert self.ntimes == 1, self.ntimes\n\n #device_code = self.device_code\n op2_ascii.write(' ntimes = %s\\n' % self.ntimes)\n\n eids_device = self.element * 10 + self.device_code\n\n #fmt = '%2i %6f'\n #print('ntotal=%s' % (ntotal))\n #assert ntotal == 193, ntotal\n\n if self.is_sort1:\n struct1 = Struct(endian + b'i6f')\n else:\n raise NotImplementedError('SORT2')\n\n op2_ascii.write('nelements=%i\\n' % nelements)\n\n for itime in range(self.ntimes):\n #print('3, %s' % itable)\n self._write_table_3(op2, op2_ascii, new_result, itable, itime)\n\n # record 4\n #print('stress itable = %s' % itable)\n itable -= 1\n #print('4, %s' % itable)\n header = [4, itable, 4,\n 4, 1, 4,\n 4, 0, 4,\n 4, ntotal, 4,\n 4 * ntotal]\n op2.write(pack('%ii' % len(header), *header))\n op2_ascii.write('r4 [4, 0, 4]\\n')\n op2_ascii.write('r4 [4, %s, 4]\\n' % (itable))\n op2_ascii.write('r4 [4, %i, 4]\\n' % (4 * ntotal))\n\n tx = self.data[itime, :, 0]\n ty = self.data[itime, :, 1]\n tz = self.data[itime, :, 2]\n rx = self.data[itime, :, 3]\n ry = self.data[itime, :, 4]\n rz = self.data[itime, :, 5]\n\n for eid, eid_device, txi, tyi, tzi, rxi, ryi, rzi in zip(\n eids, eids_device, tx, ty, tz, rx, ry, rz):\n data = [eid_device, txi, tyi, tzi, rxi, ryi, rzi]\n\n vals = [txi, tyi, tzi, rxi, ryi, 
rzi]\n vals2 = write_floats_13e(vals)\n [txi, tyi, tzi, rxi, ryi, rzi] = vals2\n op2_ascii.write('0 %8i %-13s %-13s %-13s %-13s %-13s %s\\n' % (\n eid, txi, tyi, tzi, rxi, ryi, rzi))\n op2.write(struct1.pack(*data))\n\n #for eid, axiali, SMai, torsioni, SMti in zip(eids_device, axial, SMa, torsion, SMt):\n #data = [eid, axiali, SMai, torsioni, SMti]\n #op2_ascii.write(' eid=%s axial=%s SMa=%s torsion=%s SMt=%s\\n' % tuple(data))\n #op2.write(struct1.pack(*data))\n\n itable -= 1\n header = [4 * ntotal,]\n op2.write(pack('i', *header))\n op2_ascii.write('footer = %s\\n' % header)\n new_result = False\n return itable\n\n\nclass RealBushStressArray(RealBushArray, StressObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RealBushArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StressObject.__init__(self, data_code, isubcase)\n\n def get_headers(self):\n headers = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']\n return headers\n\n def _get_msgs(self):\n if self.element_type == 102:\n pass\n else:\n raise NotImplementedError(self.element_type)\n\n msg = [\n ' S T R E S S E S I N B U S H E L E M E N T S ( C B U S H )\\n \\n',\n ' ELEMENT-ID STRESS-TX STRESS-TY STRESS-TZ STRESS-RX STRESS-RY STRESS-RZ \\n',\n ]\n return msg\n\nclass RealBushStrainArray(RealBushArray, StrainObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RealBushArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StrainObject.__init__(self, data_code, isubcase)\n\n def get_headers(self):\n headers = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']\n return headers\n\n def _get_msgs(self):\n if self.element_type == 102:\n pass\n else:\n raise NotImplementedError(self.element_type)\n\n msg = [\n ' S T R A I N S I N B U S H E L E M E N T S ( C B U S H )\\n'\n ' \\n'\n ' ELEMENT-ID STRAIN-TX STRAIN-TY STRAIN-TZ STRAIN-RX STRAIN-RY STRAIN-RZ \\n'\n ]\n return msg\n", "from __future__ import print_function, absolute_import\n\nimport numpy as np\nimport pandas as pd\nfrom six.moves import range\nfrom typing import Tuple, Iterable, Union, Dict\n\nfrom h5Nastran.h5.nastran.result import ResultTableData\n\n_Vector = Tuple[float, float, float]\n_Matrix = Tuple[_Vector, _Vector, _Vector]\n\nVector = Union[_Vector, np.ndarray]\nMatrix = Union[_Matrix, np.ndarray]\n\n\ngpf_sum_type = np.dtype(\n [\n ('DETAIL', 'U32'),\n ('LOADCASE', np.int64),\n ('F1', np.float64),\n ('F2', np.float64),\n ('F3', np.float64),\n ('M1', np.float64),\n ('M2', np.float64),\n ('M3', np.float64)\n ]\n)\n\n\nclass GridPointForceSummationData(ResultTableData):\n data_cols = pd.Index(['F1', 'F2', 'F3', 'M1', 'M2', 'M3'])\n data_group_by = ['DETAIL', 'LOADCASE']\n\n\nclass GridPointForceSummationCalculator(object):\n def __init__(self):\n self.data = None\n \"\"\":type: np.ndarray\"\"\"\n\n self.grid_pos = {} # type: Dict[int, Vector]\n\n self._elements = {}\n self._nodes = {}\n self._cases = {}\n\n self._loadcases = []\n\n def set_data(self, data, grid_pos):\n self.data = data\n self.grid_pos = grid_pos\n self._build()\n\n def _build(self):\n self._elements.clear()\n self._nodes.clear()\n self._cases.clear()\n\n eids = self.data['EID']\n nids = self.data['ID']\n cids = self.data['DOMAIN_ID']\n\n elements = self._elements\n nodes = self._nodes\n cases = self._cases\n\n lcs = set()\n\n for i in range(eids.size):\n try:\n elements[eids[i]].append(i)\n except KeyError:\n elements[eids[i]] = [i]\n\n try:\n nodes[nids[i]].append(i)\n except KeyError:\n nodes[nids[i]] = [i]\n\n try:\n cases[cids[i]].append(i)\n except KeyError:\n cases[cids[i]] = 
[i]\n\n lcs.add(cids[i])\n\n del self._loadcases[:]\n self._loadcases.extend(sorted(lcs))\n\n def _indices(self, elements, nodes, loadcases=()):\n ei = []\n\n for eid in elements:\n try:\n ei.extend(self._elements[eid])\n except KeyError:\n pass\n\n ni = []\n\n for nid in nodes:\n try:\n ni.extend(self._nodes[nid])\n except KeyError:\n pass\n\n lci = []\n\n for lcid in loadcases:\n try:\n lci.extend(self._cases[lcid])\n except KeyError:\n pass\n\n ei = set(ei)\n ni = set(ni)\n\n if len(loadcases) == 0:\n return sorted(ei.intersection(ni))\n else:\n lci = set(lci)\n return sorted(ei.intersection(ni).intersection(lci))\n\n def sum(self, detail_id, nodes, elements, refpoint, coord, loadcases=(), load_factors=None):\n # type: (str, Iterable[int], Iterable[int], Vector, Matrix, Iterable[int], Vector) -> GridPointForceSummationData\n \n loadcases = sorted(loadcases)\n\n if len(loadcases) == 0:\n loadcases = self._loadcases\n\n result = np.zeros(len(loadcases), dtype=gpf_sum_type)\n\n _indices = set(self._indices(elements, nodes))\n\n if len(_indices) == 0:\n result['DETAIL'] = detail_id\n result['LDCASE'] = loadcases\n return GridPointForceSummationData.from_records(result)\n\n for i in range(len(loadcases)):\n lc = loadcases[i]\n\n casei = self._cases[lc]\n\n indices = sorted(_indices.intersection(casei))\n\n if len(indices) == 0:\n tmp = result[i]\n tmp[0] = detail_id\n tmp[1] = lc\n continue\n\n data = self.data.take(indices)\n result[i] = self._sum(data, detail_id, lc, refpoint, coord)\n\n if load_factors is not None:\n result['F1'] *= load_factors[0]\n result['F2'] *= load_factors[1]\n result['F3'] *= load_factors[2]\n result['M1'] *= load_factors[3]\n result['M2'] *= load_factors[4]\n result['M3'] *= load_factors[5]\n\n # print('numpy', result.dtype)\n\n result = GridPointForceSummationData.from_records(result)\n\n # print('dataframe', result.dtypes)\n\n return result\n\n def _sum(self, data, detail_id, lcid, refpoint, coord):\n\n fx = 0.\n fy = 0.\n fz = 0.\n mx = 0.\n my = 0.\n mz = 0.\n\n nids = data['ID'].values\n FX = data['F1'].values\n FY = data['F2'].values\n FZ = data['F3'].values\n MX = data['M1'].values\n MY = data['M2'].values\n MZ = data['M3'].values\n\n # print(FX.keys())\n\n grid_pos = self.grid_pos\n\n for i in range(data.shape[0]):\n _fx = FX[i]\n _fy = FY[i]\n _fz = FZ[i]\n _mx = MX[i]\n _my = MY[i]\n _mz = MZ[i]\n\n fx += _fx\n fy += _fy\n fz += _fz\n\n nid = nids[i]\n\n pos = grid_pos[nid]\n\n mx += _mx + _fz * (pos[1] - refpoint[1]) - _fy * (pos[2] - refpoint[2])\n my += _my + _fx * (pos[2] - refpoint[2]) - _fz * (pos[0] - refpoint[0])\n mz += _mz + _fy * (pos[0] - refpoint[0]) - _fx * (pos[1] - refpoint[1])\n\n _fx = np.dot([fx, fy, fz], coord[0])\n _fy = np.dot([fx, fy, fz], coord[1])\n _fz = np.dot([fx, fy, fz], coord[2])\n _mx = np.dot([mx, my, mz], coord[0])\n _my = np.dot([mx, my, mz], coord[1])\n _mz = np.dot([mx, my, mz], coord[2])\n\n return detail_id, lcid, _fx, _fy, _fz, _mx, _my, _mz\n\n# \n# from zlib import compress as compress_, decompress as decompress_\n# \n# \n# def decompress(compressed_data):\n# return decompress_(compressed_data, -15)\n# \n# \n# def compress(uncompressed_data, compression_level=6):\n# return compress_(uncompressed_data, compression_level)[2:-4]\n\n\n" ]
[ [ "numpy.isnan", "numpy.zeros" ], [ "numpy.sqrt", "numpy.asarray", "numpy.cos", "numpy.exp", "numpy.array", "numpy.where", "numpy.zeros" ], [ "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.array_equal", "numpy.zeros", "numpy.allclose", "pandas.DataFrame" ], [ "numpy.array" ], [ "numpy.unique", "numpy.arange", "numpy.searchsorted", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.allclose", "numpy.array_equal", "pandas.DataFrame", "numpy.searchsorted", "numpy.zeros" ], [ "numpy.dot", "pandas.Index", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
semeniuta/CarND-Capstone
[ "c61c4bd51789b8018924ecf2058e8228f9580d18" ]
[ "ros/src/tl_detector/image_saver.py" ]
[ "#!/usr/bin/env python\n\nimport rospy\nfrom styx_msgs.msg import TrafficLightArray\nfrom sensor_msgs.msg import Image\nfrom styx_msgs.msg import Lane\nfrom geometry_msgs.msg import PoseStamped\nimport yaml\nimport numpy as np\nfrom scipy.spatial import KDTree\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport os\n\nclass ImageSaver:\n \n def __init__(self):\n\n rospy.init_node('image_saver')\n \n self.sub_tl = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n self.sub_im = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n self.sub_pose = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n self.sub_wp = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.bridge = CvBridge()\n\n self.pose = None\n self.waypoints = None\n self.waypoints_xy = None\n self.wp_tree = None\n self.image = None\n self.lights = None\n\n self.images_to_save = []\n self.prev_idx_diff = None\n self.image_counter = 0\n self.every_nth = 5\n\n rospy.loginfo('CWD: {}'.format(os.getcwd()))\n\n rospy.spin()\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n self.image = msg\n\n light_wp, state, idx_diff = self.process_traffic_lights()\n\n if self.prev_idx_diff is not None:\n change = idx_diff - self.prev_idx_diff\n if change > 50:\n self.save_images()\n self.image_counter = 0\n self.images_to_save = []\n \n if idx_diff < 100 and idx_diff >= 0:\n \n if self.image_counter == 0:\n \n im = self.bridge.imgmsg_to_cv2(self.image, \"bgr8\")\n ts = rospy.get_rostime()\n self.images_to_save.append((im, ts, state))\n\n self.image_counter += 1\n if self.image_counter == self.every_nth:\n self.image_counter = 0 # save the next one\n\n self.prev_idx_diff = idx_diff\n \n def save_images(self):\n\n # https://answers.ros.org/question/283724/saving-images-with-image_saver-with-timestamp/\n\n n = len(self.images_to_save)\n rospy.loginfo('Saving {} images'.format(n))\n\n im_dir = '/home/alex/carnd_tl/2'\n\n for im, ts, state in self.images_to_save:\n\n fname = '{0}{1}_{2}.jpg'.format(ts.secs, ts.nsecs // 1000000, state)\n fname_full = os.path.join(im_dir, fname)\n cv2.imwrite(fname_full, im)\n rospy.loginfo('Saved image to {}'.format(fname_full))\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n \n self.waypoints = waypoints\n\n positions = (wp.pose.pose.position for wp in self.waypoints.waypoints)\n self.waypoints_xy = np.array([[p.x, p.y] for p in positions])\n self.wp_tree = KDTree(self.waypoints_xy)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n \n # Find the closest visible traffic light (if one exists)\n\n closest_light = None\n idx_closest_to_light = None\n\n if self.pose:\n \n car_p = self.pose.pose.position\n idx_closest_to_car = self.get_closest_waypoint(car_p.x, car_p.y)\n\n diff = len(self.waypoints_xy) \n\n for i, light in enumerate(self.lights):\n line = stop_line_positions[i]\n idx_closest_to_light_cand = 
self.get_closest_waypoint(line[0], line[1])\n\n d = idx_closest_to_light_cand - idx_closest_to_car\n\n if d >= 0 and d < diff:\n diff = d\n idx_closest_to_light = idx_closest_to_light_cand\n closest_light = light\n \n if idx_closest_to_light:\n \n state = self.get_light_state(closest_light)\n \n rospy.loginfo('diff={0}; light_state={1}'.format(diff, state))\n \n return idx_closest_to_light, state, diff\n \n return -1, TrafficLight.UNKNOWN, -1\n\n def get_closest_waypoint(self, x, y):\n \n closest_idx = self.wp_tree.query((x, y), 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n return light.state\n\n\nif __name__ == '__main__':\n try:\n ImageSaver()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start ImageSaver')" ]
[ [ "numpy.array", "scipy.spatial.KDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
derinaksit/REMARK
[ "fcc6e4720c9f11ddbe4701edcf2511bf6a8cdd2d" ]
[ "REMARKs/cAndCwithStickyE/Code/StickyEparams.py" ]
[ "'''\nThis module holds calibrated parameter dictionaries for the cAndCwithStickyE paper.\nIt defines dictionaries for the six types of models in cAndCwithStickyE:\n\n1) Small open economy\n2) Small open Markov economy\n3) Cobb-Douglas closed economy\n4) Cobb-Douglas closed Markov economy\n5) Representative agent economy\n6) Markov representative agent economy\n\nFor the first four models (heterogeneous agents), it defines dictionaries for\nthe Market instance as well as the consumers themselves. All parameters are quarterly.\n'''\nfrom __future__ import division\nfrom builtins import range\nfrom past.utils import old_div\nimport os\nimport numpy as np\nfrom copy import copy\nfrom HARK.utilities import approxUniform\n\n# Choose file where the Stata executable can be found. This should point at the\n# exe file itself, but the string does not need to include '.exe'. Two examples\n# are included (for locations on two authors' local computers). This variable\n# is irrelevant when the use_stata boolean in cAndCwithStickyE.py is set to False.\n# Using Stata to run the regressions allows the tables to include the KP test\n# statistic; Python's statsmodels.api currently does not have this functionality.\n\n# NOTE: To successfully use_stata, you must have Baum, Schaffer, and Stillman's\n# ivreg2 Stata module installed, as well as Kleibergen and Schaffer's ranktest\n# module. These modules are archived by RePEc IDEAS at:\n# https://ideas.repec.org/c/boc/bocode/s425401.html\n# https://ideas.repec.org/c/boc/bocode/s456865.html\n# You can also simply type \"ssc install ivreg2\" and \"ssc install ranktest\" in Stata.\n\n#stata_exe = \"C:\\Program Files (x86)\\Stata14\\stataMP-64\"\nstata_exe = \"C:\\Program Files (x86)\\Stata15\\StataSE-64\"\n\n# Choose directory paths relative to the StickyE files\n# See: https://stackoverflow.com/questions/918154/relative-paths-in-python\nmy_file_path = os.path.dirname(os.path.abspath(__file__))\n\ncalibration_dir = os.path.join(my_file_path, \"../Calibration/\") # Relative directory for primitive parameter files\ntables_dir = os.path.join(my_file_path, \"../Tables/\") # Relative directory for saving tex tables\nresults_dir = os.path.join(my_file_path, \"./Results/\") # Relative directory for saving output files\nfigures_dir = os.path.join(my_file_path, \"../Figures/\") # Relative directory for saving figures\nempirical_dir = os.path.join(my_file_path, \"../Empirical/\") # Relative directory with empirical files\n\ndef importParam(param_name):\n return float(np.max(np.genfromtxt(calibration_dir + param_name + '.txt')))\n\n# Import primitive parameters from calibrations folder\nCRRA = importParam('CRRA') # Coefficient of relative risk aversion\nDeprFacAnn = importParam('DeprFacAnn') # Annual depreciation factor\nCapShare = importParam('CapShare') # Capital's share in production function\nKYratioSS = importParam('KYratioSS') # Steady state capital to output ratio (PF-DSGE)\nUpdatePrb = importParam('UpdatePrb') # Probability that each agent observes the aggregate productivity state each period (in sticky version)\nUnempPrb = importParam('UnempPrb') # Unemployment probability\nDiePrb = importParam('DiePrb') # Quarterly mortality probability\nTranShkVarAnn = importParam('TranShkVarAnn') # Annual variance of idiosyncratic transitory shocks\nPermShkVarAnn = importParam('PermShkVarAnn') # Annual variance of idiosyncratic permanent shocks\nTranShkAggVar = importParam('TranShkAggVar') # Variance of aggregate transitory shocks\nPermShkAggVar = importParam('PermShkAggVar') # 
Variance of aggregate permanent shocks\nDiscFacSOE = importParam('betaSOE') # Discount factor, SOE model\n\n# Calculate parameters based on the primitive parameters\nDeprFac = 1. - DeprFacAnn**0.25 # Quarterly depreciation rate\nKSS = KtYratioSS = KYratioSS**(1./(1.-CapShare)) # Steady state Capital to labor productivity\nwRteSS = (1.-CapShare)*KSS**CapShare # Steady state wage rate\nrFreeSS = CapShare*KSS**(CapShare-1.) # Steady state interest rate\nRfreeSS = 1. - DeprFac + rFreeSS # Steady state return factor\nLivPrb = 1. - DiePrb # Quarterly survival probability\nDiscFacDSGE = RfreeSS**(-1) # Discount factor, HA-DSGE and RA models\nTranShkVar = TranShkVarAnn*4. # Variance of idiosyncratic transitory shocks\nPermShkVar = old_div(PermShkVarAnn,4.) # Variance of idiosyncratic permanent shocks\n#TempDstn = approxMeanOneLognormal(N=7,sigma=np.sqrt(PermShkVar))\n#DiscFacSOE = 0.99*LivPrb/(RfreeSS*np.dot(TempDstn[0],TempDstn[1]**(-CRRA))) # Discount factor, SOE model\n\n# Choose basic simulation parameters\nperiods_to_sim = 21010 # Total number of periods to simulate; this might be increased by DSGEmarkov model\nignore_periods = 1000 # Number of simulated periods to ignore (in order to ensure we are near steady state)\ninterval_size = 200 # Number of periods in each subsample interval\nAgentCount = 20000 # Total number of agents to simulate in the economy\nmax_t_between_updates = None # Maximum number of periods an agent will go between updating (can be None)\n\n# Use smaller sample for micro regression tables to save memory\nperiods_to_sim_micro = 4000\nAgentCount_micro = 5000\n\n# Choose extent of discount factor heterogeneity (inapplicable to representative agent models)\nTypeCount = 1 # Number of heterogeneous discount factor types\nDiscFacMeanSOE = DiscFacSOE # Central value of intertemporal discount factor for SOE model\nDiscFacMeanDSGE = DiscFacDSGE # ...for HA-DSGE and RA\nDiscFacSpread = 0.0 # Half-width of intertemporal discount factor band, a la cstwMPC\n\n# These parameters are for a rough \"beta-dist\" specification that fits the wealth distribution in DSGE simple\n#TypeCount = 7\n#DiscFacMeanSOE = 0.96738\n#DiscFacMeanDSGE = 0.96738\n#DiscFacSpread = 0.0227\n\n# Choose parameters for the Markov models\nStateCount = 11 # Number of discrete states in the Markov specifications\nPermGroFacMin = 0.9925 # Minimum value of aggregate permanent growth in Markov specifications\nPermGroFacMax = 1.0075 # Maximum value of aggregate permanent growth in Markov specifications\nPersistence = 0.5 # Base probability that macroeconomic Markov state stays the same; else moves up or down by 1\nRegimeChangePrb = 0.00 # Probability of \"regime change\", randomly jumping to any Markov state\n\n# Make the Markov array with chosen states, persistence, and regime change probability\nPolyMrkvArray = np.zeros((StateCount,StateCount))\nfor i in range(StateCount):\n for j in range(StateCount):\n if i==j:\n PolyMrkvArray[i,j] = Persistence\n elif (i==(j-1)) or (i==(j+1)):\n PolyMrkvArray[i,j] = 0.5*(1.0 - Persistence)\nPolyMrkvArray[0,0] += 0.5*(1.0 - Persistence)\nPolyMrkvArray[StateCount-1,StateCount-1] += 0.5*(1.0 - Persistence)\nPolyMrkvArray *= 1.0 - RegimeChangePrb\nPolyMrkvArray += RegimeChangePrb/StateCount\n\n# Define the set of aggregate permanent growth factors that can occur (Markov specifications only)\nPermGroFacSet = np.exp(np.linspace(np.log(PermGroFacMin),np.log(PermGroFacMax),num=StateCount))\n\n# Define the set of discount factors that agents have (for SOE and DSGE models)\nDiscFacSetSOE = 
approxUniform(N=TypeCount,bot=DiscFacMeanSOE-DiscFacSpread,top=DiscFacMeanSOE+DiscFacSpread)[1]\nDiscFacSetDSGE = approxUniform(N=TypeCount,bot=DiscFacMeanDSGE-DiscFacSpread,top=DiscFacMeanDSGE+DiscFacSpread)[1]\n\n###############################################################################\n\n# Define parameters for the small open economy version of the model\ninit_SOE_consumer = { 'CRRA': CRRA,\n 'DiscFac': DiscFacMeanSOE,\n 'LivPrb': [LivPrb],\n 'PermGroFac': [1.0],\n 'AgentCount': AgentCount // TypeCount, # Spread agents evenly among types\n 'aXtraMin': 0.00001,\n 'aXtraMax': 40.0,\n 'aXtraNestFac': 3,\n 'aXtraCount': 48,\n 'aXtraExtra': [None],\n 'PermShkStd': [np.sqrt(PermShkVar)],\n 'PermShkCount': 7,\n 'TranShkStd': [np.sqrt(TranShkVar)],\n 'TranShkCount': 7,\n 'UnempPrb': UnempPrb,\n 'UnempPrbRet': 0.0,\n 'IncUnemp': 0.0,\n 'IncUnempRet': 0.0,\n 'BoroCnstArt':0.0,\n 'tax_rate':0.0,\n 'T_retire':0,\n 'MgridBase': np.array([0.5,1.5]),\n 'aNrmInitMean' : np.log(0.00001),\n 'aNrmInitStd' : 0.0,\n 'pLvlInitMean' : 0.0,\n 'pLvlInitStd' : 0.0,\n 'UpdatePrb' : UpdatePrb,\n 'T_age' : None,\n 'T_cycle' : 1,\n 'cycles' : 0,\n 'T_sim' : periods_to_sim,\n 'max_t_between_updates' : max_t_between_updates\n }\n\n# Define market parameters for the small open economy\ninit_SOE_market = { 'PermShkAggCount': 5,\n 'TranShkAggCount': 5,\n 'PermShkAggStd': np.sqrt(PermShkAggVar),\n 'TranShkAggStd': np.sqrt(TranShkAggVar),\n 'PermGroFacAgg': 1.0,\n 'DeprFac': DeprFac,\n 'CapShare': CapShare,\n 'Rfree': RfreeSS,\n 'wRte': wRteSS,\n 'act_T': periods_to_sim,\n }\n\n###############################################################################\n\n# Define parameters for the small open Markov economy version of the model\ninit_SOE_mrkv_consumer = copy(init_SOE_consumer)\ninit_SOE_mrkv_consumer['MrkvArray'] = PolyMrkvArray\n\n# Define market parameters for the small open Markov economy\ninit_SOE_mrkv_market = copy(init_SOE_market)\ninit_SOE_mrkv_market['MrkvArray'] = PolyMrkvArray\ninit_SOE_mrkv_market['PermShkAggStd'] = StateCount*[init_SOE_market['PermShkAggStd']]\ninit_SOE_mrkv_market['TranShkAggStd'] = StateCount*[init_SOE_market['TranShkAggStd']]\ninit_SOE_mrkv_market['PermGroFacAgg'] = PermGroFacSet\ninit_SOE_mrkv_market['MrkvNow_init'] = StateCount // 2\ninit_SOE_mrkv_market['loops_max'] = 1\n\n###############################################################################\n\n# Define parameters for the Cobb-Douglas DSGE version of the model\ninit_DSGE_consumer = copy(init_SOE_consumer)\ninit_DSGE_consumer['DiscFac'] = DiscFacMeanDSGE\ninit_DSGE_consumer['aXtraMax'] = 120.0\ninit_DSGE_consumer['MgridBase'] = np.array([0.1,0.3,0.5,0.6,0.7,0.8,0.9,0.98,1.0,1.02,1.1,1.2,1.3,1.4,1.5,1.6,2.0,3.0,5.0])\n\n# Define market parameters for the Cobb-Douglas economy\ninit_DSGE_market = copy(init_SOE_market)\ninit_DSGE_market.pop('Rfree')\ninit_DSGE_market.pop('wRte')\ninit_DSGE_market['CRRA'] = CRRA\ninit_DSGE_market['DiscFac'] = DiscFacMeanDSGE\ninit_DSGE_market['intercept_prev'] = 0.0\ninit_DSGE_market['slope_prev'] = 1.0\n\n###############################################################################\n\n# Define parameters for the Cobb-Douglas Markov DSGE version of the model\ninit_DSGE_mrkv_consumer = copy(init_DSGE_consumer)\ninit_DSGE_mrkv_consumer['MrkvArray'] = PolyMrkvArray\n\n# Define market parameters for the Cobb-Douglas Markov economy\ninit_DSGE_mrkv_market = copy(init_SOE_mrkv_market)\ninit_DSGE_mrkv_market.pop('Rfree')\ninit_DSGE_mrkv_market.pop('wRte')\ninit_DSGE_mrkv_market['CRRA'] = 
init_DSGE_mrkv_consumer['CRRA']\ninit_DSGE_mrkv_market['DiscFac'] = init_DSGE_mrkv_consumer['DiscFac']\ninit_DSGE_mrkv_market['intercept_prev'] = StateCount*[0.0]\ninit_DSGE_mrkv_market['slope_prev'] = StateCount*[1.0]\ninit_DSGE_mrkv_market['loops_max'] = 10\n\n###############################################################################\n\n# Define parameters for the representative agent version of the model\ninit_RA_consumer = { 'CRRA': CRRA,\n 'DiscFac': DiscFacMeanDSGE,\n 'LivPrb': [1.0],\n 'PermGroFac': [1.0],\n 'AgentCount': 1,\n 'aXtraMin': 0.00001,\n 'aXtraMax': 120.0,\n 'aXtraNestFac': 3,\n 'aXtraCount': 48,\n 'aXtraExtra': [None],\n 'PermShkStd': [np.sqrt(PermShkAggVar)],\n 'PermShkCount': 7,\n 'TranShkStd': [np.sqrt(TranShkAggVar)],\n 'TranShkCount': 7,\n 'UnempPrb': 0.0,\n 'UnempPrbRet': 0.0,\n 'IncUnemp': 0.0,\n 'IncUnempRet': 0.0,\n 'BoroCnstArt':0.0,\n 'tax_rate':0.0,\n 'T_retire':0,\n 'aNrmInitMean' : np.log(0.00001),\n 'aNrmInitStd' : 0.0,\n 'pLvlInitMean' : 0.0,\n 'pLvlInitStd' : 0.0,\n 'PermGroFacAgg' : 1.0,\n 'UpdatePrb' : UpdatePrb,\n 'CapShare' : CapShare,\n 'DeprFac' : DeprFac,\n 'T_age' : None,\n 'T_cycle' : 1,\n 'T_sim' : periods_to_sim,\n 'tolerance' : 1e-6\n }\n\n###############################################################################\n\n# Define parameters for the Markov representative agent model\ninit_RA_mrkv_consumer = copy(init_RA_consumer)\ninit_RA_mrkv_consumer['MrkvArray'] = PolyMrkvArray\ninit_RA_mrkv_consumer['MrkvNow'] = [StateCount // 2]\ninit_RA_mrkv_consumer['PermGroFac'] = [PermGroFacSet]\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.genfromtxt", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
415905716/MQBench
[ "3f8321ec9ab9fd05d99c21700a901b1ff6a90a1e", "3ac8928ef6641e0ea78f9a5f0524b574a835463e" ]
[ "application/imagenet_example/main.py", "mqbench/fake_quantize/quantize_base.py" ]
[ "import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nfrom mqbench.convert_deploy import convert_deploy\nfrom mqbench.prepare_by_platform import prepare_by_platform, BackendType\nfrom mqbench.utils.state import enable_calibration, enable_quantization, disable_all\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('--train_data', metavar='DIR',\n help='path to dataset', required=True)\nparser.add_argument('--val_data', metavar='DIR',\n help='path to dataset', required=True)\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nparser.add_argument('--model_path', type=str, default=None)\nparser.add_argument('--backend', type=str, choices=['tensorrt', 'nnie', 'ppl', 'snpe'], default='tensorrt')\nparser.add_argument('--optim', type=str, default='sgd')\nparser.add_argument('--not-quant', action='store_true')\nparser.add_argument('--deploy', action='store_true')\n\nBackendMap = {'tensorrt': BackendType.Tensorrt,\n 'nnie': BackendType.NNIE,\n 'ppl': BackendType.PPLW8A16,\n 'snpe': BackendType.SNPE,\n 'vitis': BackendType.Vitis}\n\nbest_acc1 = 0\n\ndef main():\n args = parser.parse_args()\n args.quant = not args.not_quant\n args.backend = BackendMap[args.backend]\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n # for internal cluster\n if args.model_path:\n state_dict = torch.load(args.model_path)\n print(f'load pretrained checkpoint from: {args.model_path}')\n model.load_state_dict(state_dict)\n # quantize model\n if args.quant:\n model = prepare_by_platform(model, args.backend)\n\n if not torch.cuda.is_available():\n print('using CPU, this will be slow')\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, 
we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n if args.optim == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optim == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), args.lr,\n betas=(0.9, 0.999), eps=1e-08,\n weight_decay=args.weight_decay,\n amsgrad=False)\n\n # prepare dataset\n train_loader, train_sampler, val_loader, cali_loader = prepare_dataloader(args)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n\n state_dict = checkpoint['state_dict']\n model_dict = model.state_dict()\n if 'module.' in list(state_dict.keys())[0] and 'module.' 
not in list(model_dict.keys())[0]:\n for k in list(state_dict.keys()):\n state_dict[k[7:]] = state_dict.pop(k)\n\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {}), acc = {}\"\n .format(args.resume, checkpoint['epoch'], best_acc1))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n elif args.quant:\n enable_calibration(model)\n calibrate(cali_loader, model, args)\n\n cudnn.benchmark = True\n\n if args.quant:\n enable_quantization(model)\n\n if args.quant and args.deploy:\n convert_deploy(model.eval(), args.backend, input_shape_dict={'data': [10, 3, 224, 224]})\n return\n\n if args.evaluate:\n if args.quant:\n from mqbench.convert_deploy import convert_merge_bn\n convert_merge_bn(model.eval())\n validate(val_loader, model, criterion, args)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n\ndef prepare_dataloader(args):\n traindir = os.path.join(args.train_data, 'train')\n valdir = os.path.join(args.val_data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n cali_batch_size = 10\n cali_batch = 10\n cali_dataset = torch.utils.data.Subset(train_dataset, indices=torch.arange(cali_batch_size * cali_batch))\n cali_loader = torch.utils.data.DataLoader(cali_dataset, batch_size=cali_batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n return train_loader, train_sampler, val_loader, cali_loader\n\ndef calibrate(cali_loader, model, args):\n model.eval()\n print(\"Start calibration ...\")\n print(\"Calibrate images number = \", len(cali_loader.dataset))\n with torch.no_grad():\n for i, (images, target) in enumerate(cali_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n output = model(images)\n print(\"Calibration ==> \", i+1)\n print(\"End calibration.\")\n return\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', 
':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = 
len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n", "import torch\nfrom torch.quantization import FakeQuantizeBase\nfrom torch.quantization.observer import MovingAverageMinMaxObserver\nfrom torch.quantization.fake_quantize import _is_per_channel, _is_per_tensor\n\nfrom mqbench.utils import is_symmetric_quant\n\n\nclass QuantizeBase(FakeQuantizeBase):\n r\"\"\" This is an extension of the FakeQuantize module in fake_quantize.py, which\n supports more generalized lower-bit quantization and support learning of the scale\n and zero point parameters through backpropagation. For literature references,\n please see the class _LearnableFakeQuantizePerTensorOp.\n In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize\n module also includes the following attributes to support quantization parameter learning.\n \"\"\"\n def __init__(self, observer=MovingAverageMinMaxObserver, **observer_kwargs):\n super().__init__()\n self.activation_post_process = observer(**observer_kwargs)\n self.dtype = self.activation_post_process.dtype\n self.qscheme = self.activation_post_process.qscheme\n self.quant_min = self.activation_post_process.quant_min\n self.quant_max = self.activation_post_process.quant_max\n assert self.quant_min <= self.quant_max, \\\n 'quant_min must be less than or equal to quant_max'\n self.pot_scale = self.activation_post_process.pot_scale\n self.ch_axis = self.activation_post_process.ch_axis \\\n if hasattr(self.activation_post_process, 'ch_axis') else -1\n assert _is_per_channel(self.qscheme) or \\\n _is_per_tensor(self.qscheme), \\\n 'Only per channel and per tensor quantization are supported in fake quantize' + \\\n ' got qscheme: ' + str(self.qscheme)\n self.is_per_channel = _is_per_channel(self.qscheme)\n bitrange = torch.tensor(self.quant_max - self.quant_min + 1).double()\n self.bitwidth = int(torch.log2(bitrange).item())\n self.is_symmetric_quant = is_symmetric_quant(self.qscheme)\n\n @torch.jit.export\n def calculate_qparams(self):\n return self.activation_post_process.calculate_qparams()\n\n @torch.jit.export\n def extra_repr(self):\n return 'fake_quant_enabled={}, observer_enabled={}, ' \\\n 'quant_min={}, quant_max={}, dtype={}, qscheme={}, ch_axis={}, '.format(\n self.fake_quant_enabled, self.observer_enabled,\n self.quant_min, self.quant_max,\n self.dtype, self.qscheme, self.ch_axis)" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.load", "torch.utils.data.distributed.DistributedSampler", "torch.manual_seed", "torch.cuda.set_device", "torch.utils.data.DataLoader", "torch.nn.DataParallel", "torch.no_grad", "torch.cuda.is_available", "torch.arange", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel", "torch.save" ], [ "torch.log2", "torch.quantization.fake_quantize._is_per_tensor", "torch.quantization.fake_quantize._is_per_channel", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HansBlackCat/Python
[ "32c69f1f749a46b5bf1a305e385d96b2449c2a28" ]
[ "data_science/np_arr2.py" ]
[ "import numpy as np\n\nnp.random.seed(0)\n\nx1=np.random.randint(10, size=6)\nx2=np.random.randint(10, size=(3,4))\nx3=np.random.randint(10, size=(3,4,5))\n\nprint(\"------------------------------------------\")\nprint(x3)\nprint(\"------------------------------------------\")\nprint(\"x3 ndim: \", x3.ndim)\nprint(\"x2 shape: \", x3.shape)\nprint(\"x3 size: \", x3.size)\nprint(\"x3 dtypr: \", x3.dtype)\nprint(\"x3 itemsize: \", x3.itemsize, \"bytes\")\nprint(\"x3 nbytes: \", x3.nbytes, \"bytes\")\nprint()\n\nprint(\"------------------------------------------\")\nprint(x1)\nprint(\"------------------------------------------\")\nprint(\"x1[0]: \",x1[0])\nprint(\"x1[4]: \",x1[4])\nx1[0]=16.84756857\nprint(\"x1[0]=16.84756857:\", end=' ')\nprint(x1)\nprint()\n\n\n" ]
[ [ "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
google/learned_optimization
[ "1c9ee0159c97815fc6afe79a76224fb28b199053", "1c9ee0159c97815fc6afe79a76224fb28b199053" ]
[ "learned_optimization/eval_training.py", "learned_optimization/distributed.py" ]
[ "# coding=utf-8\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for evaluating optimizers and learned optimizers on tasks.\"\"\"\nimport dataclasses\nimport functools\nfrom typing import Any, Callable, Iterator, Mapping, Optional, Sequence, Tuple\n\nfrom absl import logging\nimport gin\nimport jax\nfrom jax import lax\nimport jax.numpy as jnp\nfrom learned_optimization import jax_utils\nfrom learned_optimization import profile\nfrom learned_optimization import summary\nfrom learned_optimization import training\nfrom learned_optimization import tree_utils\nfrom learned_optimization.learned_optimizers import base as lopt_base\nfrom learned_optimization.optimizers import base as opt_base\nfrom learned_optimization.tasks import base as tasks_base\nimport numpy as onp\nimport tqdm\n\nOptState = Any\nTaskParam = Any\nData = Any\nPRNGKey = jnp.ndarray\n\n\[email protected](\n jax.jit, static_argnames=(\"task\", \"opt\", \"pmap_axis_name\", \"with_metrics\"))\ndef _next_state(\n task: tasks_base.Task,\n opt: opt_base.Optimizer,\n opt_state: OptState,\n data: Any,\n key: PRNGKey,\n pmap_axis_name: Optional[str] = None,\n is_valid: bool = False,\n with_metrics: bool = False,\n) -> Tuple[OptState, jnp.ndarray, PRNGKey, Mapping[str, jnp.ndarray]]:\n \"\"\"Take a single step on on inner-training.\"\"\"\n\n def fn(opt_state, key, data):\n key, key1 = jax.random.split(key)\n p, s = opt.get_params_state(opt_state)\n (l, state), grad = jax.value_and_grad(\n task.loss_with_state, has_aux=True)(p, s, key1, data)\n\n if pmap_axis_name:\n grad = lax.pmean(grad, pmap_axis_name)\n l = lax.pmean(l, pmap_axis_name)\n\n key, key1 = jax.random.split(key)\n next_opt_state = opt.update(\n opt_state, grad, loss=l, model_state=state, is_valid=is_valid, key=key1)\n return next_opt_state, l, key\n\n if with_metrics:\n key, summary_key = jax.random.split(key)\n (next_opt_state, loss,\n key), metrics = summary.with_summary_output_reduced(fn)(\n opt_state, key, data, summary_sample_rng_key=summary_key)\n else:\n next_opt_state, loss, key = fn(opt_state, key, data)\n metrics = {}\n return next_opt_state, loss, key, metrics\n\n\[email protected](jax.jit, static_argnames=(\"task\", \"opt\", \"pmap_axis_name\"))\ndef _loss_and_aux(\n task: tasks_base.Task,\n opt: opt_base.Optimizer,\n opt_state: OptState,\n data: Data,\n key: PRNGKey,\n pmap_axis_name: Optional[str] = None\n) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Compute loss and auxilary data from a task.\"\"\"\n p, s = opt.get_params_state(opt_state)\n l, _, aux = task.loss_with_state_and_aux(p, s, key, data)\n if pmap_axis_name:\n l = lax.pmean(l, pmap_axis_name)\n aux = lax.pmean(aux, pmap_axis_name)\n\n norm_fn = getattr(task, \"normalizer\", lambda x: x)\n return l, norm_fn(l), aux\n\n\ndef _batch_eval(\n task: tasks_base.Task,\n opt: opt_base.Optimizer,\n opt_state: Any,\n key: PRNGKey,\n data_iter: Iterator[Any],\n eval_batches: int,\n device: Optional[jax.lib.xla_client.Device] = None\n) 
-> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Compute loss and auxilary data over `eval_batches` of data.\"\"\"\n eval_losses = []\n eval_norm_losses = []\n eval_auxs = []\n\n for _ in range(eval_batches):\n key, key1 = jax.random.split(key)\n if data_iter:\n batch = next(data_iter)\n else:\n batch = ()\n if device:\n batch = jax.device_put(batch, device=device)\n ls, norm_ls, aux = _loss_and_aux(task, opt, opt_state, batch, key=key1)\n eval_losses.append(ls)\n eval_norm_losses.append(norm_ls)\n eval_auxs.append(aux)\n\n return (onp.mean(eval_losses), onp.mean(eval_norm_losses),\n jax.tree_map(onp.mean, tree_utils.tree_zip_onp(eval_auxs)))\n\n\[email protected]()\ndef single_task_training_curves(\n task: tasks_base.Task,\n opt: opt_base.Optimizer,\n num_steps: int,\n key: PRNGKey,\n eval_every: int = 10,\n eval_batches: int = 5,\n last_eval_batches: int = 20,\n eval_task: Optional[tasks_base.Task] = None,\n device: Optional[jax.lib.xla_client.Device] = None,\n summary_writer: Optional[summary.SummaryWriterBase] = None,\n) -> Mapping[str, jnp.ndarray]:\n \"\"\"Compute training curves.\"\"\"\n\n if eval_task is None:\n eval_task = task\n\n splits = [\"train\", \"outer_valid\", \"test\"]\n\n with profile.Profile(\"setup\"):\n key = jax.device_put(key, device)\n\n key, key1 = jax.random.split(key)\n p, s = jax_utils.cached_jit(task.init_with_state)(key)\n opt_state = jax_utils.cached_jit(\n opt.init, static_argnames=(\"num_steps\",))(\n p, model_state=s, num_steps=num_steps)\n\n losses = []\n eval_auxs = []\n use_data = task.datasets is not None\n train_xs = []\n eval_xs = []\n for i in tqdm.trange(num_steps + 1, position=0):\n with profile.Profile(\"eval\"):\n m = {}\n if i % eval_every == 0 and eval_batches:\n on_last = (i == num_steps)\n for s in splits:\n key, key1 = jax.random.split(key)\n loss, loss_normalized, aux = _batch_eval(\n eval_task,\n opt,\n opt_state,\n key1,\n task.datasets.split(s) if use_data else (),\n eval_batches if not on_last else last_eval_batches,\n device=device)\n m[f\"eval/{s}/loss\"] = loss\n m[f\"eval/{s}/loss_normalized\"] = loss_normalized\n for k, v in aux.items():\n m[f\"eval/{s}/{k}\"] = v\n eval_auxs.append(m)\n if summary_writer:\n for k, v in m.items():\n summary_writer.scalar(k, v, step=i)\n eval_xs.append(i)\n\n with profile.Profile(\"get_batch\"):\n batch = next(task.datasets.train) if use_data else ()\n with profile.Profile(\"put_batch_and_split\"):\n batch = jax.device_put(batch, device=device)\n\n with profile.Profile(\"next_state\"):\n opt_state, l, key, _ = _next_state(\n task, opt, opt_state, batch, key, with_metrics=False)\n losses.append(l)\n train_xs.append(i)\n\n ret = {\n \"train/xs\": onp.asarray(train_xs),\n \"train/loss\": onp.asarray(losses),\n }\n\n if eval_batches:\n stacked_metrics = tree_utils.tree_zip_onp(eval_auxs)\n ret[\"eval/xs\"] = onp.asarray(eval_xs)\n ret[\"eval/last_eval_batches\"] = onp.asarray(last_eval_batches)\n ret[\"eval/eval_batches\"] = onp.asarray(eval_batches)\n ret = {**ret, **stacked_metrics}\n return ret\n\n\[email protected](jax.pmap, static_broadcasted_argnums=(1,))\[email protected](jax.vmap, in_axes=(0, None))\ndef _pmap_vector_random_split(key: PRNGKey, n_split: int) -> PRNGKey:\n key1, key2 = jax.random.split(key)\n return jax.random.split(key1, n_split), key2\n\n\[email protected]\nclass _CachedTrainFun:\n init: Callable[[lopt_base.MetaParams, PRNGKey, int], Tuple[OptState,\n TaskParam]]\n init_with_task_params: Callable[\n [lopt_base.MetaParams, PRNGKey, int, TaskParam], 
OptState]\n unroll_n_steps: Callable[\n [lopt_base.MetaParams, OptState, TaskParam, Tuple[Data, PRNGKey]],\n Tuple[OptState, jnp.ndarray, jnp.ndarray]]\n eval_loss: Callable[\n [lopt_base.MetaParams, TaskParam, OptState, Tuple[Any,\n PRNGKey]], jnp.ndarray]\n\n\[email protected]_cache(maxsize=None)\ndef _cached_vectorize_train_fns(\n task_family: tasks_base.TaskFamily,\n learned_opt: lopt_base.LearnedOptimizer,\n n_tasks: int,\n steps_per_jit: int = 10,\n with_aux_values: Sequence[str] = ()\n) -> _CachedTrainFun:\n \"\"\"Construct the pmap, vmap functions for training.\n\n This function is cached, so repeated calls don't have to pay compile times.\n\n Args:\n task_family: task family to sample tasks from.\n learned_opt: learned optimizer\n n_tasks: number of tasks to train spread across devices\n steps_per_jit: number of steps to fuse together.\n with_aux_values: aux values to return in addition to losses.\n\n Returns:\n A dataclass containing functions which initialize, unroll, and evalute the\n inner problem being trained.\n \"\"\"\n logging.info( # pylint: disable=logging-fstring-interpolation\n f\"Recreating get_function with: {task_family} ({id(task_family)}), {learned_opt} ({id(learned_opt)}), {n_tasks}\"\n )\n\n @functools.partial(jax.pmap, in_axes=(None, 0, None))\n def vec_single_task(theta, key, num_steps):\n opt = learned_opt.opt_fn(theta)\n\n @jax.vmap\n def fn(key):\n key1, key2, key3 = jax.random.split(key, 3)\n task_param = task_family.sample(key1)\n inner_param, inner_state = task_family.task_fn(\n task_param).init_with_state(key2)\n opt_state = opt.init(\n inner_param, model_state=inner_state, num_steps=num_steps, key=key3)\n return opt_state, task_param\n\n return fn(key)\n\n @functools.partial(jax.pmap, in_axes=(None, 0, None, 0))\n def vec_single_task_with_task_params(theta, key, num_steps, task_params):\n opt = learned_opt.opt_fn(theta)\n\n @jax.vmap\n def fn(key, task_param):\n key1, key2 = jax.random.split(key, 2)\n inner_param, inner_state = task_family.task_fn(\n task_param).init_with_state(key1)\n opt_state = opt.init(\n inner_param, model_state=inner_state, num_steps=num_steps, key=key2)\n return opt_state\n\n return fn(key, task_params)\n\n def one_step(opt, task_param, opt_state, data_key):\n data, key = data_key\n task = task_family.task_fn(task_param)\n next_opt_state, l, key, _ = _next_state(\n task, opt, opt_state, data, key, with_metrics=False)\n return next_opt_state, l\n\n @functools.partial(jax.pmap, in_axes=(None, 0, 0, 0))\n def vec_unroll_n_steps(theta, opt_states, task_params, datas_key):\n opt = learned_opt.opt_fn(theta)\n\n @jax.vmap\n def fn(opt_states, task_params, data_key):\n p_one_step = functools.partial(one_step, opt, task_params)\n opt_states, losses = lax.scan(\n p_one_step, opt_states, data_key, length=steps_per_jit)\n norm_losses = jax.vmap(task_family.task_fn(task_params).normalizer)(\n losses)\n return opt_states, losses, norm_losses\n\n return fn(opt_states, task_params, datas_key)\n\n @functools.partial(jax.pmap, in_axes=(None, 0, 0, 0))\n def eval_loss(theta, task_params, opt_state, data_key):\n opt = learned_opt.opt_fn(theta)\n\n @jax.vmap\n def fn(opt_state, task_param, data_key):\n task = task_family.task_fn(task_param)\n\n def single_batch(data, key):\n p = opt.get_params(opt_state)\n s = opt.get_state(opt_state)\n l, _, aux = task.loss_with_state_and_aux(p, s, key, data)\n aux = {k: v for k, v in aux.items() if k in with_aux_values}\n return l, task.normalizer(l), aux\n\n data, key = data_key\n loss, norm_loss, aux = 
jax.vmap(single_batch)(data, key)\n return jnp.mean(loss), jnp.mean(norm_loss), jax.tree_map(jnp.mean, aux)\n\n return fn(opt_state, task_params, data_key)\n\n return _CachedTrainFun(\n init=vec_single_task,\n init_with_task_params=vec_single_task_with_task_params,\n unroll_n_steps=vec_unroll_n_steps,\n eval_loss=eval_loss)\n\n\[email protected]\ndef multi_task_training_curves(\n task_family: tasks_base.TaskFamily,\n learned_opt: lopt_base.LearnedOptimizer,\n theta: lopt_base.MetaParams,\n n_tasks: int,\n seed: Optional[int] = None,\n key: Optional[PRNGKey] = None,\n task_params: Optional[Any] = None,\n n_devices: Optional[int] = None,\n n_eval_batches_vec: int = 1,\n n_eval_batches: int = 1,\n last_eval_batches: int = 1,\n eval_every: int = 10,\n steps_per_jit: int = 10,\n eval_just_at_end: bool = False,\n steps: int = 10000,\n splits: Sequence[str] = (\"train\",),\n with_aux_values: Sequence[str] = (),\n) -> Mapping[str, onp.ndarray]:\n \"\"\"Train n_tasks which are sampled from the task_family using a learned opt.\n\n This runs on multiple chips (using pmap) for increased throughput UNLESS pmap\n is set.\n\n Arguments:\n task_family: TaskFamily to train.\n learned_opt: LearnedOptimizer to inner-train with.\n theta: weights of learned optimizer\n n_tasks: number of tasks to train in parallel. Must be a multiple of\n n_devices.\n seed: Initial seed for jax RNG. Note this does not control data.\n key: RNG to seed task initializations. Note this does not control data.\n task_params: Task parameters to use instead of sampling them.\n n_devices: number of devices to spread the n_tasks over.\n n_eval_batches_vec: number of evaluation batches to run vectorized.\n n_eval_batches: number of evaluation batches to run in python for loop.\n last_eval_batches: Number of batches to evaluate at the end of training.\n eval_every: number of steps per evaluation.\n steps_per_jit: number of steps to unroll in each jit function.\n eval_just_at_end: Just evaluate at the end of training.\n steps: total number of steps to run.\n splits: data splits to evaluate on.\n with_aux_values: aux values to return in addition to losses.\n\n Returns:\n A dictionary containing training curves for the trained models. All values\n will have a leading `n_tasks` dimension.\n eval_losses: 1d array of unnormalized losses\n normalized_eval_losses: 1d array of normalized losses. This is using the\n inner norm.\n \"\"\"\n assert eval_every % steps_per_jit == 0\n if n_devices is None:\n n_devices = len(jax.local_devices())\n\n if key is None:\n if seed is None:\n seed = onp.random.randint(0, 1000000)\n key = jax.random.PRNGKey(seed)\n\n keys = jax.random.split(key, n_devices)\n if n_tasks % n_devices != 0:\n raise ValueError(\"Must specify n_tasks to be a multiple of n_devices.\"\n f\" Got n_tasks={n_tasks} and n_devices={n_devices}\")\n\n n_tasks_per_device = n_tasks // n_devices\n keys = jax.vmap(lambda k: jax.random.split(k, n_tasks_per_device))(keys)\n\n logging.info(f\"Running _cached_vectorize_train_fns with: \" # pylint: disable=logging-fstring-interpolation\n f\"{task_family} ({id(task_family)}), \"\n f\"{learned_opt} ({id(learned_opt)}).\"\n f\"Found n_devices {n_devices} and n_tasks {n_tasks}.\")\n\n train_fns = _cached_vectorize_train_fns(\n task_family,\n learned_opt,\n n_tasks,\n steps_per_jit=steps_per_jit,\n with_aux_values=with_aux_values)\n\n # Not passed in! 
So sample a new task params\n if task_params is None:\n opt_states, task_params = train_fns.init(theta, keys, steps)\n else:\n if jax.tree_leaves(task_params):\n assert tree_utils.first_dim(task_params) == n_tasks\n task_params = jax.tree_map(\n lambda x: jnp.reshape(x, (n_devices, n_tasks_per_device) + x.shape[1:]),\n task_params)\n opt_states = train_fns.init_with_task_params(theta, keys, steps,\n task_params)\n\n if steps % steps_per_jit:\n raise ValueError(\"Please set steps and steps_per_jit to be multiples of\"\n f\" each other. Got steps:{steps}\"\n f\" steps_per_jit{steps_per_jit}\")\n\n def get_datas(batches, split=\"train\"):\n # TODO(lmetz) move axis?\n return training.get_batches(\n task_family, [n_devices, n_tasks_per_device, batches], split=split)\n\n def eval_loop(theta, task_params, opt_states, keys, n_eval_batches):\n\n with profile.Profile(\"eval\"):\n\n def losses_for_split(split):\n sub_l = []\n sub_norm_l = []\n sub_auxs = []\n for _ in range(n_eval_batches):\n eval_datas = get_datas(n_eval_batches_vec, split=split)\n l, norm_l, auxs = train_fns.eval_loss(theta, task_params, opt_states,\n (eval_datas, keys))\n sub_l.append(l)\n sub_norm_l.append(norm_l)\n sub_auxs.append(auxs)\n\n sub_auxs = tree_utils.tree_zip_onp(sub_auxs)\n with profile.Profile(\"eval_agg_blocked\"):\n # mean over the n_eval_batches sample\n return (onp.mean(sub_l, axis=0), onp.mean(sub_norm_l, axis=0),\n {k: onp.mean(v, axis=0) for k, v in sub_auxs.items()})\n\n all_losses = {}\n for s in splits:\n unnorm_l, norm_l, auxs = losses_for_split(s)\n all_losses[f\"eval/{s}/loss\"] = unnorm_l\n all_losses[f\"eval/{s}/norm_loss\"] = norm_l\n for k, v in auxs.items():\n all_losses[f\"eval/{s}/aux/{k}\"] = v\n\n return all_losses\n\n eval_losses = []\n eval_xs = []\n train_losses = []\n train_norm_losses = []\n\n # Note ordering here is to overlap data grabbing with computation\n for i in tqdm.trange(steps // steps_per_jit):\n if (i * steps_per_jit) % eval_every == 0 and n_eval_batches_vec > 0 and (\n not eval_just_at_end):\n data_keys, keys = _pmap_vector_random_split(keys, n_eval_batches_vec)\n l = eval_loop(theta, task_params, opt_states, data_keys, n_eval_batches)\n eval_losses.append(l)\n eval_xs.append(i * steps_per_jit)\n\n with profile.Profile(\"data\"):\n datas = get_datas(steps_per_jit)\n with profile.Profile(\"shard_data\"):\n data_keys, keys = _pmap_vector_random_split(keys, steps_per_jit)\n with profile.Profile(\"unroll_n_steps__noblocking\"):\n opt_states, train_loss, train_loss_norm = train_fns.unroll_n_steps(\n theta, opt_states, task_params, (datas, data_keys))\n train_losses.append(train_loss)\n train_norm_losses.append(train_loss_norm)\n\n # One final eval at the end.\n with profile.Profile(\"final_eval\"):\n if n_eval_batches_vec > 0:\n data_keys, keys = _pmap_vector_random_split(keys, n_eval_batches_vec)\n l = eval_loop(theta, task_params, opt_states, data_keys,\n last_eval_batches)\n eval_losses.append(l)\n eval_xs.append(steps)\n\n train_losses = onp.concatenate(train_losses, axis=2)\n train_losses = train_losses.reshape([n_tasks, train_losses.shape[2]])\n\n eval_losses = tree_utils.tree_zip_onp(eval_losses)\n eval_losses = jax.tree_map(\n lambda x: x.reshape([x.shape[0], n_tasks]).transpose(1, 0), eval_losses)\n\n return {\n \"train/xs\":\n onp.tile(onp.expand_dims(onp.arange(steps), 0), [n_tasks, 1]),\n \"train/loss\":\n train_losses,\n \"eval/xs\":\n onp.tile(onp.expand_dims(onp.asarray(eval_xs), 0), [n_tasks, 1]),\n \"eval/last_eval_batches\":\n onp.asarray(last_eval_batches),\n 
\"eval/eval_batches\":\n onp.asarray(n_eval_batches * n_eval_batches_vec),\n **eval_losses\n }\n", "# coding=utf-8\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Manages distributed training.\n\nThis module currently exposes an AsyncLearner which runs on a centralized\nserver, and AsyncWorker which runs on a multiple clients.\n\nWorkers grab the current weight values, and put back computed information.\nThis information usually contains a gradient estimate.\nBatches of this info can be grabbed from the learner and used to compute\nmeta-weight updates. The resulting update can then be used to update the\nmeta-weights. Once updated, the meta-weights can wet back on the learner.\n\"\"\"\nfrom concurrent import futures\nimport hashlib\nimport threading\nimport time\nfrom typing import Any, Callable, Generic, Optional, Sequence, Tuple, TypeVar\n\nfrom absl import logging\nimport courier\nfrom learned_optimization import profile\nimport numpy as onp\n\nT = TypeVar(\"T\")\nW = TypeVar(\"W\")\n\n\ndef uniquify_server_name(server_name: str, experiment_name: str) -> str:\n \"\"\"Create a unique name for the server.\n\n Args:\n server_name: name of server. There could be multiple of these per single\n training job.\n experiment_name: name of the experiemnt. This is shared across all machines\n for a given training job. Often this is the log directory.\n\n Returns:\n name: The name of the server.\n \"\"\"\n\n hmod = hashlib.sha256()\n hmod.update(experiment_name.encode(\"utf-8\"))\n hval = hmod.hexdigest()[0:20]\n logging.info(f\"Hashing experiment name [{experiment_name}] => {str(hval)}\") #. pylint: disable=logging-fstring-interpolation\n return str(hval) + \"__\" + server_name\n\n\nclass AsyncLearner(Generic[T, W]):\n \"\"\"Centralizedd learner for async training.\n\n This class creates a server and provides interfaces to get batches of\n meta-gradients.\n \"\"\"\n\n def __init__(self,\n experiment_name: str,\n weights: W,\n current_iteration: int,\n batch_size: int,\n staleness: int,\n buffer_size: Optional[int] = None,\n block_when_buffer_full=True,\n start_server: bool = True,\n port: Optional[int] = None):\n \"\"\"Initializer.\n\n Args:\n experiment_name: Name of experiment. Shared across all jobs in same model\n being trained.\n weights: PyTree of weights / values to be fetched by the worker.\n current_iteration: Current step / update number. Used to keep track of\n stale weights.\n batch_size: batchsize of gradients to return\n staleness: max amount of staleness acceptable before removing work from\n workers.\n buffer_size: max number of gradients to store in memory. Any more than\n this number will cause workers to hang until space is free. This is used\n To controll memmory if workers compute very quickly.\n block_when_buffer_full: Block if buffer is full. 
Otherwise throw away data\n start_server: Option to not start the courier server.\n port: int port to host server at.\n \"\"\"\n\n self._outer_gradients = []\n self._weights = weights\n self._batch_size = batch_size\n self._block_when_buffer_full = block_when_buffer_full\n\n if not buffer_size:\n buffer_size = batch_size * 5\n\n self._buffer_size = buffer_size\n\n self._experiment_name = experiment_name\n self._current_iteration = current_iteration\n self._staleness = staleness\n self._lock = threading.Lock()\n self._cv = threading.Condition()\n self._server = None\n self._port = port\n\n if start_server:\n self.start_server()\n\n def start_server(self):\n if not self._server:\n self._server = courier.Server(\n uniquify_server_name(\"learner\", self._experiment_name),\n port=self._port)\n self._server.Bind(\"put_grads\", self.put_grads)\n self._server.Bind(\"get_weights\", self.get_weights)\n logging.info(\"Started Async Server!\")\n self._server.Start()\n\n def _is_step_valid(self, step: int) -> bool:\n step = onp.asarray(step)\n return (self._current_iteration >= step and\n (self._current_iteration - step) <= self._staleness)\n\n def put_grads(self, worker_id: Any, step: int, value: T):\n \"\"\"Put computed gradients into learner.\"\"\"\n while True:\n if self._is_step_valid(step):\n self._lock.acquire(blocking=True)\n logging.info( # pylint: disable=logging-fstring-interpolation\n f\"size of outer_gradients {len(self._outer_gradients)}....\")\n if len(self._outer_gradients) < self._buffer_size:\n self._outer_gradients.append((int(step), value))\n self._lock.release()\n break\n else:\n self._lock.release()\n if self._block_when_buffer_full:\n logging.info(f\"Hanging worker {worker_id}....\") # pylint: disable=logging-fstring-interpolation\n time.sleep(1)\n else:\n logging.info(f\"Throwing away data for {worker_id}....\") # pylint: disable=logging-fstring-interpolation\n return\n with self._cv:\n self._cv.notify_all()\n else:\n break\n\n if self._is_step_valid(step):\n with self._cv:\n self._cv.notify_all()\n\n @profile.wrap()\n def get_weights(self, worker_id: Any) -> Tuple[int, W]: # pylint: disable=unused-argument\n return self._current_iteration, self._weights\n\n @profile.wrap()\n def gather_grads(\n self,\n filter_fn: Callable[[W], bool] = lambda x: True\n ) -> Tuple[Sequence[int], Sequence[W]]:\n \"\"\"Grab a batch of gradients from the learner.\n\n If gradients are not yet avalible, block.\n\n Args:\n filter_fn: Function to filter gradients / gradients that should not be\n included in the batch.\n\n Returns:\n steps: A batch of steps for which gradients had been computed.\n gradients: A list of gradients computed from workers.\n \"\"\"\n with self._cv:\n\n def filtered_grads():\n return [(step, val)\n for step, val in self._outer_gradients\n if (self._is_step_valid(step) and filter_fn(val))]\n\n while True:\n self._cv.wait_for(\n lambda: len(self._outer_gradients) >= self._batch_size)\n\n # get a batch. 
the first lets say.\n with self._lock:\n self._outer_gradients = filtered_grads()\n if len(self._outer_gradients) < self._batch_size:\n continue\n\n steps, grads = zip(*self._outer_gradients[0:self._batch_size])\n\n with self._lock:\n self._outer_gradients = self._outer_gradients[self._batch_size:]\n\n return steps, grads\n\n @profile.wrap()\n def set_weights(self,\n current_iteration: int,\n weights: W,\n clear_buffer: bool = False) -> int:\n \"\"\"Set the current weights on the learner.\n\n Args:\n current_iteration: The iteration these weights come from.\n weights: Value of the weights.\n clear_buffer: To clear the remaining weights.\n\n Returns:\n number of gradients which have been removed.\n \"\"\"\n with self._lock:\n self._weights = weights\n self._current_iteration = onp.asarray(current_iteration)\n\n before = len(self._outer_gradients)\n\n if clear_buffer:\n self._outer_gradients = []\n\n self._outer_gradients = [\n (s, g) for s, g in self._outer_gradients if self._is_step_valid(s)\n ]\n after = len(self._outer_gradients)\n return before - after\n\n\nclass DistributedWorker(Generic[T, W]):\n \"\"\"Distributed worker used to compute gradients.\n\n This can be run on a large number of workers concurrently and can be used with\n either AsyncLearner or SyncLearner.\n \"\"\"\n\n def __init__(self,\n experiment_name: str,\n worker_id: Any,\n learner_address: Optional[str] = None):\n \"\"\"Initializer.\n\n Args:\n experiment_name: Name of experiment. Should be the same for the entire\n job.\n worker_id: ID of the current worker.\n learner_address: adress of learner courier server.\n \"\"\"\n self._client = courier.Client(\n learner_address if learner_address else uniquify_server_name(\n \"learner\", experiment_name))\n self._worker_id = worker_id\n\n @profile.wrap()\n def get_weights(self) -> W:\n \"\"\"Get the current set of weights from the learner.\"\"\"\n return self._client.get_weights(self._worker_id)\n\n @profile.wrap()\n def put_grads(self, step: int, grad: T):\n \"\"\"Send the computed gradient from the given step to the learner.\"\"\"\n return self._client.put_grads(self._worker_id, step, grad)\n\n\nclass SyncLearner(Generic[T, W]):\n \"\"\"Centralized syncronous learner for distributed training.\n\n This class creates a server and provides interfaces to get batches of\n meta-gradients.\n\n WARNING: This class does not yet work with population based training.\n \"\"\"\n\n def __init__(self,\n experiment_name: str,\n weights: W,\n current_iteration: int,\n num_workers: int = 4,\n start_server: bool = True,\n port: Optional[int] = None,\n monitor_state: bool = False):\n \"\"\"Initializer.\n\n Args:\n experiment_name: Name of experiment. Shared across all jobs in same model\n being trained.\n weights: PyTree of weights / values to be fetched by the worker.\n current_iteration: Current step / update number.\n num_workers: Number of sync workers.\n start_server: Option to not start the courier server.\n port: int port to host server at.\n monitor_state: Debug flag. 
Start a thread which prints the learner's\n state.\n \"\"\"\n self._outer_gradients = {i: None for i in range(num_workers)}\n\n self._weights = weights\n self._num_workers = num_workers\n self._experiment_name = experiment_name\n\n self._current_iteration = current_iteration\n\n self._lock = threading.Lock()\n self._cv = threading.Condition()\n self._server = None\n self._port = port\n\n if start_server:\n self.start_server()\n\n if monitor_state:\n self._pool = futures.ThreadPoolExecutor(1)\n self._pool.submit(self.monitor)\n\n def monitor(self):\n while True:\n print(\"Monitor\")\n print(\"current iter\", self._current_iteration)\n print(self._outer_gradients)\n print(\"EndMonitor\")\n time.sleep(2)\n\n def start_server(self):\n if not self._server:\n self._server = courier.Server(\n uniquify_server_name(\"learner\", self._experiment_name),\n port=self._port)\n self._server.Bind(\"put_grads\", self.put_grads)\n self._server.Bind(\"get_weights\", self.get_weights)\n logging.info(\"Started Sync Server!\")\n self._server.Start()\n\n def put_grads(self, worker_id: Any, step: int, value: T):\n \"\"\"Put computed gradients into learner.\"\"\"\n with self._cv:\n self._lock.acquire(blocking=True)\n assert worker_id < self._num_workers\n if step == self._current_iteration:\n self._outer_gradients[worker_id] = (step, value)\n self._lock.release()\n self._cv.notify_all()\n\n @profile.wrap()\n def get_weights(self, worker_id: Any) -> Tuple[int, W]: # pylint: disable=unused-argument\n \"\"\"Get the weights from learner.\"\"\"\n while True:\n self._lock.acquire(blocking=True)\n if self._outer_gradients[worker_id] is None:\n ret = self._current_iteration, self._weights\n else:\n ret = None\n self._lock.release()\n if ret:\n return ret\n\n with self._cv:\n self._cv.wait_for(lambda: self._outer_gradients[worker_id] is None)\n\n @profile.wrap()\n def gather_grads(\n self,\n filter_fn: Callable[[W], bool] = lambda x: True\n ) -> Tuple[Sequence[int], Sequence[W]]:\n \"\"\"Grab a batch of gradients from the learner.\n\n If gradients are not yet avalible, block.\n\n Args:\n filter_fn: Function to filter gradients / gradients that should not be\n included in the batch.\n\n Returns:\n steps: A batch of steps for which gradients had been computed.\n gradients: A list of gradients computed from workers.\n \"\"\"\n # TODO(lmetz) use filter_fn so that this works with population based\n # training\n del filter_fn\n\n while True:\n self._lock.acquire(blocking=True)\n if all(self._outer_gradients.values()):\n steps_grads = list(self._outer_gradients.values())\n self._lock.release()\n steps, grads = zip(*steps_grads)\n return steps, grads\n else:\n self._lock.release()\n\n with self._cv:\n self._cv.wait_for(lambda: all(self._outer_gradients.values()))\n\n @profile.wrap()\n def set_weights(self,\n current_iteration: int,\n weights: W,\n clear_buffer: bool = False) -> int:\n \"\"\"Set the current weights on the learner.\n\n Args:\n current_iteration: The iteration these weights come from.\n weights: Value of the weights.\n clear_buffer: To clear the remaining weights. This is unused for sync\n training.\n\n Returns:\n number of gradients which have been removed.\n \"\"\"\n # in sync training, buffer is always clear.\n del clear_buffer\n with self._lock, self._cv:\n self._weights = weights\n self._current_iteration = onp.asarray(current_iteration)\n self._outer_gradients = {k: None for k in self._outer_gradients.keys()}\n self._cv.notify_all()\n\n # no samples where deleted.\n return 0\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.concatenate", "numpy.mean", "numpy.random.randint" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GraduationTeam2020/LipNet
[ "a3f63d3b793c0074bc7237e082f3eba4eaf2fc86" ]
[ "training/overlapped_speakers/train.py" ]
[ "from keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint\nfrom lipnet.lipreading.generators import BasicGenerator\nfrom lipnet.lipreading.callbacks import Statistics, Visualize\nfrom lipnet.lipreading.curriculums import Curriculum\nfrom lipnet.core.decoders import Decoder\nfrom lipnet.lipreading.helpers import labels_to_text\nfrom lipnet.utils.spell import Spell\nfrom lipnet.model2 import LipNet\nimport numpy as np\nimport datetime\nimport os\nimport sys\n\nnp.random.seed(55)\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\n\nPREDICT_GREEDY = False\nPREDICT_BEAM_WIDTH = 200\nPREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt')\n\ndef curriculum_rules(epoch):\n return { 'sentence_length': -1, 'flip_probability': 0.5, 'jitter_probability': 0.05 }\n\n\ndef train(run_name, speaker, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):\n DATASET_DIR = os.path.join(CURRENT_PATH, speaker, 'datasets')\n OUTPUT_DIR = os.path.join(CURRENT_PATH, speaker, 'results')\n LOG_DIR = os.path.join(CURRENT_PATH, speaker, 'logs')\n\n curriculum = Curriculum(curriculum_rules)\n lip_gen = BasicGenerator(dataset_path=DATASET_DIR,\n minibatch_size=minibatch_size,\n img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,\n absolute_max_string_len=absolute_max_string_len,\n curriculum=curriculum, start_epoch=start_epoch).build()\n\n lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,\n absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())\n lipnet.summary()\n\n adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # the loss calc occurs elsewhere, so use a dummy lambda func for the loss\n lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)\n\n # load weight if necessary\n if start_epoch > 0:\n weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))\n lipnet.model.load_weights(weight_file)\n\n spell = Spell(path=PREDICT_DICTIONARY)\n decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,\n postprocessors=[labels_to_text, spell.sentence])\n\n # define callbacks\n statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))\n visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)\n tensorboard = TensorBoard(log_dir=os.path.join(LOG_DIR, run_name))\n csv_logger = CSVLogger(os.path.join(LOG_DIR, \"{}-{}.csv\".format('training',run_name)), separator=',', append=True)\n checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, \"weights{epoch:02d}.h5\"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)\n\n lipnet.model.fit_generator(generator=lip_gen.next_train(),\n steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,\n validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,\n initial_epoch=start_epoch,\n verbose=1,\n max_q_size=5,\n workers=2,\n pickle_safe=False)\n\nif __name__ == '__main__':\n run_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n speaker = sys.argv[1]\n train(run_name, speaker, 0, 5000, 3, 360, 288, 75, 32, 50)" ]
[ [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hongyehu/NeuralRG
[ "ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8", "ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8" ]
[ "test/test_tebd.py", "source/ising.py" ]
[ "from flowRelated import *\n\nimport os\nimport sys\nsys.path.append(os.getcwd())\n\nimport torch\nfrom torch import nn\nimport numpy as np\nimport utils\nimport flow\nimport source\n\ndef test_bijective():\n p = source.Gaussian([1,4,4])\n BigList = []\n for _ in range(2*2*2):\n maskList = []\n for n in range(4):\n if n %2==0:\n b = torch.zeros(1,4)\n i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)\n b.zero_()[:,i] = 1\n b=b.view(1,1,2,2)\n else:\n b = 1-b\n maskList.append(b)\n maskList = torch.cat(maskList,0).to(torch.float32)\n BigList.append(maskList)\n\n layers = [flow.RNVP(BigList[n], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),None]) for _ in range(4)], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),utils.ScalableTanh(4)]) for _ in range(4)]\n) for n in range(2*2*2)]\n length = 4\n depth = 4\n t = flow.TEBD(2,length,layers,depth,p)\n bijective(t)\n\ndef test_saveload():\n p = source.Gaussian([1,4,4])\n BigList = []\n for _ in range(2*2*2):\n maskList = []\n for n in range(4):\n if n %2==0:\n b = torch.zeros(1,4)\n i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)\n b.zero_()[:,i] = 1\n b=b.view(1,1,2,2)\n else:\n b = 1-b\n maskList.append(b)\n maskList = torch.cat(maskList,0).to(torch.float32)\n BigList.append(maskList)\n\n layers = [flow.RNVP(BigList[n], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),None]) for _ in range(4)], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),utils.ScalableTanh(4)]) for _ in range(4)]\n) for n in range(2*2*2)]\n length = 4\n depth = 4\n t = flow.TEBD(2,length,layers,depth,p)\n\n p = source.Gaussian([1,4,4])\n BigList = []\n for _ in range(2*2*2):\n maskList = []\n for n in range(4):\n if n %2==0:\n b = torch.zeros(1,4)\n i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)\n b.zero_()[:,i] = 1\n b=b.view(1,1,2,2)\n else:\n b = 1-b\n maskList.append(b)\n maskList = torch.cat(maskList,0).to(torch.float32)\n BigList.append(maskList)\n\n layers = [flow.RNVP(BigList[n], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),None]) for _ in range(4)], [utils.SimpleMLPreshape([4,32,32,4],[nn.ELU(),nn.ELU(),utils.ScalableTanh(4)]) for _ in range(4)]\n) for n in range(2*2*2)]\n length = 4\n depth = 4\n blankt = flow.TEBD(2,length,layers,depth,p)\n saveload(t,blankt)\n\nif __name__ == \"__main__\":\n test_bijective()", "import numpy as np\nimport torch\nimport torch.nn.functional as F\n\nimport scipy.sparse as sps\nfrom scipy.linalg import eigh, inv, det \nfrom numpy import zeros\nimport math\n\nfrom .source import Source\nfrom utils import roll\n\nclass Lattice:\n def __init__(self,L, d, BC='periodic'):\n self.L = L \n self.d = d\n self.shape = [L]*d \n self.Nsite = L**d \n self.BC = BC\n\n def move(self, idx, d, shift):\n coord = self.index2coord(idx)\n coord[d] += shift\n\n if self.BC != 'periodic':\n if (coord[d]>=self.L) or (coord[d]<0):\n return None\n #wrap around because of the PBC\n if (coord[d]>=self.L): coord[d] -= self.L; \n if (coord[d]<0): coord[d] += self.L; \n\n return self.coord2index(coord)\n\n def index2coord(self, idx):\n coord = zeros(self.d, int) \n for d in range(self.d):\n coord[self.d-d-1] = idx%self.L;\n idx /= self.L\n return coord \n\n def coord2index(self, coord):\n idx = coord[0]\n for d in range(1, self.d):\n idx *= self.L; \n idx += coord[d]\n return idx \n\nclass Hypercube(Lattice):\n def __init__(self,L, d, BC='periodic'):\n super(Hypercube, self).__init__(L, d, BC)\n self.Adj = zeros((self.Nsite,self.Nsite), int)\n for i in range(self.Nsite):\n for d in 
range(self.d):\n j = self.move(i, d, 1)\n\n if j is not None:\n self.Adj[i, j] = 1.0\n self.Adj[j, i] = 1.0\n\nclass Ising(Source):\n def __init__(self,L,d,T,name = None):\n if name is None:\n name = \"Ising_l\"+str(L)+\"_d\" +str(d)+\"_t\"+str(T)\n super(Ising,self).__init__([L**d],name)\n self.beta = 1.0\n self.lattice = Hypercube(L, d, 'periodic')\n self.K = self.lattice.Adj/T\n \n w, v = eigh(self.K) \n offset = 0.1-w.min()\n self.K += np.eye(w.size)*offset\n sign, logdet = np.linalg.slogdet(self.K)\n #print (sign)\n #print (0.5*self.nvars[0] *(np.log(4.)-offset - np.log(2.*np.pi)) - 0.5*logdet)\n Kinv = torch.from_numpy(inv(self.K)).to(torch.float32)\n self.register_buffer(\"Kinv\",Kinv)\n\n def energy(self,x):\n return -(-0.5*(torch.mm(x.reshape(-1, self.nvars[0]),self.Kinv) * x.reshape(-1, self.nvars[0])).sum(dim=1) \\\n + (torch.nn.Softplus()(2.*self.beta*x.reshape(-1, self.nvars[0])) - self.beta*x.reshape(-1, self.nvars[0]) - math.log(2.)).sum(dim=1))\n" ]
[ [ "torch.nn.ELU", "torch.cat", "torch.zeros" ], [ "torch.nn.Softplus", "numpy.linalg.slogdet", "numpy.eye", "scipy.linalg.eigh", "scipy.linalg.inv", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
shoneg/createMessdienerplan
[ "85830b257b58cc14ba5d08bbbcb5bbc4a28fd957" ]
[ "main.py" ]
[ "from exit_methods import *\n\ntry:\n import itertools\n import shutil\n import sys\n import warnings\n from os.path import exists\n from os import mkdir\n\n import numpy as np\n\n from table_utils import *\nexcept ImportError as e:\n print(\"Ein Import Fehler ist aufgetreten: \" + str(e))\n input(\"Drücke Enter, um das Programm zu beenden.\")\n exit_programm()\n\n\nclass Continue(Exception):\n pass\n\n\ndef add_messdiener_count(gottesdienste: pd.DataFrame, gottesdienst_arten: dict) -> pd.DataFrame:\n \"\"\"\n Asks interactive for count of messdiener per gottesdienst and gives as default value from gottesdienst-arten. Returns gottesdienste with extra column with the learned count.\n :param gottesdienste: table with Tag, Datum, Zeit, Gottesdienst, index\n :param gottesdienst_arten: if a gottesdienst not given, we assume zero\n :return: the given table with extra column Messdienerzahl\n \"\"\"\n print(\n \"Tippe die Zahl an Messdienern ein, die du für diesen Gottesdienst einteilen möchtest. Falls du keine Zahl tippst, wird die in den eckigen Klammern verwendet. Wenn du die vorherige Eingabe rückgängig machen möchtest, tippe -1\")\n counts = [0] * len(gottesdienste.index)\n ret = gottesdienste\n\n indexes = gottesdienste.index\n i = 0\n while i < len(indexes):\n gottesdienst = gottesdienste.iloc[i]\n # takes given from dict if existing else 0\n default_count = gottesdienst_arten[gottesdienst[GOTTESDIENST]] if gottesdienst[GOTTESDIENST] in gottesdienst_arten else 0\n # string to ask with\n question = f\"{gottesdienst[TAG]}, {gottesdienst[DATUM]} {gottesdienst[ZEIT]} - {gottesdienst[GOTTESDIENST]} [{default_count}]: \"\n # takes count for this gottesdienst\n count = int(input(question) or default_count)\n # if <0 we want to go one gottesdienst back\n if count < 0:\n i = max(0, i - 1)\n continue\n counts[i] = count\n i += 1\n if i >= len(indexes) and (str(input('Eingabe beenden und mit Zuteilung starten?[ja]: ')) or 'ja') != 'ja':\n i -= 1\n continue\n\n # add column\n ret[MESSDIENERZAHL] = counts\n # if 0 Messdiener needed we don't need this gottesdienst anymore, else it's in return table\n ret = ret[ret[MESSDIENERZAHL] > 0]\n\n return ret\n\n\ndef split_in_summanden(number: int):\n \"\"\"\n Calculates a list of all possibilities of splitting a number into summanden\n :param number:\n :return:\n \"\"\"\n # route\n if number == 1:\n return [[1]]\n # take first number iterating and then recursive the rest\n ret = [[number]]\n for i in np.arange(1, number):\n for j in split_in_summanden(number - i):\n ret.append(sorted([i] + j))\n return np.unique(ret)\n\n\ndef group_splittings(gottesdienste: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Adds a column with possible splittings of the Messdienerzahl column\n :param gottesdienste:\n :return: gottesdienste\n \"\"\"\n splittings = []\n # for every row calculate the splittings\n for count in gottesdienste[MESSDIENERZAHL]:\n splittings.append(split_in_summanden(count))\n gottesdienste[GRUPPEN_SPLITTINGS] = splittings\n return gottesdienste\n\n\ndef messdiener_column_to_string(gottesdienste: pd.DataFrame) -> pd.DataFrame:\n new_col = []\n for i, gottesdienst in gottesdienste.iterrows():\n messdiener = gottesdienst[MESSDIENER]\n names = messdiener[0][1]\n for j in np.arange(1, len(messdiener)):\n names += f', {messdiener[j][1]}'\n new_col.append(names)\n gottesdienste[MESSDIENER] = new_col\n return gottesdienste\n\n\ndef zuteilung(gottesdienste: pd.DataFrame, messdiener: pd.DataFrame):\n # set new column\n gottesdienste[MESSDIENER] = [[]] * 
len(gottesdienste.index)\n # short handle for later\n gd_index = gottesdienste.index\n # current best option\n best_gottesdienste = None\n best_messdiener = None\n best_max_einteilungen = sys.maxsize\n # iterate all possible group combinations\n for combi in list(itertools.product(*gottesdienste[GRUPPEN_SPLITTINGS].values)):\n # try-catch to continue when we see that there's a problem in this combi\n try:\n # working copies\n tmp_gottesdienste = gottesdienste.copy()\n tmp_messdiener = messdiener.copy()\n # setting one gottesdienst after another\n for i, dienst in enumerate(combi):\n # setting one group after another\n for group in dienst:\n # query possible messdiener for the group\n ids_in_use = [*map(lambda item: item[0], tmp_gottesdienste[MESSDIENER].values[i])]\n take3 = tmp_messdiener[tmp_messdiener[ANZAHL] == group]\n take2 = take3[~take3[BLACK_LIST_TAGE].str.contains(tmp_gottesdienste[TAG].values[i])]\n take1 = take2[~take2[ID].isin(ids_in_use)]\n take0 = take1[take1[EINTEILUNGEN] == take1[EINTEILUNGEN].min()]\n # if one found take first, else continue with next combi, 'cause this combi doesn't work\n if len(take0.values) > 0:\n take = take0.values[0]\n # add messdiener to gottesdienst and increment its einteilungen\n tmp_gottesdienste.at[gd_index[i], MESSDIENER] = (tmp_gottesdienste[MESSDIENER].values[i] or []) + [take]\n tmp_messdiener.at[tmp_messdiener[tmp_messdiener[ID] == take[0]].index[0], EINTEILUNGEN] += 1\n else:\n raise Continue()\n # if highest einteilungszahl is smaller than on best option replace\n tmp_max_einteilungen = tmp_messdiener[tmp_messdiener[EINTEILUNGEN] == tmp_messdiener[EINTEILUNGEN].max()].values[0][3]\n if best_max_einteilungen > tmp_max_einteilungen:\n best_gottesdienste = tmp_gottesdienste.copy()\n best_messdiener = tmp_messdiener.copy()\n best_max_einteilungen = tmp_max_einteilungen\n except Continue:\n continue\n return best_gottesdienste, best_messdiener\n\n\ndef remove_not_wanted_columns(gottesdienste: pd.DataFrame) -> pd.DataFrame:\n if MESSDIENERZAHL in gottesdienste:\n del gottesdienste[MESSDIENERZAHL]\n if GRUPPEN_SPLITTINGS in gottesdienste:\n del gottesdienste[GRUPPEN_SPLITTINGS]\n return gottesdienste\n\n\ndef reset_einteilungen(messdiener: pd.DataFrame) -> pd.DataFrame:\n new_einteilungen = []\n min = messdiener[messdiener[EINTEILUNGEN] == messdiener[EINTEILUNGEN].min()].values[0][3]\n for einteilung in messdiener[EINTEILUNGEN]:\n new_einteilungen.append(einteilung - min)\n messdiener[EINTEILUNGEN] = new_einteilungen\n return messdiener\n\n\ndef build_messdienerplanung(gottesdienst_arten: dict, gottesdienste: pd.DataFrame, messdiener: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n gottesdienste = add_messdiener_count(gottesdienste, gottesdienst_arten)\n print(\"Jetzt wird eine optimale Zuteilung gebildet, das kann einige Zeit dauern. Während dieser Zeit sollte die CPU-Auslastung hoch sein\")\n gottesdienste = group_splittings(gottesdienste)\n gottesdienste, messdiener = zuteilung(gottesdienste, messdiener)\n gottesdienste = messdiener_column_to_string(gottesdienste)\n return gottesdienste, messdiener\n\n\ndef prepare_files():\n for file_name in [MESSPLAN_INPUT, MESSDIENER_INPUT, GOTTESDIENST_ARTEN_INPUT]:\n if not exists(file_name):\n print(f'Die Datei {file_name} existiert nicht. 
Falls du den Namen der Datei dauerhaft ändern möchtest, kannst du das in der Datei constants.py machen')\n exit_programm()\n if not exists('output'):\n mkdir(\"output\")\n shutil.copy2(MESSDIENER_INPUT, MESSDIENER_OUTPUT)\n print(f'Die Datei {MESSDIENER_INPUT} wird während der Ausführung aktualisiere (Einteilungen), falls etwas schief läuft wurde die ursprungsdatei nach {MESSDIENER_OUTPUT} kopiert')\n if not exists('tmp'):\n mkdir(\"tmp\")\n\n\nif __name__ == '__main__':\n warnings.filterwarnings('ignore')\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', 400)\n\n prepare_files()\n docx_table_to_html_md_tabel(MESSPLAN_INPUT, MESSPLAN_TMP)\n gottesdienste = get_gottesdienstplan_from_html(MESSPLAN_TMP)\n messdiener = get_messdiener_from_csv(MESSDIENER_INPUT)\n gottesdienst_arten = get_gottesdienst_arten_from_json(GOTTESDIENST_ARTEN_INPUT)\n gottesdienste, messdiener = build_messdienerplanung(gottesdienst_arten, gottesdienste, messdiener)\n print(\"Die Zuteilung wurde erfolgreich abgeschlossen\")\n gottesdienste = remove_not_wanted_columns(gottesdienste)\n messdiener = reset_einteilungen(messdiener)\n export_table_to_excel(gottesdienste, MESSPLAN_OUTPUT)\n print(f\"Der Messdienerplan wurde nach {MESSPLAN_OUTPUT} exportiert\")\n export_table_to_csv(messdiener, MESSDIENER_INPUT)\n print(f'Die Einteilungen in {MESSDIENER_INPUT} wurden aktualisiert')\n try:\n shutil.rmtree(\"tmp\")\n except:\n print(\n 'Während der Ordner tmp gelöscht werden sollte ist ein Fehler aufgetreten. Falls der Ordner noch existiert, kannst du es manuell versuchen, es ist aber auch nicht schlimm, wenn du nichts tust')\n else:\n print(\"Temporäre Dateien wurden gelöscht\")\n\n exit_programm()\n" ]
[ [ "numpy.arange", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
coderkhaleesi/Computer-Vision-Traditional-Techniques
[ "ac24334e2f1762ad8ae450b6a01c9086474c132c" ]
[ "CameraGeometry_andTwoViewHomography/calibrate.py" ]
[ "# -*- coding: utf-8 -*-\r\n# CLAB3\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport cv2\r\n\r\nI = Image.open('stereo2012a.jpg');\r\nplt.imshow(I)\r\nuv = plt.ginput(12) # Graphical user interface to get 6 points\r\n\r\n#xyz coordinates (world coordinates)\r\nxyz = np.asarray([[7, 7, 0],\r\n [14, 7, 0],\r\n [21, 7, 0],\r\n [21, 14, 0],\r\n [0, 7, 7],\r\n [0, 14, 7],\r\n [0, 21, 7],\r\n [0, 21, 14],\r\n [7, 0, 7],\r\n [7, 0, 14],\r\n [7, 0, 21],\r\n [14, 0, 21]])\r\n\r\n'''\r\n%% TASK 1: CALIBRATE\r\n%\r\n% Function to perform camera calibration\r\n%\r\n% Usage: calibrate(image, XYZ, uv)\r\n% return C\r\n% Where: image - is the image of the calibration target.\r\n% XYZ - is a N x 3 array of XYZ coordinates\r\n% of the calibration target points.\r\n% uv - is a N x 2 array of the image coordinates\r\n% of the calibration target points.\r\n% K - is the 3 x 4 camera calibration matrix.\r\n% The variable N should be an integer greater than or equal to 6.\r\n%\r\n% This function plots the uv coordinates onto the image of the calibration\r\n% target.\r\n%\r\n% It also projects the XYZ coordinates back into image coordinates using\r\n% the calibration matrix and plots these points too as\r\n% a visual check on the accuracy of the calibration process.\r\n%\r\n% Lines from the origin to the vanishing points in the X, Y and Z\r\n% directions are overlaid on the image.\r\n%\r\n% The mean squared error between the positions of the uv coordinates\r\n% and the projected XYZ coordinates is also reported.\r\n%\r\n% The function should also report the error in satisfying the\r\n% camera calibration matrix constraints.\r\n%\r\n'''\r\n\r\n\r\n#####################################################################\r\ndef calibrate(im, XYZ, uv):\r\n X = XYZ[:, 0] #get X, Y, and Z\r\n Y = XYZ[:, 1]\r\n Z = XYZ[:, 2]\r\n\r\n u = [x[0] for x in uv] #Get u and v separately from tuples\r\n v = [x[1] for x in uv]\r\n\r\n num_points = XYZ.shape[0] #get the number of points marked\r\n\r\n #declare matrices A and b\r\n A = np.zeros((num_points*2, 11))\r\n b = np.zeros((num_points*2, 1))\r\n\r\n j=0\r\n for i in range(0, num_points*2, 2):\r\n #DLT algorithm from lectures\r\n A[i] = [X[j], Y[j], Z[j], 1, 0, 0, 0, 0, -u[j]*X[j], -u[j]*Y[j], -u[j]*Z[j]]\r\n A[i+1] = [0, 0, 0, 0, X[j], Y[j], Z[j], 1, -v[j]*X[j], -v[j]*Y[j], -v[j]*Z[j]]\r\n b[i] = u[j]\r\n b[i+1] = v[j]\r\n j += 1\r\n\r\n #The matrix is the solution to a linear least squares problem ||Ax - b||^2\r\n C = np.linalg.lstsq(A, b, rcond=None)[0]\r\n\r\n #these two should be equal, verify by printing\r\n # print(A@C)\r\n # print(uv)\r\n\r\n newrow = [1]\r\n C = np.vstack([C, newrow]) #append 1 (last entry) so that it can be reshaped to 3x4\r\n C = C.reshape((3,4))\r\n print(f\"{C}\")\r\n return C\r\n\r\n#This function is used to reconstruct u, v from the calibration matrix and X,Y,Z coordinates\r\ndef reconstruct_from_C(C, XYZ):\r\n num_points = XYZ.shape[0]\r\n XYZ = XYZ.T\r\n newrow = np.repeat([1], num_points)\r\n XYZ = np.vstack([XYZ, newrow]) #convert to homogenous coordinates\r\n\r\n reconstructed_uv = np.zeros((num_points, 2))\r\n uvw = C@XYZ\r\n\r\n r_u = uvw[0, :]/uvw[2, :]\r\n reconstructed_uv[:, 0] = r_u.T\r\n r_v = uvw[1, :]/uvw[2, :]\r\n reconstructed_uv[:, 1] = r_v.T #convert back to cartesian\r\n\r\n return reconstructed_uv\r\n\r\nC = calibrate(I, xyz, uv)\r\nrecons_uv = reconstruct_from_C(C, xyz)\r\n\r\nerror = ((recons_uv - uv)**2).mean()\r\nprint(f\"The error between actual u,v and 
reconstructed u,v from calibration matrix is: {error}\")\r\n\r\nu1 = [x[0] for x in uv]\r\nv1 = [x[1] for x in uv]\r\n\r\nu2 = recons_uv[:, 0]\r\nv2 = recons_uv[:, 1]\r\n\r\nplt.show()\r\nplt.imshow(I)\r\nplt.scatter(u1, v1, c='red', s=5, marker='x')\r\nplt.scatter(u2, v2, c='blue', s=4, marker='x')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n############################################################################\r\n\r\n'''\r\n%% TASK 2:\r\n% Computes the homography H applying the Direct Linear Transformation\r\n% The transformation is such that\r\n% p = np.matmul(H, p.T), i.e.,\r\n% (uBase, vBase, 1).T = np.matmul(H, (u2Trans , v2Trans, 1).T)\r\n% Note: we assume (a, b, c) => np.concatenate((a, b, c), axis), be careful when\r\n% deal the value of axis\r\n%\r\n% INPUTS:\r\n% u2Trans, v2Trans - vectors with coordinates u and v of the transformed image point (p')\r\n% uBase, vBase - vectors with coordinates u and v of the original base image point p\r\n%\r\n% OUTPUT\r\n% H - a 3x3 Homography matrix\r\n%\r\n% your name, date\r\n'''\r\n\r\n\r\nI_left = Image.open('left.jpg');\r\nplt.imshow(I_left)\r\ntry:\r\n uv_circ = plt.ginput(6) # Graphical user interface to get 6 points\r\nexcept Exception as e:\r\n print(\"ginput 1 failed\")\r\nplt.close('all')\r\nu_circ = [x[0] for x in uv_circ]\r\nv_circ = [x[1] for x in uv_circ]\r\n\r\nI_right = Image.open('right.jpg');\r\nplt.imshow(I_right)\r\ntry:\r\n uv = plt.ginput(6) # Graphical user interface to get 6 points\r\nexcept Exception as e:\r\n print(\"ginput 2 failed\")\r\n\r\nplt.close('all')\r\nu_base = [x[0] for x in uv]\r\nv_base = [x[1] for x in uv]\r\n\r\ndef homography(u2Trans, v2Trans, uBase, vBase):\r\n\r\n num_points = len(u2Trans)\r\n A = np.zeros((num_points*2, 9))\r\n\r\n j=0\r\n for i in range(0, num_points*2, 2): #Mapping points using formula from lectures\r\n print(i)\r\n A[i] = [u2Trans[j], v2Trans[j], 1, 0, 0, 0, -u2Trans[j]*uBase[j], -uBase[j]*v2Trans[j], -uBase[j]]\r\n A[i+1] = [0, 0, 0, u2Trans[j], v2Trans[j], 1, -u2Trans[j]*vBase[j], -v2Trans[j]*vBase[j], -vBase[j]]\r\n j += 1\r\n\r\n u, s, vh = np.linalg.svd(A, full_matrices=True) #SVD to solve the linear equation\r\n\r\n H = vh[-1, :]/vh[-1,-1]\r\n return H\r\n\r\nH_matrix = homography(u_circ, v_circ, u_base, v_base)\r\n\r\n#This function 'test_homography' uses the homography matrix calculated above to reconstruct points and compare original and\r\n#reconstructed points\r\n\r\ndef test_homography(H, base, circ):\r\n\r\n newrow = np.repeat([1],circ.shape[1])\r\n circ = np.vstack([circ, newrow])\r\n\r\n #H = H/H[-1]\r\n print(H)\r\n x = H.reshape((3,3))@circ\r\n\r\n r_u = x[0, :]/x[2, :]\r\n r_v = x[1, :]/x[2, :]\r\n reconstructed_base = np.asarray([r_u, r_v])\r\n print(reconstructed_base)\r\n print(base)\r\n\r\ncirc = np.asarray([u_circ, v_circ])\r\nprint(circ.shape)\r\nbase = np.asarray([u_base, v_base])\r\ntest_homography(H_matrix, base, circ)\r\n\r\n#Function to warp left image\r\ndef warp_img(img, H):\r\n #since it's a square image\r\n dst = np.zeros(img.shape)\r\n h, w = img.shape\r\n # print(h)\r\n # print(w)\r\n\r\n for x in range(h):\r\n for y in range(w):\r\n newrow = np.repeat([1],1)\r\n init_coords = np.vstack([x, y, newrow])\r\n u, v, s = H.reshape((3,3))@init_coords\r\n u = int(u/s)\r\n v = int(v/s) #no interpolation technique applied here, just converted to int\r\n\r\n if (u >= 0 and u < h) and (v >=0 and v < w):\r\n dst[u, v] = img[x,y]\r\n return dst\r\n\r\nI = cv2.imread('Left.jpg', 1);\r\nI = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\r\nwarped = warp_img(I, 
H_matrix)\r\nplt.imshow(warped, cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n\r\n############################################################################\r\ndef rq(A):\r\n # RQ factorisation\r\n\r\n [q,r] = np.linalg.qr(A.T) # numpy has QR decomposition, here we can do it\r\n # with Q: orthonormal and R: upper triangle. Apply QR\r\n # for the A-transpose, then A = (qr).T = [email protected] = RQ\r\n R = r.T\r\n Q = q.T\r\n return R,Q\r\n\r\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.linalg.svd", "matplotlib.pyplot.scatter", "numpy.asarray", "numpy.repeat", "numpy.linalg.lstsq", "matplotlib.pyplot.close", "numpy.linalg.qr", "matplotlib.pyplot.ginput", "matplotlib.pyplot.show", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Toni-SM/skrl
[ "15b429d89e3b8a1828b207d88463bf7090288d18", "15b429d89e3b8a1828b207d88463bf7090288d18", "15b429d89e3b8a1828b207d88463bf7090288d18", "15b429d89e3b8a1828b207d88463bf7090288d18" ]
[ "skrl/agents/torch/cem/cem.py", "docs/source/examples/isaacgym_sequential_shared_memory_eval.py", "docs/source/examples/omniisaacgym/ppo_cartpole.py", "docs/source/examples/isaacgym3/ppo_franka_cabinet.py" ]
[ "from typing import Union, Tuple, Dict, Any\n\nimport gym\nimport copy\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ....memories.torch import Memory\nfrom ....models.torch import Model\n\nfrom .. import Agent\n\n\nCEM_DEFAULT_CONFIG = {\n \"rollouts\": 16, # number of rollouts before updating\n \"percentile\": 0.70, # percentile to compute the reward bound [0, 1]\n\n \"discount_factor\": 0.99, # discount factor (gamma)\n \n \"learning_rate\": 1e-2, # learning rate\n \"learning_rate_scheduler\": None, # learning rate scheduler class (see torch.optim.lr_scheduler)\n \"learning_rate_scheduler_kwargs\": {}, # learning rate scheduler's kwargs (e.g. {\"step_size\": 1e-3})\n\n \"random_timesteps\": 0, # random exploration steps\n \"learning_starts\": 0, # learning starts after this many steps\n\n \"rewards_shaper\": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward\n\n \"experiment\": {\n \"directory\": \"\", # experiment's parent directory\n \"experiment_name\": \"\", # experiment name\n \"write_interval\": 250, # TensorBoard writing interval (timesteps)\n\n \"checkpoint_interval\": 1000, # interval for checkpoints (timesteps)\n \"checkpoint_policy_only\": True, # checkpoint for policy only\n }\n}\n\n\nclass CEM(Agent):\n def __init__(self, \n models: Dict[str, Model], \n memory: Union[Memory, Tuple[Memory], None] = None, \n observation_space: Union[int, Tuple[int], gym.Space, None] = None, \n action_space: Union[int, Tuple[int], gym.Space, None] = None, \n device: Union[str, torch.device] = \"cuda:0\", \n cfg: dict = {}) -> None:\n \"\"\"Cross-Entropy Method (CEM)\n\n https://ieeexplore.ieee.org/abstract/document/6796865/\n \n :param models: Models used by the agent\n :type models: dictionary of skrl.models.torch.Model\n :param memory: Memory to storage the transitions.\n If it is a tuple, the first element will be used for training and \n for the rest only the environment transitions will be added\n :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None\n :param observation_space: Observation/state space or shape (default: None)\n :type observation_space: int, tuple or list of integers, gym.Space or None, optional\n :param action_space: Action space or shape (default: None)\n :type action_space: int, tuple or list of integers, gym.Space or None, optional\n :param device: Computing device (default: \"cuda:0\")\n :type device: str or torch.device, optional\n :param cfg: Configuration dictionary\n :type cfg: dict\n\n :raises KeyError: If the models dictionary is missing a required key\n \"\"\"\n _cfg = copy.deepcopy(CEM_DEFAULT_CONFIG)\n _cfg.update(cfg)\n super().__init__(models=models, \n memory=memory, \n observation_space=observation_space, \n action_space=action_space, \n device=device, \n cfg=_cfg)\n\n # models\n self.policy = self.models.get(\"policy\", None)\n\n # checkpoint models\n self.checkpoint_models = self.models\n \n # configuration:\n self._rollouts = self.cfg[\"rollouts\"]\n self._rollout = 0\n\n self._percentile = self.cfg[\"percentile\"]\n self._discount_factor = self.cfg[\"discount_factor\"]\n\n self._learning_rate = self.cfg[\"learning_rate\"]\n self._learning_rate_scheduler = self.cfg[\"learning_rate_scheduler\"]\n self._learning_rate_scheduler_kwargs = self.cfg[\"learning_rate_scheduler_kwargs\"]\n \n self._random_timesteps = self.cfg[\"random_timesteps\"]\n self._learning_starts = self.cfg[\"learning_starts\"]\n \n self._rewards_shaper = self.cfg[\"rewards_shaper\"]\n\n self._episode_tracking = []\n\n 
# set up optimizer and learning rate scheduler\n if self.policy is not None:\n self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate)\n if self._learning_rate_scheduler is not None:\n self.scheduler = self._learning_rate_scheduler(self.optimizer, **self._learning_rate_scheduler_kwargs)\n\n def init(self) -> None:\n \"\"\"Initialize the agent\n \"\"\"\n super().init()\n \n # create tensors in memory\n if self.memory is not None:\n self.memory.create_tensor(name=\"states\", size=self.observation_space, dtype=torch.float32)\n self.memory.create_tensor(name=\"next_states\", size=self.observation_space, dtype=torch.float32)\n self.memory.create_tensor(name=\"actions\", size=self.action_space, dtype=torch.int64)\n self.memory.create_tensor(name=\"rewards\", size=1, dtype=torch.float32)\n self.memory.create_tensor(name=\"dones\", size=1, dtype=torch.bool)\n\n self.tensors_names = [\"states\", \"actions\", \"rewards\", \"next_states\", \"dones\"]\n\n def act(self, \n states: torch.Tensor, \n timestep: int, \n timesteps: int, \n inference: bool = False) -> torch.Tensor:\n \"\"\"Process the environment's states to make a decision (actions) using the main policy\n\n :param states: Environment's states\n :type states: torch.Tensor\n :param timestep: Current timestep\n :type timestep: int\n :param timesteps: Number of timesteps\n :type timesteps: int\n :param inference: Flag to indicate whether the model is making inference\n :type inference: bool\n\n :return: Actions\n :rtype: torch.Tensor\n \"\"\"\n # sample random actions\n # TODO, check for stochasticity\n if timestep < self._random_timesteps:\n return self.policy.random_act(states)\n\n # sample stochastic actions \n return self.policy.act(states, inference=inference)\n\n def record_transition(self, \n states: torch.Tensor, \n actions: torch.Tensor, \n rewards: torch.Tensor, \n next_states: torch.Tensor, \n dones: torch.Tensor, \n infos: Any, \n timestep: int, \n timesteps: int) -> None:\n \"\"\"Record an environment transition in memory\n \n :param states: Observations/states of the environment used to make the decision\n :type states: torch.Tensor\n :param actions: Actions taken by the agent\n :type actions: torch.Tensor\n :param rewards: Instant rewards achieved by the current actions\n :type rewards: torch.Tensor\n :param next_states: Next observations/states of the environment\n :type next_states: torch.Tensor\n :param dones: Signals to indicate that episodes have ended\n :type dones: torch.Tensor\n :param infos: Additional information about the environment\n :type infos: Any type supported by the environment\n :param timestep: Current timestep\n :type timestep: int\n :param timesteps: Number of timesteps\n :type timesteps: int\n \"\"\"\n super().record_transition(states, actions, rewards, next_states, dones, infos, timestep, timesteps)\n\n # reward shaping\n if self._rewards_shaper is not None:\n rewards = self._rewards_shaper(rewards, timestep, timesteps)\n \n if self.memory is not None:\n self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, dones=dones)\n for memory in self.secondary_memories:\n memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, dones=dones)\n\n # track episodes internally\n if self._rollout:\n indexes = torch.nonzero(dones)\n if indexes.numel():\n for i in indexes[:, 0]:\n self._episode_tracking[i.item()].append(self._rollout + 1)\n else:\n self._episode_tracking = [[0] for _ in range(rewards.size(-1))]\n\n 
def pre_interaction(self, timestep: int, timesteps: int) -> None:\n \"\"\"Callback called before the interaction with the environment\n\n :param timestep: Current timestep\n :type timestep: int\n :param timesteps: Number of timesteps\n :type timesteps: int\n \"\"\"\n pass\n\n def post_interaction(self, timestep: int, timesteps: int) -> None:\n \"\"\"Callback called after the interaction with the environment\n\n :param timestep: Current timestep\n :type timestep: int\n :param timesteps: Number of timesteps\n :type timesteps: int\n \"\"\"\n self._rollout += 1\n if not self._rollout % self._rollouts and timestep >= self._learning_starts:\n self._rollout = 0\n self._update(timestep, timesteps)\n\n # write tracking data and checkpoints\n super().post_interaction(timestep, timesteps)\n\n def _update(self, timestep: int, timesteps: int) -> None:\n \"\"\"Algorithm's main update step\n\n :param timestep: Current timestep\n :type timestep: int\n :param timesteps: Number of timesteps\n :type timesteps: int\n \"\"\"\n # sample all memory\n sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \\\n self.memory.sample_all(names=self.tensors_names)[0]\n\n with torch.no_grad():\n # compute discounted return threshold\n limits = []\n returns = []\n for e in range(sampled_rewards.size(-1)):\n for i, j in zip(self._episode_tracking[e][:-1], self._episode_tracking[e][1:]):\n limits.append([e + i, e + j])\n rewards = sampled_rewards[e + i: e + j]\n returns.append(torch.sum(rewards * self._discount_factor ** \\\n torch.arange(rewards.size(0), device=rewards.device).flip(-1).view(rewards.size())))\n\n if not len(returns):\n print(\"[WARNING] No returns to update. Consider increasing the number of rollouts\")\n return\n \n returns = torch.tensor(returns)\n return_threshold = torch.quantile(returns, self._percentile, dim=-1)\n \n # get elite states and actions\n indexes = torch.nonzero(returns >= return_threshold)\n elite_states = torch.cat([sampled_states[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0)\n elite_actions = torch.cat([sampled_actions[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0)\n\n # compute scores for the elite states\n scores = self.policy.act(elite_states)[2]\n\n # compute policy loss\n policy_loss = F.cross_entropy(scores, elite_actions.view(-1))\n\n # optimize policy\n self.optimizer.zero_grad()\n policy_loss.backward()\n self.optimizer.step()\n\n # update learning rate\n if self._learning_rate_scheduler:\n self.scheduler.step()\n\n # record data\n self.track_data(\"Loss / Policy loss\", policy_loss.item())\n\n self.track_data(\"Coefficient / Return threshold\", return_threshold.item())\n self.track_data(\"Coefficient / Mean discounted returns\", torch.mean(returns).item())\n \n if self._learning_rate_scheduler:\n self.track_data(\"Learning / Learning rate\", self.scheduler.get_last_lr()[0])\n", "import isaacgym\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Import the skrl components to build the RL system\nfrom skrl.models.torch import GaussianModel, DeterministicModel\nfrom skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG\nfrom skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG\nfrom skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG\nfrom skrl.trainers.torch import SequentialTrainer\nfrom skrl.envs.torch import wrap_env\nfrom skrl.envs.torch import load_isaacgym_env_preview2, load_isaacgym_env_preview3\n\n\n# Define only the policies for evaluation \nclass StochasticActor(GaussianModel):\n def 
__init__(self, observation_space, action_space, device, clip_actions=False,\n clip_log_std=True, min_log_std=-20, max_log_std=2):\n super().__init__(observation_space, action_space, device, clip_actions,\n clip_log_std, min_log_std, max_log_std)\n\n self.linear_layer_1 = nn.Linear(self.num_observations, 32)\n self.linear_layer_2 = nn.Linear(32, 32)\n self.mean_action_layer = nn.Linear(32, self.num_actions)\n self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))\n\n def compute(self, states, taken_actions):\n x = F.elu(self.linear_layer_1(states))\n x = F.elu(self.linear_layer_2(x))\n return torch.tanh(self.mean_action_layer(x)), self.log_std_parameter\n\nclass DeterministicActor(DeterministicModel):\n def __init__(self, observation_space, action_space, device, clip_actions = False):\n super().__init__(observation_space, action_space, device, clip_actions)\n\n self.linear_layer_1 = nn.Linear(self.num_observations, 32)\n self.linear_layer_2 = nn.Linear(32, 32)\n self.action_layer = nn.Linear(32, self.num_actions)\n\n def compute(self, states, taken_actions):\n x = F.elu(self.linear_layer_1(states))\n x = F.elu(self.linear_layer_2(x))\n return torch.tanh(self.action_layer(x))\n\n\n# Load and wrap the Isaac Gym environment.\n# The following lines are intended to support both versions (preview 2 and 3). \n# It tries to load from preview 3, but if it fails, it will try to load from preview 2\ntry:\n env = load_isaacgym_env_preview3(task_name=\"Cartpole\")\nexcept Exception as e:\n print(\"Isaac Gym (preview 3) failed: {}\\nTrying preview 2...\".format(e))\n env = load_isaacgym_env_preview2(\"Cartpole\")\nenv = wrap_env(env)\n\ndevice = env.device\n\n\n# Instantiate the agent's policies.\n# DDPG requires 4 models, visit its documentation for more details\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#spaces-and-models\nmodels_ddpg = {\"policy\": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}\n# TD3 requires 6 models, visit its documentation for more details\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#spaces-and-models\nmodels_td3 = {\"policy\": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}\n# SAC requires 5 models, visit its documentation for more details\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models\nmodels_sac = {\"policy\": StochasticActor(env.observation_space, env.action_space, device, clip_actions=True)}\n\n# load checkpoints\nmodels_ddpg[\"policy\"].load(\"./runs/22-02-06_19-37-44-874837_DDPG/checkpoints/8000_policy.pt\")\nmodels_td3[\"policy\"].load(\"./runs/22-02-06_19-28-48-436345_TD3/checkpoints/5000_policy.pt\")\nmodels_sac[\"policy\"].load(\"./runs/22-02-06_19-28-48-441161_SAC/checkpoints/3000_policy.pt\")\n\n\n# Configure and instantiate the agents.\n# Only modify some of the default configuration, visit its documentation to see all the options\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#configuration-and-hyperparameters\ncfg_ddpg = DDPG_DEFAULT_CONFIG.copy()\ncfg_ddpg[\"random_timesteps\"] = 0\n# logging to TensorBoard each 25 timesteps and ignore checkpoints\ncfg_ddpg[\"experiment\"][\"write_interval\"] = 25\ncfg_ddpg[\"experiment\"][\"checkpoint_interval\"] = 0\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#configuration-and-hyperparameters\ncfg_td3 = TD3_DEFAULT_CONFIG.copy()\ncfg_td3[\"random_timesteps\"] = 0\n# logging to TensorBoard each 25 
timesteps and ignore checkpoints\ncfg_td3[\"experiment\"][\"write_interval\"] = 25\ncfg_td3[\"experiment\"][\"checkpoint_interval\"] = 0\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters\ncfg_sac = SAC_DEFAULT_CONFIG.copy()\ncfg_sac[\"random_timesteps\"] = 0\n# logging to TensorBoard each 25 timesteps and ignore checkpoints\ncfg_sac[\"experiment\"][\"write_interval\"] = 25\ncfg_sac[\"experiment\"][\"checkpoint_interval\"] = 0\n\nagent_ddpg = DDPG(models=models_ddpg, \n memory=None, \n cfg=cfg_ddpg, \n observation_space=env.observation_space, \n action_space=env.action_space,\n device=device)\n\nagent_td3 = TD3(models=models_td3, \n memory=None, \n cfg=cfg_td3, \n observation_space=env.observation_space, \n action_space=env.action_space,\n device=device)\n\nagent_sac = SAC(models=models_sac, \n memory=None, \n cfg=cfg_sac, \n observation_space=env.observation_space, \n action_space=env.action_space,\n device=device)\n\n\n# Configure and instantiate the RL trainer\ncfg = {\"timesteps\": 8000, \"headless\": True}\ntrainer = SequentialTrainer(cfg=cfg, \n env=env, \n agents=[agent_ddpg, agent_td3, agent_sac],\n agents_scope=[])\n\n# evaluate the agents\ntrainer.eval()\n", "import torch\nimport torch.nn as nn\n\n# Import the skrl components to build the RL system\nfrom skrl.models.torch import GaussianModel, DeterministicModel\nfrom skrl.memories.torch import RandomMemory\nfrom skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG\nfrom skrl.resources.schedulers.torch import KLAdaptiveRL\nfrom skrl.trainers.torch import SequentialTrainer\nfrom skrl.envs.torch import wrap_env\nfrom skrl.envs.torch import load_omniverse_isaacgym_env\nfrom skrl.utils import set_seed\n\n\n# set the seed for reproducibility\nset_seed(42)\n\n\n# Define the models (stochastic and deterministic models) for the agent using helper classes.\n# - Policy: takes as input the environment's observation/state and returns an action\n# - Value: takes the state as input and provides a value to guide the policy\nclass Policy(GaussianModel):\n def __init__(self, observation_space, action_space, device, clip_actions=False,\n clip_log_std=True, min_log_std=-20, max_log_std=2):\n super().__init__(observation_space, action_space, device, clip_actions,\n clip_log_std, min_log_std, max_log_std)\n\n self.net = nn.Sequential(nn.Linear(self.num_observations, 32),\n nn.ELU(),\n nn.Linear(32, 32),\n nn.ELU(),\n nn.Linear(32, self.num_actions))\n self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))\n\n def compute(self, states, taken_actions):\n return self.net(states), self.log_std_parameter\n\nclass Value(DeterministicModel):\n def __init__(self, observation_space, action_space, device, clip_actions=False):\n super().__init__(observation_space, action_space, device, clip_actions)\n\n self.net = nn.Sequential(nn.Linear(self.num_observations, 32),\n nn.ELU(),\n nn.Linear(32, 32),\n nn.ELU(),\n nn.Linear(32, 1))\n\n def compute(self, states, taken_actions):\n return self.net(states)\n\n\n# Load and wrap the Omniverse Isaac Gym environment\nenv = load_omniverse_isaacgym_env(task_name=\"Cartpole\")\nenv = wrap_env(env)\n\ndevice = env.device\n\n\n# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)\nmemory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)\n\n\n# Instantiate the agent's models (function approximators).\n# PPO requires 2 models, visit its documentation for more details\n# 
https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models\nmodels_ppo = {\"policy\": Policy(env.observation_space, env.action_space, device),\n \"value\": Value(env.observation_space, env.action_space, device)}\n\n# Initialize the models' parameters (weights and biases) using a Gaussian distribution\nfor model in models_ppo.values():\n model.init_parameters(method_name=\"normal_\", mean=0.0, std=0.1) \n\n\n# Configure and instantiate the agent.\n# Only modify some of the default configuration, visit its documentation to see all the options\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters\ncfg_ppo = PPO_DEFAULT_CONFIG.copy()\ncfg_ppo[\"rollouts\"] = 16\ncfg_ppo[\"learning_epochs\"] = 8\ncfg_ppo[\"mini_batches\"] = 1 # 16 * 512 / 8192\ncfg_ppo[\"discount_factor\"] = 0.99\ncfg_ppo[\"lambda\"] = 0.95\ncfg_ppo[\"learning_rate\"] = 3e-4\ncfg_ppo[\"learning_rate_scheduler\"] = KLAdaptiveRL\ncfg_ppo[\"learning_rate_scheduler_kwargs\"] = {\"kl_threshold\": 0.008}\ncfg_ppo[\"random_timesteps\"] = 0\ncfg_ppo[\"learning_starts\"] = 0\ncfg_ppo[\"grad_norm_clip\"] = 1.0\ncfg_ppo[\"ratio_clip\"] = 0.2\ncfg_ppo[\"value_clip\"] = 0.2\ncfg_ppo[\"clip_predicted_values\"] = True\ncfg_ppo[\"entropy_loss_scale\"] = 0.0\ncfg_ppo[\"value_loss_scale\"] = 2.0\ncfg_ppo[\"kl_threshold\"] = 0\ncfg_ppo[\"rewards_shaper\"] = lambda rewards, timestep, timesteps: rewards * 0.1\n# logging to TensorBoard and write checkpoints each 16 and 80 timesteps respectively\ncfg_ppo[\"experiment\"][\"write_interval\"] = 16\ncfg_ppo[\"experiment\"][\"checkpoint_interval\"] = 80\n\nagent = PPO(models=models_ppo,\n memory=memory, \n cfg=cfg_ppo, \n observation_space=env.observation_space, \n action_space=env.action_space,\n device=device)\n\n\n# Configure and instantiate the RL trainer\ncfg_trainer = {\"timesteps\": 1600, \"headless\": True, \"progress_interval\": 160}\ntrainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)\n\n# start training\ntrainer.train()\n", "import isaacgym\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Import the skrl components to build the RL system\nfrom skrl.models.torch import GaussianModel, DeterministicModel\nfrom skrl.memories.torch import RandomMemory\nfrom skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG\nfrom skrl.resources.schedulers.torch import KLAdaptiveRL\nfrom skrl.trainers.torch import SequentialTrainer\nfrom skrl.envs.torch import wrap_env\nfrom skrl.envs.torch import load_isaacgym_env_preview2, load_isaacgym_env_preview3\nfrom skrl.utils import set_seed\n\n\n# set the seed for reproducibility\nset_seed(42)\n\n\n# Define the models (stochastic and deterministic models) for the agent using helper classes.\n# - Policy: takes as input the environment's observation/state and returns an action\n# - Value: takes the state as input and provides a value to guide the policy\nclass Policy(GaussianModel):\n def __init__(self, observation_space, action_space, device, clip_actions=False,\n clip_log_std=True, min_log_std=-20, max_log_std=2):\n super().__init__(observation_space, action_space, device, clip_actions,\n clip_log_std, min_log_std, max_log_std)\n\n self.net = nn.Sequential(nn.Linear(self.num_observations, 256),\n nn.ELU(),\n nn.Linear(256, 128),\n nn.ELU(),\n nn.Linear(128, 64),\n nn.ELU(),\n nn.Linear(64, self.num_actions))\n self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))\n\n def compute(self, states, taken_actions):\n return self.net(states), 
self.log_std_parameter\n\nclass Value(DeterministicModel):\n def __init__(self, observation_space, action_space, device, clip_actions=False):\n super().__init__(observation_space, action_space, device, clip_actions)\n\n self.net = nn.Sequential(nn.Linear(self.num_observations, 256),\n nn.ELU(),\n nn.Linear(256, 128),\n nn.ELU(),\n nn.Linear(128, 64),\n nn.ELU(),\n nn.Linear(64, 1))\n\n def compute(self, states, taken_actions):\n return self.net(states)\n\n\n# Load and wrap the Isaac Gym environment.\n# The following lines are intended to support both versions (preview 2 and 3). \n# It tries to load from preview 3, but if it fails, it will try to load from preview 2\ntry:\n env = load_isaacgym_env_preview3(task_name=\"FrankaCabinet\")\nexcept Exception as e:\n print(\"Isaac Gym (preview 3) failed: {}\\nTrying preview 2...\".format(e))\n env = load_isaacgym_env_preview2(\"FrankaCabinet\")\nenv = wrap_env(env)\n\ndevice = env.device\n\n\n# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)\nmemory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)\n\n\n# Instantiate the agent's models (function approximators).\n# PPO requires 2 models, visit its documentation for more details\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models\nmodels_ppo = {\"policy\": Policy(env.observation_space, env.action_space, device),\n \"value\": Value(env.observation_space, env.action_space, device)}\n\n# Initialize the models' parameters (weights and biases) using a Gaussian distribution\nfor model in models_ppo.values():\n model.init_parameters(method_name=\"normal_\", mean=0.0, std=0.1) \n\n\n# Configure and instantiate the agent.\n# Only modify some of the default configuration, visit its documentation to see all the options\n# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters\ncfg_ppo = PPO_DEFAULT_CONFIG.copy()\ncfg_ppo[\"rollouts\"] = 16\ncfg_ppo[\"learning_epochs\"] = 8\ncfg_ppo[\"mini_batches\"] = 8 # 16 * 4096 / 8192 \ncfg_ppo[\"discount_factor\"] = 0.99\ncfg_ppo[\"lambda\"] = 0.95\ncfg_ppo[\"learning_rate\"] = 5e-4\ncfg_ppo[\"learning_rate_scheduler\"] = KLAdaptiveRL\ncfg_ppo[\"learning_rate_scheduler_kwargs\"] = {\"kl_threshold\": 0.008}\ncfg_ppo[\"random_timesteps\"] = 0\ncfg_ppo[\"learning_starts\"] = 0\ncfg_ppo[\"grad_norm_clip\"] = 1.0\ncfg_ppo[\"ratio_clip\"] = 0.2\ncfg_ppo[\"value_clip\"] = 0.2\ncfg_ppo[\"clip_predicted_values\"] = True\ncfg_ppo[\"entropy_loss_scale\"] = 0.0\ncfg_ppo[\"value_loss_scale\"] = 2.0\ncfg_ppo[\"kl_threshold\"] = 0\ncfg_ppo[\"rewards_shaper\"] = lambda rewards, timestep, timesteps: rewards * 0.01\n# logging to TensorBoard and write checkpoints each 120 and 1200 timesteps respectively\ncfg_ppo[\"experiment\"][\"write_interval\"] = 120\ncfg_ppo[\"experiment\"][\"checkpoint_interval\"] = 1200\n\nagent = PPO(models=models_ppo,\n memory=memory, \n cfg=cfg_ppo, \n observation_space=env.observation_space, \n action_space=env.action_space,\n device=device)\n\n\n# Configure and instantiate the RL trainer\ncfg_trainer = {\"timesteps\": 24000, \"headless\": True}\ntrainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)\n\n# start training\ntrainer.train()\n" ]
[ [ "torch.mean", "torch.cat", "torch.tensor", "torch.quantile", "torch.no_grad", "torch.nonzero" ], [ "torch.nn.Linear", "torch.zeros" ], [ "torch.nn.Linear", "torch.nn.ELU", "torch.zeros" ], [ "torch.nn.Linear", "torch.nn.ELU", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
juroberttyb/DRL
[ "24c3b4bccd87384a3842283d970bd5d17f189e67", "24c3b4bccd87384a3842283d970bd5d17f189e67" ]
[ "spinup/algos/pytorch/ppo/robert/ppo.py", "spinup/algos/pytorch/vpg/robert_core.py" ]
[ "import numpy as np\nimport torch\nfrom torch.optim import Adam\nimport gym\nimport time\nimport spinup.algos.pytorch.ppo.core as core\nfrom spinup.utils.logx import EpochLogger\nfrom spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\nfrom spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs\n\nclass PPOBuffer:\n \"\"\"\n A buffer for storing trajectories experienced by a PPO agent interacting\n with the environment, and using Generalized Advantage Estimation (GAE-Lambda)\n for calculating the advantages of state-action pairs.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):\n self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)\n self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)\n self.adv_buf = np.zeros(size, dtype=np.float32)\n self.rew_buf = np.zeros(size, dtype=np.float32)\n self.ret_buf = np.zeros(size, dtype=np.float32)\n self.val_buf = np.zeros(size, dtype=np.float32)\n self.logp_buf = np.zeros(size, dtype=np.float32)\n self.gamma, self.lam = gamma, lam\n self.ptr, self.path_start_idx, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, val, logp):\n \"\"\"\n Append one timestep of agent-environment interaction to the buffer.\n \"\"\"\n assert self.ptr < self.max_size # buffer has to have room so you can store\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.logp_buf[self.ptr] = logp\n self.ptr += 1\n\n def finish_path(self, last_val=0):\n \"\"\"\n Call this at the end of a trajectory, or when one gets cut off\n by an epoch ending. This looks back in the buffer to where the\n trajectory started, and uses rewards and value estimates from\n the whole trajectory to compute advantage estimates with GAE-Lambda,\n as well as compute the rewards-to-go for each state, to use as\n the targets for the value function.\n\n The \"last_val\" argument should be 0 if the trajectory ended\n because the agent reached a terminal state (died), and otherwise\n should be V(s_T), the value function estimated for the last state.\n This allows us to bootstrap the reward-to-go calculation to account\n for timesteps beyond the arbitrary episode horizon (or epoch cutoff).\n \"\"\"\n\n path_slice = slice(self.path_start_idx, self.ptr)\n rews = np.append(self.rew_buf[path_slice], last_val)\n vals = np.append(self.val_buf[path_slice], last_val)\n \n # the next two lines implement GAE-Lambda advantage calculation\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)\n \n # the next line computes rewards-to-go, to be targets for the value function\n self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]\n \n self.path_start_idx = self.ptr\n\n def get(self):\n \"\"\"\n Call this at the end of an epoch to get all of the data from\n the buffer, with advantages appropriately normalized (shifted to have\n mean zero and std one). 
Also, resets some pointers in the buffer.\n \"\"\"\n assert self.ptr == self.max_size # buffer has to be full before you can get\n self.ptr, self.path_start_idx = 0, 0\n # the next two lines implement the advantage normalization trick\n adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)\n self.adv_buf = (self.adv_buf - adv_mean) / adv_std\n data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,\n adv=self.adv_buf, logp=self.logp_buf)\n return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in data.items()}\n\n\n\ndef ppo(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0, \n steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,\n vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1600,\n target_kl=0.01, logger_kwargs=dict(), save_freq=10):\n \"\"\"\n Proximal Policy Optimization (by clipping), \n\n with early stopping based on approximate KL\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: The constructor method for a PyTorch Module with a \n ``step`` method, an ``act`` method, a ``pi`` module, and a ``v`` \n module. The ``step`` method should accept a batch of observations \n and return:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``a`` (batch, act_dim) | Numpy array of actions for each \n | observation.\n ``v`` (batch,) | Numpy array of value estimates\n | for the provided observations.\n ``logp_a`` (batch,) | Numpy array of log probs for the\n | actions in ``a``.\n =========== ================ ======================================\n\n The ``act`` method behaves the same as ``step`` but only returns ``a``.\n\n The ``pi`` module's forward call should accept a batch of \n observations and optionally a batch of actions, and return:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``pi`` N/A | Torch Distribution object, containing\n | a batch of distributions describing\n | the policy for the provided observations.\n ``logp_a`` (batch,) | Optional (only returned if batch of\n | actions is given). Tensor containing \n | the log probability, according to \n | the policy, of the provided actions.\n | If actions not given, will contain\n | ``None``.\n =========== ================ ======================================\n\n The ``v`` module's forward call should accept a batch of observations\n and return:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``v`` (batch,) | Tensor containing the value estimates\n | for the provided observations. (Critical: \n | make sure to flatten this!)\n =========== ================ ======================================\n\n\n ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object \n you provided to PPO.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs of interaction (equivalent to\n number of policy updates) to perform.\n\n gamma (float): Discount factor. 
(Always between 0 and 1.)\n\n clip_ratio (float): Hyperparameter for clipping in the policy objective.\n Roughly: how far can the new policy go from the old policy while \n still profiting (improving the objective function)? The new policy \n can still go farther than the clip_ratio says, but it doesn't help\n on the objective anymore. (Usually small, 0.1 to 0.3.) Typically\n denoted by :math:`\\epsilon`. \n\n pi_lr (float): Learning rate for policy optimizer.\n\n vf_lr (float): Learning rate for value function optimizer.\n\n train_pi_iters (int): Maximum number of gradient descent steps to take \n on policy loss per epoch. (Early stopping may cause optimizer\n to take fewer than this.)\n\n train_v_iters (int): Number of gradient descent steps to take on \n value function per epoch.\n\n lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,\n close to 1.)\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n target_kl (float): Roughly what KL divergence we think is appropriate\n between new and old policies after an update. This will get used \n for early stopping. (Usually small, 0.01 or 0.05.)\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n\n # Special function to avoid certain slowdowns from PyTorch + MPI combo.\n setup_pytorch_for_mpi()\n\n # Set up logger and save configuration\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Random seed\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Instantiate environment\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n # Create actor-critic module\n ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)\n\n # Sync params across processes\n sync_params(ac)\n\n # Count variables\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d\\n'%var_counts)\n\n # Set up experience buffer\n local_steps_per_epoch = int(steps_per_epoch / num_procs())\n buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)\n\n # Set up function for computing PPO policy loss\n def compute_loss_pi(data):\n obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']\n\n # Policy loss\n pi, logp = ac.pi(obs, act)\n ratio = torch.exp(logp - logp_old)\n clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv\n loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()\n\n # Useful extra info\n approx_kl = (logp_old - logp).mean().item()\n ent = pi.entropy().mean().item()\n clipped = ratio.gt(1+clip_ratio) | ratio.lt(1-clip_ratio)\n clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()\n pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)\n\n return loss_pi, pi_info\n\n # Set up function for computing value loss\n def compute_loss_v(data):\n obs, ret = data['obs'], data['ret']\n return ((ac.v(obs) - ret)**2).mean()\n\n # Set up optimizers for policy and value function\n pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)\n vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n\n def update():\n data = buf.get()\n\n pi_l_old, pi_info_old = compute_loss_pi(data)\n pi_l_old = pi_l_old.item()\n v_l_old = compute_loss_v(data).item()\n\n # Train policy with multiple steps of gradient descent\n for i in range(train_pi_iters):\n 
pi_optimizer.zero_grad()\n loss_pi, pi_info = compute_loss_pi(data)\n kl = mpi_avg(pi_info['kl'])\n if kl > 1.5 * target_kl:\n logger.log('Early stopping at step %d due to reaching max kl.'%i)\n break\n loss_pi.backward()\n mpi_avg_grads(ac.pi) # average grads across MPI processes\n pi_optimizer.step()\n\n logger.store(StopIter=i)\n\n # Value function learning\n for i in range(train_v_iters):\n vf_optimizer.zero_grad()\n loss_v = compute_loss_v(data)\n loss_v.backward()\n mpi_avg_grads(ac.v) # average grads across MPI processes\n vf_optimizer.step()\n\n # Log changes from update\n kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']\n logger.store(LossPi=pi_l_old, LossV=v_l_old,\n KL=kl, Entropy=ent, ClipFrac=cf,\n DeltaLossPi=(loss_pi.item() - pi_l_old),\n DeltaLossV=(loss_v.item() - v_l_old))\n\n # Prepare for interaction with environment\n start_time = time.time()\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Main loop: collect experience in env and update/log each epoch\n for epoch in range(epochs):\n first_episode = True\n for t in range(local_steps_per_epoch):\n a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))\n\n if first_episode and epoch%16==0 and proc_id()==0:\n env.render()\n\n next_o, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # save and log\n buf.store(o, a, r, v, logp)\n logger.store(VVals=v)\n \n # Update obs (critical!)\n o = next_o\n\n timeout = ep_len == max_ep_len\n terminal = d or timeout\n epoch_ended = t==local_steps_per_epoch-1\n\n if terminal or epoch_ended:\n if epoch_ended and not(terminal):\n print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)\n # if trajectory didn't reach terminal state, bootstrap value target\n if d:\n #if verbose: print(\"terminal state encountered at %d steps.\" %ep_len, flush=True)\n v = 0 # , ent = 0, 0\n else:\n if timeout: # reach max_ep_len\n #if verbose: print(\"trajectory cut off at %d steps for reaching max episode len.\" %ep_len, flush=True)\n ...\n else: # epoch_ended\n ...\n #if verbose: print('trajectory cut off at %d steps for local epoch ended.' 
%ep_len, flush=True)\n _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32).reshape(1, -1))\n buf.finish_path(v)\n if terminal:\n # only save EpRet / EpLen if trajectory finished\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n first_episode = False\n\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, None)\n\n # Perform PPO update!\n update()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('ClipFrac', average_only=True)\n logger.log_tabular('StopIter', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import os\n os.environ['OMPI_ALLOW_RUN_AS_ROOT'] = '1'\n os.environ['OMPI_ALLOW_RUN_AS_ROOT_CONFIRM'] = '1'\n os.environ['OMPI_MCA_btl_vader_single_copy_mechanism'] = 'None'\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HumanoidStandup-v2') # Ant-v2, BipedalWalker-v3, HumanoidStandup-v2\n parser.add_argument('--hid', type=int, default=64)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--cpu', type=int, default=6)\n parser.add_argument('--steps', type=int, default=9600)\n parser.add_argument('--epochs', type=int, default=10000)\n parser.add_argument('--exp_name', type=str, default='ppo')\n args = parser.parse_args()\n\n mpi_fork(args.cpu) # run parallel code with mpi\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n ppo(lambda : gym.make(args.env), actor_critic=core.MLPActorCritic,\n ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma, \n seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,\n logger_kwargs=logger_kwargs)", "import numpy as np\nimport scipy.signal\nfrom gym.spaces import Box, Discrete\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions.normal import Normal\nfrom torch.distributions.categorical import Categorical\n\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\n\ndef discount_cumsum(x, discount):\n \"\"\"\n magic from rllab for computing discounted cumulative sums of vectors.\n\n input: \n vector x, \n [x0, \n x1, \n x2]\n\n output:\n [x0 + discount * x1 + discount^2 * x2, \n x1 + discount * x2,\n x2]\n \"\"\"\n return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], 
axis=0)[::-1]\n\n\nclass Actor(nn.Module):\n\n def _distribution(self, obs):\n raise NotImplementedError\n\n def _log_prob_from_distribution(self, pi, act):\n raise NotImplementedError\n\n def forward(self, obs, act=None):\n # Produce action distributions for given observations, and \n # optionally compute the log likelihood of given actions under\n # those distributions.\n pi = self._distribution(obs)\n logp_a = None\n if act is not None:\n logp_a = self._log_prob_from_distribution(pi, act)\n return pi, logp_a\n\nclass MLPCovarGaussActor(Actor):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n log_std = -0.5 * np.ones(act_dim, dtype=np.float32)\n log_covar = np.full((act_dim, act_dim), fill_value=-8, dtype=np.float32)\n np.fill_diagonal(log_covar, log_std)\n # print(\"log_covar\", log_covar)\n\n self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)\n\n log_covar = torch.as_tensor(log_covar)\n upper_log_covar = torch.triu(log_covar)\n self.log_covar = torch.nn.Parameter(torch.triu(upper_log_covar))\n\n @staticmethod\n def symmetric(X):\n return X.triu() + X.triu(1).transpose(-1, -2)\n\n def _distribution(self, obs):\n mu = self.mu_net(obs)\n # print(\"mu\", mu)\n # print(\"self.log_covar\", self.log_covar)\n sym_log_covar = MLPCovarGaussActor.symmetric(self.log_covar)\n # print(\"sym_log_covar\", sym_log_covar)\n covar = torch.exp(sym_log_covar)\n # print(\"covar\", covar)\n\n from torch.distributions.multivariate_normal import MultivariateNormal\n # print(\"MultivariateNormal(mu, covar)\", MultivariateNormal(mu, covar))\n return MultivariateNormal(mu, covar)\n\n def _log_prob_from_distribution(self, pi, act):\n # print(\"act\", act)\n # print(\"pi.log_prob(act)\", pi.log_prob(act))\n # print(\"pi.log_prob(act).sum(axis=-1)\", pi.log_prob(act).sum(axis=-1))\n return pi.log_prob(act) # .sum(axis=-1) # Last axis sum needed for Torch Normal distribution\n\nclass MLPCategoricalActor(Actor):\n \n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.logits_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)\n\n def _distribution(self, obs):\n logits = self.logits_net(obs)\n print(\"logits\", logits)\n print(\"Categorical(logits=logits)\", Categorical(logits=logits))\n return Categorical(logits=logits)\n\n def _log_prob_from_distribution(self, pi, act):\n print(\"act\", act)\n print(\"pi.log_prob(act)\", pi.log_prob(act))\n return pi.log_prob(act)\n\nclass MLPGaussianActor(Actor):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n log_std = -0.5 * np.ones(act_dim, dtype=np.float32)\n self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))\n self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)\n\n def _distribution(self, obs):\n mu = self.mu_net(obs)\n std = torch.exp(self.log_std)\n # print(\"self.log_std\", self.log_std)\n return Normal(mu, std)\n\n def _log_prob_from_distribution(self, pi, act):\n return pi.log_prob(act).sum(axis=-1) # Last axis sum needed for Torch Normal distribution\n\nclass MLPCritic(nn.Module):\n\n def __init__(self, obs_dim, hidden_sizes, activation):\n super().__init__()\n self.v_net = mlp([obs_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs):\n return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.\n\n\n\nclass MLPActorCritic(nn.Module):\n\n\n def __init__(self, observation_space, action_space, \n hidden_sizes=(64,64), activation=nn.Tanh):\n 
super().__init__()\n\n obs_dim = observation_space.shape[0]\n\n # policy builder depends on action space\n if isinstance(action_space, Box):\n print(\"using continuous actor\")\n self.pi = MLPCovarGaussActor(obs_dim, action_space.shape[0], hidden_sizes, activation)\n # self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)\n elif isinstance(action_space, Discrete):\n print(\"using categorical actor\")\n self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)\n\n # build value function\n self.v = MLPCritic(obs_dim, hidden_sizes, activation)\n\n def step(self, obs):\n with torch.no_grad():\n pi = self.pi._distribution(obs)\n a = pi.sample()\n logp_a = self.pi._log_prob_from_distribution(pi, a)\n v = self.v(obs)\n return a.numpy(), v.numpy(), logp_a.numpy()\n\n def act(self, obs):\n return self.step(obs)[0]" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.min", "torch.exp", "numpy.append", "torch.clamp", "numpy.zeros", "torch.as_tensor" ], [ "torch.nn.Sequential", "torch.distributions.categorical.Categorical", "torch.distributions.normal.Normal", "numpy.full", "torch.exp", "torch.nn.Linear", "numpy.ones", "torch.no_grad", "numpy.fill_diagonal", "numpy.isscalar", "numpy.prod", "torch.triu", "torch.distributions.multivariate_normal.MultivariateNormal", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SLAB-NLP/Akk
[ "baa07b0fdf8c7d8623fbd78508867c30a8a7ff6d" ]
[ "akkadian_bert/data_collators_bert.py" ]
[ "from typing import List, Union, Dict, Tuple\n\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom preprocessing.main_preprocess import MISSING_SIGN_CHAR\n\n\nclass DataCollatorForLanguageModelingAkkadian:\n \"\"\"\n Data collator used for language modeling.\n - collates batches of tensors, honoring their tokenizer's pad_token\n - preprocesses batches for masked language modeling\n \"\"\"\n\n def __init__(self, tokenizer, mlm_probability: float = 0.15, missing_sign=None):\n self.tokenizer = tokenizer\n self.mlm_probability: float = mlm_probability\n self.missing_sign = missing_sign if missing_sign else MISSING_SIGN_CHAR\n\n def __call__(\n self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n examples_input_ids = [e[\"input_ids\"] for e in examples]\n examples_labels = [e[\"labels\"] for e in examples]\n batch_input_ids = self._tensorize_batch(examples_input_ids)\n batch_labels = self._tensorize_batch(examples_labels)\n inputs, labels = self.mask_tokens(batch_input_ids, batch_labels)\n return {\"input_ids\": inputs, \"labels\": labels}\n\n def _tensorize_batch(\n self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]\n ) -> torch.Tensor:\n # In order to accept both lists of lists and lists of Tensors\n if isinstance(examples[0], (list, tuple)):\n examples = [torch.tensor(e, dtype=torch.long) for e in examples]\n length_of_first = examples[0].size(0)\n are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)\n if are_tensors_same_length:\n return torch.stack(examples, dim=0)\n else:\n if self.tokenizer._pad_token is None:\n raise ValueError(\n \"You are attempting to pad samples but the tokenizer you are using\"\n f\" ({self.tokenizer.__class__.__name__}) does not have one.\"\n )\n return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)\n\n def mask_tokens(self, inputs: torch.Tensor, labels) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n \"\"\"\n\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, self.mlm_probability)\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n missing_mask = inputs.eq(self.tokenizer.get_vocab()[self.missing_sign])\n probability_matrix.masked_fill_(missing_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n" ]
[ [ "torch.full", "torch.nn.utils.rnn.pad_sequence", "torch.tensor", "torch.bernoulli", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BangShiuh/plasmatools
[ "4e1ef0f187e1cc4b1fdf66f69fd557458cea0692" ]
[ "plasmatools/VVSystem.py" ]
[ "import numpy as np\n\nclass VVSystem:\n \"\"\"\n Calculate VV transfer rate coefficient\n \"\"\"\n def __init__(self, w_ei, x_ei, w_ej, x_ej, c, S, sigma, rm):\n \"\"\"\n param w_ei:\n The energy spacing between the vobrational energy levels for species i.\n Correspond to index v. [K]\n param w_ej:\n The energy spacing between the vobrational energy levels for species j.\n Correspond to index w. [K]\n param x_ei:\n The anharmonicity of species i. Correspond to index v. [K]\n param x_ej:\n The anharmonicity of species j. Correspond to index w. [K]\n param c:\n An adustable parameter for the short range interaction. [1/K]\n param S:\n An adustable parameter for the short range interaction.\n param sigma:\n The square root of collision cross section. [m]\n \"\"\"\n self.w_ei = w_ei\n self.x_ei = x_ei\n self.w_ej = w_ej\n self.x_ej = x_ej\n self.c = c #[K-1]\n self.S = S\n self.sigma = sigma\n self.kb = 1.381e-23\n self.rm = rm\n\n def dE(self, v, w):\n return self.w_ei * (1.0 - 2.0 * self.x_ei * v) - self.w_ej * (1.0 - 2.0 * self.x_ej * w)\n\n def lam(self, v, w, T):\n \"\"\"\n λ = 2^(-1.5) * (c/T) ^ 0.5 * |ΔE_v|\n \"\"\"\n return 2.0**(-1.5) * (self.c/T)**0.5 * np.abs(self.dE(v,w))\n\n def F(self, x):\n \"\"\"\n F(λ) = [3-exp(-2λ/3)] * exp(-2λ/3)\n \"\"\"\n return (3.0 - np.exp(-2.0 * x / 3.0)) * np.exp(-2.0 * x / 3.0)\n\n def S_vw(self, v, w, T):\n '''\n S(v->v-1, w-1->w) = 1/2 * S * T * v / (1-xe * v) * w / [1-xe * w] * F(λ),\n '''\n S_vw = 0.5 * self.S * T * v / (1 - self.x_ei * v) * w / (1 - self.x_ej * w) * self.F(self.lam(v,w,T))\n return S_vw\n\n def Z(self, T):\n return 4.0 * self.sigma * self.sigma * np.sqrt(np.pi * self.kb * T / 2.0 / self.rm)\n\n def k(self, v, w, T):\n return self.Z(T) * self.S_vw(v, w, T) * np.exp(-self.dE(v,w) / 2.0 / T)" ]
[ [ "numpy.exp", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
moritzj29/Examples
[ "63e8f1f84433eee4dec57cb7a4a9015a2239b837" ]
[ "SignalGenerators/Python/RsSmw_ScpiPackage/RsSmw_FileTransferWithProgress_Example.py" ]
[ "\"\"\"Example showing how you can transfer a big file to the instrument and from the instrument with showing the progress.\nSince the SMW is quite fast on data transfer, we slow it down by waiting for 100ms between each chunk transfer (1MB)\nThis way we see the transfer progress better and we do not need a file that is so big - let's take cca 20MB.\nFor big files, use the example without the time.sleep(0.1)\"\"\"\n\nimport time\nimport numpy as np\nfrom RsSmw import *\n\n\ndef my_transfer_handler(args):\n \"\"\"Function called each time a chunk of data is transferred\"\"\"\n total_size = args.total_size if args.total_size is not None else \"unknown\"\n print(f\"Context: '{args.context}{'with opc' if args.opc_sync else ''}', \"\n f\"chunk {args.chunk_ix}, \"\n f\"transferred {args.transferred_size} bytes, \"\n f\"total size {total_size}, \"\n f\"direction {'reading' if args.reading else 'writing'}, \"\n f\"data '{args.data}'\")\n if args.end_of_transfer:\n print('End of Transfer')\n # Slow down the transfer by 200ms to see the progress better\n time.sleep(0.1)\n\n\nRsSmw.assert_minimum_version('4.80.2')\nsmw = RsSmw('TCPIP::10.112.1.179::HISLIP')\nprint(smw.utilities.idn_string)\nsmw.utilities.reset()\n\npc_file = r'c:\\temp\\bigFile.bin'\ninstr_file = '/var/user/bigFileInstr.bin'\npc_file_back = r'c:\\temp\\bigFileBack.bin'\n\n# Generate a random file of 20MB size\nx1mb = 1024 * 1024\nwith open(pc_file, 'wb') as file:\n for x in range(20):\n file.write(np.random.bytes(x1mb))\n\n# Send the file to the instrument with events\nsmw.events.on_write_handler = my_transfer_handler\nsmw.utilities.data_chunk_size = x1mb\nprint(f'Sending file to the instrument...')\nsmw.utilities.send_file_from_pc_to_instrument(pc_file, instr_file)\nsmw.events.on_write_handler = None\nprint(f'Receiving file from the instrument...')\nsmw.events.on_read_handler = my_transfer_handler\nsmw.utilities.read_file_from_instrument_to_pc(instr_file, pc_file_back)\nsmw.events.on_read_handler = None\nsmw.close()\n" ]
[ [ "numpy.random.bytes" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HSE-LAMBDA/roerich
[ "17e178292593d1ea6a821b99705620ba066abd2a" ]
[ "roerich/viz.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef display(X, T, L, S, Ts, peaks=None, plot_peak_height=10, s_max=10):\n n = X.shape[1] + 1 if peaks is None else X.shape[1] + 2\n \n plt.figure(figsize=(12, n*2.5+0.25))\n \n for i in range(X.shape[1]):\n \n plt.subplot(n, 1, i+1)\n ax = X[:, i]\n plt.plot(T, ax, linewidth=2, label='Original signal', color='C0')\n for t in T[L == 1]:\n plt.plot([t]*2, [ax.min(), ax.max()], color='0', linestyle='--')\n plt.ylim(ax.min(), ax.max())\n plt.xlim(0, T.max())\n plt.xticks(size=16)\n plt.yticks(size=16)\n plt.legend(loc='upper left', fontsize=16)\n plt.tight_layout()\n \n score_plot_ix = n if peaks is None else n - 1\n plt.subplot(n, 1, score_plot_ix)\n plt.plot(Ts, S, linewidth=3, label=\"Change-point score\", color='C3')\n for t in T[L == 1]:\n plt.plot([t]*2, [-1, s_max], color='0', linestyle='--')\n \n # display find peaks #todo refactoring\n if peaks is not None:\n plt.subplot(n, 1, n)\n new_score_peaks = np.zeros(len(T))\n new_score_peaks[peaks] = plot_peak_height\n plt.plot(new_score_peaks, linewidth=3, label=\"Peaks\", color='C4')\n for t in T[L == 1]:\n plt.plot([t]*2, [-1, s_max], color='0', linestyle='--')\n \n plt.ylim(-1, s_max)\n plt.xlim(0, T.max())\n plt.xticks(size=16)\n plt.yticks(np.arange(0, s_max+1, 5), size=16)\n plt.xlabel(\"Time\", size=16)\n plt.legend(loc='upper left', fontsize=16)\n plt.tight_layout()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ummavi/pfrl-1
[ "e856a7cca30fcc3871024cdf7522d066006a5f0c", "e856a7cca30fcc3871024cdf7522d066006a5f0c", "e856a7cca30fcc3871024cdf7522d066006a5f0c", "e856a7cca30fcc3871024cdf7522d066006a5f0c", "e856a7cca30fcc3871024cdf7522d066006a5f0c" ]
[ "tests/agents_tests/basetest_training.py", "pfrl/agents/dpp.py", "pfrl/collections/prioritized.py", "pfrl/nn/concat_obs_and_action.py", "tests/agents_tests/test_double_categorical_dqn.py" ]
[ "import logging\nimport os\nimport tempfile\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\n\nimport pfrl\nfrom pfrl.experiments import (\n train_agent_async,\n train_agent_batch_with_evaluation,\n train_agent_with_evaluation,\n)\nfrom pfrl.experiments.evaluator import (\n batch_run_evaluation_episodes,\n run_evaluation_episodes,\n)\nfrom pfrl.utils import random_seed\n\n\nclass _TestTraining:\n @pytest.fixture(autouse=True)\n def set_tmp_paths(self):\n self.tmpdir = tempfile.mkdtemp()\n self.agent_dirname = os.path.join(self.tmpdir, \"agent_final\")\n self.rbuf_filename = os.path.join(self.tmpdir, \"rbuf.pkl\")\n\n def make_agent(self, env, gpu):\n raise NotImplementedError()\n\n def make_env_and_successful_return(self, test):\n raise NotImplementedError()\n\n def _test_training(self, gpu, steps=5000, load_model=False, require_success=True):\n\n random_seed.set_random_seed(1)\n logging.basicConfig(level=logging.DEBUG)\n\n env = self.make_env_and_successful_return(test=False)[0]\n test_env, successful_return = self.make_env_and_successful_return(test=True)\n agent = self.make_agent(env, gpu)\n\n if load_model:\n print(\"Load agent from\", self.agent_dirname)\n agent.load(self.agent_dirname)\n agent.replay_buffer.load(self.rbuf_filename)\n\n # Train\n train_agent_with_evaluation(\n agent=agent,\n env=env,\n steps=steps,\n outdir=self.tmpdir,\n eval_interval=200,\n eval_n_steps=None,\n eval_n_episodes=5,\n successful_score=1,\n eval_env=test_env,\n )\n\n # Test\n n_test_runs = 5\n eval_returns, _ = run_evaluation_episodes(\n test_env,\n agent,\n n_steps=None,\n n_episodes=n_test_runs,\n )\n n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)\n if require_success:\n assert n_succeeded == n_test_runs\n\n # Save\n agent.save(self.agent_dirname)\n agent.replay_buffer.save(self.rbuf_filename)\n\n @pytest.mark.slow\n @pytest.mark.gpu\n def test_training_gpu(self):\n self._test_training(0, steps=100000)\n self._test_training(0, steps=0, load_model=True)\n\n @pytest.mark.slow\n def test_training_cpu(self):\n self._test_training(-1, steps=100000)\n self._test_training(-1, steps=0, load_model=True)\n\n @pytest.mark.gpu\n def test_training_gpu_fast(self):\n self._test_training(0, steps=10, require_success=False)\n self._test_training(0, steps=0, load_model=True, require_success=False)\n\n def test_training_cpu_fast(self):\n self._test_training(-1, steps=10, require_success=False)\n self._test_training(-1, steps=0, load_model=True, require_success=False)\n\n\nclass _TestBatchTrainingMixin(object):\n \"\"\"Mixin for testing batch training.\n\n Inherit this after _TestTraining to enable test cases for batch training.\n \"\"\"\n\n def make_vec_env_and_successful_return(self, test, num_envs=2):\n successful_return = self.make_env_and_successful_return(test=test)[1]\n vec_env = pfrl.envs.SerialVectorEnv(\n [self.make_env_and_successful_return(test=test)[0] for _ in range(num_envs)]\n )\n return vec_env, successful_return\n\n def _test_batch_training(\n self, gpu, steps=5000, load_model=False, require_success=True\n ):\n\n random_seed.set_random_seed(1)\n logging.basicConfig(level=logging.DEBUG)\n\n env, _ = self.make_vec_env_and_successful_return(test=False)\n test_env, successful_return = self.make_vec_env_and_successful_return(test=True)\n agent = self.make_agent(env, gpu)\n\n if load_model:\n print(\"Load agent from\", self.agent_dirname)\n agent.load(self.agent_dirname)\n agent.replay_buffer.load(self.rbuf_filename)\n\n # Train\n train_agent_batch_with_evaluation(\n 
agent=agent,\n env=env,\n steps=steps,\n outdir=self.tmpdir,\n eval_interval=200,\n eval_n_steps=None,\n eval_n_episodes=5,\n successful_score=1,\n eval_env=test_env,\n )\n env.close()\n\n # Test\n n_test_runs = 5\n eval_returns, _ = batch_run_evaluation_episodes(\n test_env,\n agent,\n n_steps=None,\n n_episodes=n_test_runs,\n )\n test_env.close()\n n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)\n if require_success:\n assert n_succeeded == n_test_runs\n\n # Save\n agent.save(self.agent_dirname)\n agent.replay_buffer.save(self.rbuf_filename)\n\n @pytest.mark.slow\n @pytest.mark.gpu\n def test_batch_training_gpu(self):\n self._test_batch_training(0, steps=100000)\n self._test_batch_training(0, steps=0, load_model=True)\n\n @pytest.mark.slow\n def test_batch_training_cpu(self):\n self._test_batch_training(-1, steps=100000)\n self._test_batch_training(-1, steps=0, load_model=True)\n\n @pytest.mark.gpu\n def test_batch_training_gpu_fast(self):\n self._test_batch_training(0, steps=10, require_success=False)\n self._test_batch_training(0, steps=0, load_model=True, require_success=False)\n\n def test_batch_training_cpu_fast(self):\n self._test_batch_training(-1, steps=10, require_success=False)\n self._test_batch_training(-1, steps=0, load_model=True, require_success=False)\n\n\nclass _TestActorLearnerTrainingMixin(object):\n \"\"\"Mixin for testing actor-learner training.\n Inherit this after _TestTraining to enable test cases for batch training.\n \"\"\"\n\n def _test_actor_learner_training(self, gpu, steps=100000, require_success=True):\n\n logging.basicConfig(level=logging.DEBUG)\n\n test_env, successful_return = self.make_env_and_successful_return(test=True)\n agent = self.make_agent(test_env, gpu)\n\n # cumulative_steps init to 0\n assert agent.cumulative_steps == 0\n\n def make_env(process_idx, test):\n env, _ = self.make_env_and_successful_return(test=test)\n return env\n\n step_hook = mock.Mock()\n optimizer_step_hook = mock.Mock()\n\n # Train\n if steps > 0:\n (\n make_actor,\n learner,\n poller,\n exception_event,\n ) = agent.setup_actor_learner_training(\n n_actors=2,\n step_hooks=[step_hook],\n optimizer_step_hooks=[optimizer_step_hook],\n )\n\n poller.start()\n learner.start()\n train_agent_async(\n processes=2,\n steps=steps,\n outdir=self.tmpdir,\n eval_interval=200,\n eval_n_steps=None,\n eval_n_episodes=5,\n successful_score=successful_return,\n make_env=make_env,\n make_agent=make_actor,\n stop_event=learner.stop_event,\n exception_event=exception_event,\n )\n learner.stop()\n learner.join()\n poller.stop()\n poller.join()\n\n # Test\n\n # Because in actor-learner traininig the model can be updated between\n # evaluation and saving, it is difficult to guarantee the learned\n # model successfully passes the test.\n # Thus we only check if the training was successful.\n\n # As the test can finish before running all the steps,\n # we can only test the range\n assert agent.cumulative_steps > 0\n assert agent.cumulative_steps <= steps + 1\n\n # Unlike the non-actor-learner cases, the step_hooks and\n # optimizer_step_hooks are only called when the update happens\n # when we do a fast test, the update may not be triggered due to\n # limited amount of experience, the call_count can be 0 in such case\n assert step_hook.call_count >= 0\n assert step_hook.call_count <= steps / agent.update_interval\n assert optimizer_step_hook.call_count == step_hook.call_count\n\n for i, call in enumerate(step_hook.call_args_list):\n args, kwargs = call\n assert args[0] is None\n 
assert args[1] is agent\n assert args[2] == (i + 1) * agent.update_interval\n\n for i, call in enumerate(optimizer_step_hook.call_args_list):\n args, kwargs = call\n assert args[0] is None\n assert args[1] is agent\n assert args[2] == i + 1\n\n successful_path = os.path.join(self.tmpdir, \"successful\")\n finished_path = os.path.join(self.tmpdir, \"{}_finish\".format(steps))\n if require_success:\n assert os.path.exists(successful_path)\n else:\n assert os.path.exists(successful_path) or os.path.exists(finished_path)\n\n @pytest.mark.async_\n @pytest.mark.slow\n @pytest.mark.gpu\n def test_actor_learner_training_gpu(self):\n self._test_actor_learner_training(0, steps=100000)\n\n @pytest.mark.async_\n @pytest.mark.slow\n def test_actor_learner_training_cpu(self):\n self._test_actor_learner_training(-1, steps=100000)\n\n @pytest.mark.async_\n @pytest.mark.gpu\n def test_actor_learner_training_gpu_fast(self):\n self._test_actor_learner_training(0, steps=10, require_success=False)\n\n @pytest.mark.async_\n def test_actor_learner_training_cpu_fast(self):\n self._test_actor_learner_training(-1, steps=10, require_success=False)\n", "from abc import ABCMeta, abstractmethod\n\nimport torch\n\nfrom pfrl.agents.dqn import DQN\nfrom pfrl.utils.recurrent import pack_and_forward\n\n\nclass AbstractDPP(DQN, metaclass=ABCMeta):\n \"\"\"Dynamic Policy Programming.\n\n See: https://arxiv.org/abs/1004.2027.\n \"\"\"\n\n @abstractmethod\n def _l_operator(self, qout):\n raise NotImplementedError()\n\n def _compute_target_values(self, exp_batch):\n\n batch_next_state = exp_batch[\"next_state\"]\n\n if self.recurrent:\n target_next_qout, _ = pack_and_forward(\n self.target_model,\n batch_next_state,\n exp_batch[\"next_recurrent_state\"],\n )\n else:\n target_next_qout = self.target_model(batch_next_state)\n next_q_expect = self._l_operator(target_next_qout)\n\n batch_rewards = exp_batch[\"reward\"]\n batch_terminal = exp_batch[\"is_state_terminal\"]\n\n return (\n batch_rewards + exp_batch[\"discount\"] * (1 - batch_terminal) * next_q_expect\n )\n\n def _compute_y_and_t(self, exp_batch):\n\n batch_state = exp_batch[\"state\"]\n batch_size = len(exp_batch[\"reward\"])\n\n if self.recurrent:\n qout, _ = pack_and_forward(\n self.model,\n batch_state,\n exp_batch[\"recurrent_state\"],\n )\n else:\n qout = self.model(batch_state)\n\n batch_actions = exp_batch[\"action\"]\n # Q(s_t,a_t)\n batch_q = qout.evaluate_actions(batch_actions).reshape((batch_size, 1))\n\n with torch.no_grad():\n # Compute target values\n if self.recurrent:\n target_qout, _ = pack_and_forward(\n self.target_model,\n batch_state,\n exp_batch[\"recurrent_state\"],\n )\n else:\n target_qout = self.target_model(batch_state)\n\n # Q'(s_t,a_t)\n target_q = target_qout.evaluate_actions(batch_actions).reshape(\n (batch_size, 1)\n )\n\n # LQ'(s_t,a)\n target_q_expect = self._l_operator(target_qout).reshape((batch_size, 1))\n\n # r + g * LQ'(s_{t+1},a)\n batch_q_target = self._compute_target_values(exp_batch).reshape(\n (batch_size, 1)\n )\n\n # Q'(s_t,a_t) + r + g * LQ'(s_{t+1},a) - LQ'(s_t,a)\n t = target_q + batch_q_target - target_q_expect\n\n return batch_q, t\n\n\nclass DPP(AbstractDPP):\n \"\"\"Dynamic Policy Programming with softmax operator.\n\n Args:\n eta (float): Positive constant.\n\n For other arguments, see DQN.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.eta = kwargs.pop(\"eta\", 1.0)\n super().__init__(*args, **kwargs)\n\n def _l_operator(self, qout):\n return qout.compute_expectation(self.eta)\n\n\nclass 
DPPL(AbstractDPP):\n \"\"\"Dynamic Policy Programming with L operator.\n\n Args:\n eta (float): Positive constant.\n\n For other arguments, see DQN.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.eta = kwargs.pop(\"eta\", 1.0)\n super().__init__(*args, **kwargs)\n\n def _l_operator(self, qout):\n return torch.logsumexp(self.eta * qout.q_values, dim=1) / self.eta\n\n\nclass DPPGreedy(AbstractDPP):\n \"\"\"Dynamic Policy Programming with max operator.\n\n This algorithm corresponds to DPP with eta = infinity.\n \"\"\"\n\n def _l_operator(self, qout):\n return qout.max\n", "import collections\nfrom numbers import Number\nfrom typing import (\n Any,\n Callable,\n Deque,\n Generic,\n List,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n)\n\nimport numpy as np\n\nfrom pfrl.utils.random import sample_n_k\n\nT = TypeVar(\"T\")\n\n\nclass PrioritizedBuffer(Generic[T]):\n def __init__(\n self,\n capacity: Optional[int] = None,\n wait_priority_after_sampling: bool = True,\n initial_max_priority: float = 1.0,\n ):\n self.capacity = capacity\n self.data: Deque = collections.deque()\n self.priority_sums = SumTreeQueue()\n self.priority_mins = MinTreeQueue()\n self.max_priority = initial_max_priority\n self.wait_priority_after_sampling = wait_priority_after_sampling\n self.flag_wait_priority = False\n\n def __len__(self) -> int:\n return len(self.data)\n\n def append(self, value: T, priority: Optional[float] = None) -> None:\n if self.capacity is not None and len(self) == self.capacity:\n self.popleft()\n if priority is None:\n # Append with the highest priority\n priority = self.max_priority\n\n self.data.append(value)\n self.priority_sums.append(priority)\n self.priority_mins.append(priority)\n\n def popleft(self) -> T:\n assert len(self) > 0\n self.priority_sums.popleft()\n self.priority_mins.popleft()\n return self.data.popleft()\n\n def _sample_indices_and_probabilities(\n self, n: int, uniform_ratio: float\n ) -> Tuple[List[int], List[float], float]:\n total_priority: float = self.priority_sums.sum()\n min_prob = self.priority_mins.min() / total_priority\n indices = []\n priorities = []\n if uniform_ratio > 0:\n # Mix uniform samples and prioritized samples\n n_uniform = np.random.binomial(n, uniform_ratio)\n un_indices, un_priorities = self.priority_sums.uniform_sample(\n n_uniform, remove=self.wait_priority_after_sampling\n )\n indices.extend(un_indices)\n priorities.extend(un_priorities)\n n -= n_uniform\n min_prob = uniform_ratio / len(self) + (1 - uniform_ratio) * min_prob\n\n pr_indices, pr_priorities = self.priority_sums.prioritized_sample(\n n, remove=self.wait_priority_after_sampling\n )\n indices.extend(pr_indices)\n priorities.extend(pr_priorities)\n\n probs = [\n uniform_ratio / len(self) + (1 - uniform_ratio) * pri / total_priority\n for pri in priorities\n ]\n return indices, probs, min_prob\n\n def sample(\n self, n: int, uniform_ratio: float = 0\n ) -> Tuple[List[T], List[float], float]:\n \"\"\"Sample data along with their corresponding probabilities.\n\n Args:\n n (int): Number of data to sample.\n uniform_ratio (float): Ratio of uniformly sampled data.\n Returns:\n sampled data (list)\n probabitilies (list)\n \"\"\"\n assert not self.wait_priority_after_sampling or not self.flag_wait_priority\n indices, probabilities, min_prob = self._sample_indices_and_probabilities(\n n, uniform_ratio=uniform_ratio\n )\n sampled = [self.data[i] for i in indices]\n self.sampled_indices = indices\n self.flag_wait_priority = True\n return sampled, probabilities, min_prob\n\n def 
set_last_priority(self, priority: Sequence[float]) -> None:\n assert not self.wait_priority_after_sampling or self.flag_wait_priority\n assert all([p > 0.0 for p in priority])\n assert len(self.sampled_indices) == len(priority)\n for i, p in zip(self.sampled_indices, priority):\n self.priority_sums[i] = p\n self.priority_mins[i] = p\n self.max_priority = max(self.max_priority, p)\n self.flag_wait_priority = False\n self.sampled_indices = []\n\n def _uniform_sample_indices_and_probabilities(\n self, n: int\n ) -> Tuple[List[int], List[float]]:\n indices = list(sample_n_k(len(self.data), n))\n probabilities = [1 / len(self)] * len(indices)\n return indices, probabilities\n\n\n# Implement operations on nodes of SumTreeQueue\n\n\n# node = left_child, right_child, value\nNode = List[Any]\nV = TypeVar(\"V\")\nAggregator = Callable[[Sequence[V]], V]\n\n\ndef _expand(node: Node) -> None:\n if not node:\n node[:] = [], [], None\n\n\ndef _reduce(node: Node, op: Aggregator) -> None:\n assert node\n left_node, right_node, _ = node\n parent_value = []\n if left_node:\n parent_value.append(left_node[2])\n if right_node:\n parent_value.append(right_node[2])\n if parent_value:\n node[2] = op(parent_value)\n else:\n del node[:]\n\n\ndef _write(\n index_left: int,\n index_right: int,\n node: Node,\n key: int,\n value: Optional[V],\n op: Aggregator,\n) -> Optional[V]:\n if index_right - index_left == 1:\n if node:\n ret = node[2]\n else:\n ret = None\n if value is None:\n del node[:]\n else:\n node[:] = None, None, value\n else:\n _expand(node)\n node_left, node_right, _ = node\n index_center = (index_left + index_right) // 2\n if key < index_center:\n ret = _write(index_left, index_center, node_left, key, value, op)\n else:\n ret = _write(index_center, index_right, node_right, key, value, op)\n _reduce(node, op)\n return ret\n\n\nclass TreeQueue(Generic[V]):\n \"\"\"Queue with Binary Indexed Tree cache\n\n queue-like data structure\n append, update are O(log n)\n reduction over an interval is O(log n) per query\n \"\"\"\n\n root: Node\n bounds: Tuple[int, int]\n\n def __init__(self, op: Aggregator):\n self.length = 0\n self.op = op\n\n def __setitem__(self, ix: int, val: V) -> None:\n assert 0 <= ix < self.length\n assert val is not None\n self._write(ix, val)\n\n def _write(self, ix: int, val: Optional[V]) -> Optional[V]:\n ixl, ixr = self.bounds\n return _write(ixl, ixr, self.root, ix, val, self.op)\n\n def append(self, value: V) -> None:\n if self.length == 0:\n self.root = [None, None, value]\n self.bounds = 0, 1\n self.length = 1\n return\n\n ixl, ixr = self.bounds\n root = self.root\n if ixr == self.length:\n _, _, root_value = root\n self.root = [self.root, [], root_value]\n ixr += ixr - ixl\n self.bounds = ixl, ixr\n ret = self._write(self.length, value)\n assert ret is None\n self.length += 1\n\n def popleft(self) -> Optional[V]:\n assert self.length > 0\n ret = self._write(0, None)\n ixl, ixr = self.bounds\n ixl -= 1\n ixr -= 1\n self.length -= 1\n if self.length == 0:\n del self.root\n del self.bounds\n return ret\n\n ixc = (ixl + ixr) // 2\n if ixc == 0:\n ixl = ixc\n _, self.root, _ = self.root\n self.bounds = ixl, ixr\n return ret\n\n\ndef _find(index_left: int, index_right: int, node: Node, pos: Number) -> int:\n if index_right - index_left == 1:\n return index_left\n else:\n node_left, node_right, _ = node\n index_center = (index_left + index_right) // 2\n if node_left:\n left_value = node_left[2]\n else:\n left_value = 0.0\n if pos < left_value:\n return _find(index_left, index_center, 
node_left, pos)\n else:\n return _find(index_center, index_right, node_right, pos - left_value)\n\n\nclass SumTreeQueue(TreeQueue[float]):\n \"\"\"Fast weighted sampling.\n\n queue-like data structure\n append, update are O(log n)\n summation over an interval is O(log n) per query\n \"\"\"\n\n def __init__(self):\n super().__init__(op=sum)\n\n def sum(self) -> float:\n if self.length == 0:\n return 0.0\n else:\n return self.root[2]\n\n def uniform_sample(self, n: int, remove: bool) -> Tuple[List[int], List[float]]:\n assert n >= 0\n ixs = list(sample_n_k(self.length, n))\n vals: List[float] = []\n if n > 0:\n for ix in ixs:\n val = self._write(ix, 0.0)\n assert val is not None\n vals.append(val)\n\n if not remove:\n for ix, val in zip(ixs, vals):\n self._write(ix, val)\n\n return ixs, vals\n\n def prioritized_sample(self, n: int, remove: bool) -> Tuple[List[int], List[float]]:\n assert n >= 0\n ixs: List[int] = []\n vals: List[float] = []\n if n > 0:\n root = self.root\n ixl, ixr = self.bounds\n for _ in range(n):\n ix = _find(ixl, ixr, root, np.random.uniform(0.0, root[2]))\n val = self._write(ix, 0.0)\n assert val is not None\n ixs.append(ix)\n vals.append(val)\n\n if not remove:\n for ix, val in zip(ixs, vals):\n self._write(ix, val)\n\n return ixs, vals\n\n\nclass MinTreeQueue(TreeQueue[float]):\n def __init__(self):\n super().__init__(op=min)\n\n def min(self) -> float:\n if self.length == 0:\n return np.inf\n else:\n return self.root[2]\n", "import torch\n\nfrom pfrl.nn.lmbda import Lambda\n\n\ndef concat_obs_and_action(obs_and_action):\n \"\"\"Concat observation and action to feed the critic.\"\"\"\n assert len(obs_and_action) == 2\n return torch.cat(obs_and_action, dim=-1)\n\n\nclass ConcatObsAndAction(Lambda):\n def __init__(self):\n return super().__init__(concat_obs_and_action)\n", "import basetest_dqn_like as base\nimport torch.nn as nn\nfrom basetest_training import _TestBatchTrainingMixin\n\nimport pfrl\nfrom pfrl.agents import CategoricalDoubleDQN\n\n\ndef make_distrib_ff_q_func(env):\n n_atoms = 51\n v_max = 10\n v_min = -10\n return pfrl.q_functions.DistributionalFCStateQFunctionWithDiscreteAction( # NOQA\n env.observation_space.low.size,\n env.action_space.n,\n n_atoms=n_atoms,\n v_min=v_min,\n v_max=v_max,\n n_hidden_channels=20,\n n_hidden_layers=1,\n )\n\n\ndef make_distrib_recurrent_q_func(env):\n n_atoms = 51\n v_max = 10\n v_min = -10\n return pfrl.nn.RecurrentSequential(\n nn.LSTM(input_size=env.observation_space.low.size, hidden_size=20),\n pfrl.q_functions.DistributionalFCStateQFunctionWithDiscreteAction( # NOQA\n 20,\n env.action_space.n,\n n_atoms=n_atoms,\n v_min=v_min,\n v_max=v_max,\n n_hidden_channels=None,\n n_hidden_layers=0,\n ),\n )\n\n\nclass TestCategoricalDoubleDQNOnDiscreteABC(\n _TestBatchTrainingMixin, base._TestDQNOnDiscreteABC\n):\n def make_q_func(self, env):\n return make_distrib_ff_q_func(env)\n\n def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):\n return CategoricalDoubleDQN(\n q_func,\n opt,\n rbuf,\n gpu=gpu,\n gamma=0.9,\n explorer=explorer,\n replay_start_size=100,\n target_update_interval=100,\n )\n\n\n# Continuous action spaces are not supported\nclass TestCategoricalDoubleDQNOnDiscretePOABC(base._TestDQNOnDiscretePOABC):\n def make_q_func(self, env):\n return make_distrib_recurrent_q_func(env)\n\n def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):\n return CategoricalDoubleDQN(\n q_func,\n opt,\n rbuf,\n gpu=gpu,\n gamma=0.9,\n explorer=explorer,\n replay_start_size=100,\n target_update_interval=100,\n 
recurrent=True,\n )\n" ]
[ [ "numpy.asarray" ], [ "torch.logsumexp", "torch.no_grad" ], [ "numpy.random.binomial", "numpy.random.uniform" ], [ "torch.cat" ], [ "torch.nn.LSTM" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
allenai/robustnav
[ "137a4f4c185f1d6f587874c91abb1dafb6de08ad" ]
[ "allenact/base_abstractions/sensor.py" ]
[ "# Original work Copyright (c) Facebook, Inc. and its affiliates.\n# Modified work Copyright (c) Allen Institute for AI\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom abc import abstractmethod, ABC\nfrom collections import OrderedDict\nfrom typing import (\n Generic,\n Dict,\n Any,\n Optional,\n TYPE_CHECKING,\n TypeVar,\n Sequence,\n cast,\n Tuple,\n Union,\n)\n\nimport PIL\nimport gym\nimport numpy as np\nfrom gym import spaces as gyms\nfrom torch.distributions.utils import lazy_property\nfrom torchvision import transforms\n\nimport copy\nimport allenact.base_abstractions.rgb_sensor_degradations as degradations\n\nfrom allenact.base_abstractions.misc import EnvType\nfrom allenact.utils import spaces_utils as su\nfrom allenact.utils.misc_utils import prepare_locals_for_super\nfrom allenact.utils.system import get_logger\nfrom allenact.utils.tensor_utils import ScaleBothSides\n\nif TYPE_CHECKING:\n from allenact.base_abstractions.task import SubTaskType\nelse:\n SubTaskType = TypeVar(\"SubTaskType\", bound=\"Task\")\n\nSpaceDict = gyms.Dict\n\n\nclass Sensor(Generic[EnvType, SubTaskType]):\n \"\"\"Represents a sensor that provides data from the environment to agent.\n The user of this class needs to implement the get_observation method and\n the user is also required to set the below attributes:\n\n # Attributes\n\n uuid : universally unique id.\n observation_space : ``gym.Space`` object corresponding to observation of\n sensor.\n \"\"\"\n\n uuid: str\n observation_space: gym.Space\n\n def __init__(self, uuid: str, observation_space: gym.Space, **kwargs: Any) -> None:\n self.uuid = uuid\n self.observation_space = observation_space\n\n def get_observation(\n self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any\n ) -> Any:\n \"\"\"Returns observations from the environment (or task).\n\n # Parameters\n\n env : The environment the sensor is used upon.\n task : (Optionally) a Task from which the sensor should get data.\n\n # Returns\n\n Current observation for Sensor.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SensorSuite(Generic[EnvType]):\n \"\"\"Represents a set of sensors, with each sensor being identified through a\n unique id.\n\n # Attributes\n\n sensors: list containing sensors for the environment, uuid of each\n sensor must be unique.\n \"\"\"\n\n sensors: Dict[str, Sensor[EnvType, Any]]\n observation_spaces: gyms.Dict\n\n def __init__(self, sensors: Sequence[Sensor]) -> None:\n \"\"\"Initializer.\n\n # Parameters\n\n param sensors: the sensors that will be included in the suite.\n \"\"\"\n self.sensors = OrderedDict()\n spaces: OrderedDict[str, gym.Space] = OrderedDict()\n for sensor in sensors:\n assert (\n sensor.uuid not in self.sensors\n ), \"'{}' is duplicated sensor uuid\".format(sensor.uuid)\n self.sensors[sensor.uuid] = sensor\n spaces[sensor.uuid] = sensor.observation_space\n self.observation_spaces = SpaceDict(spaces=spaces)\n\n def get(self, uuid: str) -> Sensor:\n \"\"\"Return sensor with the given `uuid`.\n\n # Parameters\n\n uuid : The unique id of the sensor\n\n # Returns\n\n The sensor with unique id `uuid`.\n \"\"\"\n return self.sensors[uuid]\n\n def get_observations(\n self, env: EnvType, task: Optional[SubTaskType], **kwargs: Any\n ) -> Dict[str, Any]:\n \"\"\"Get all observations corresponding to the sensors in the suite.\n\n # Parameters\n\n env : The environment from which to get the observation.\n task : (Optionally) the task from which to get the 
observation.\n\n # Returns\n\n Data from all sensors packaged inside a Dict.\n \"\"\"\n return {\n uuid: sensor.get_observation(env=env, task=task, **kwargs) # type: ignore\n for uuid, sensor in self.sensors.items()\n }\n\n\nclass ExpertActionSensor(Sensor[EnvType, SubTaskType]):\n \"\"\"A sensor that obtains the expert action for a given task (if\n available).\"\"\"\n\n def __init__(\n self,\n action_space: Optional[Union[gym.Space, int]] = None,\n uuid: str = \"expert_action\",\n expert_args: Optional[Dict[str, Any]] = None,\n nactions: Optional[int] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"Initialize an `ExpertActionSensor`.\n\n # Parameters\n action_space : The action space of the agent, this is necessary in order for this sensor\n to know what its output observation space is.\n uuid : A string specifying the unique ID of this sensor.\n expert_args : This sensor obtains an expert action from the task by talling the `query_expert`\n method of the task. `expert_args` are any keyword arguments that should be passed to the\n `query_expert` method when called.\n nactions : [DEPRECATED] The number of actions available to the agent, corresponds to an `action_space`\n of `gym.spaces.Discrete(nactions)`.\n \"\"\"\n if isinstance(action_space, int):\n action_space = gym.spaces.Discrete(action_space)\n elif action_space is None:\n assert (\n nactions is not None\n ), \"One of `action_space` or `nactions` must be not `None`.\"\n get_logger().warning(\n \"The `nactions` parameter to `ExpertActionSensor` is deprecated and will be removed, please use\"\n \" the `action_space` parameter instead.\"\n )\n action_space = gym.spaces.Discrete(nactions)\n self.action_space = action_space\n self.expert_args: Dict[str, Any] = expert_args or {}\n\n self.unflattened_observation_space = gym.spaces.Tuple(\n (self.action_space, gym.spaces.Discrete(2))\n )\n\n observation_space = self._get_observation_space()\n\n super().__init__(**prepare_locals_for_super(locals()))\n\n def _get_observation_space(self) -> gym.spaces.Box:\n \"\"\"The observation space of the expert action sensor.\n\n Will equal `gym.spaces.Tuple(gym.spaces.Discrete(num actions in\n task), gym.spaces.Discrete(2))` where the first entry of the\n tuple is the expert action index and the second equals 0 if and\n only if the expert failed to generate a true expert action. 
The\n value `num actions in task` should be in `config[\"nactions\"]`\n \"\"\"\n return su.flatten_space(self.unflattened_observation_space)\n\n @lazy_property\n def _zeroed_observation(self):\n return np.zeros_like(self.observation_space.sample())\n\n def get_observation(\n self, env: EnvType, task: SubTaskType, *args: Any, **kwargs: Any\n ) -> Any:\n # If the task is completed, we needn't (perhaps can't) find the expert\n # action from the (current) terminal state.\n if task.is_done():\n return self._zeroed_observation\n\n action, expert_was_successful = task.query_expert(**self.expert_args)\n\n if isinstance(action, int):\n assert isinstance(self.action_space, gym.spaces.Discrete)\n unflattened_action = action\n else:\n # Assume we receive a gym-flattened numpy action\n unflattened_action = gyms.unflatten(self.action_space, action)\n\n unflattened_torch = su.torch_point(\n self.unflattened_observation_space,\n (unflattened_action, expert_was_successful),\n )\n\n flattened_torch = su.flatten(\n self.unflattened_observation_space, unflattened_torch\n )\n return flattened_torch.cpu().numpy()\n\n\nclass ExpertPolicySensor(Sensor[EnvType, SubTaskType]):\n def __init__(\n self,\n nactions: int,\n uuid: str = \"expert_policy\",\n expert_args: Optional[Dict[str, Any]] = None,\n **kwargs: Any\n ) -> None:\n self.nactions = nactions\n self.expert_args: Dict[str, Any] = expert_args or {}\n\n super().__init__(**prepare_locals_for_super(locals()))\n\n def _get_observation_space(self) -> gym.spaces.Tuple:\n \"\"\"The observation space of the expert action sensor.\n\n Will equal `gym.spaces.Tuple(gym.spaces.Box(num actions in\n task), gym.spaces.Discrete(2))` where the first entry of the\n tuple is the expert policy and the second equals 0 if and only\n if the expert failed to generate a true expert action. 
The value\n `num actions in task` should be in `config[\"nactions\"]`\n \"\"\"\n return gym.spaces.Tuple(\n (\n gym.spaces.Box(\n low=np.float32(0.0), high=np.float32(1.0), shape=(self.nactions,),\n ),\n gym.spaces.Discrete(2),\n )\n )\n\n def get_observation(\n self, env: EnvType, task: SubTaskType, *args: Any, **kwargs: Any\n ) -> Any:\n policy, expert_was_successful = task.query_expert(**self.expert_args)\n assert isinstance(policy, np.ndarray) and policy.shape == (self.nactions,), (\n \"In expert action sensor, `task.query_expert()` \"\n \"did not return a valid numpy array.\"\n )\n return np.array(\n np.concatenate((policy, [expert_was_successful]), axis=-1), dtype=np.float32\n )\n\n\nclass RotationSensor(Sensor[EnvType, SubTaskType]):\n def __init__(self, uuid: str = \"rot_label\", **kwargs: Any):\n observation_space = self._get_observation_space()\n super().__init__(**prepare_locals_for_super(locals()))\n\n def _get_observation_space(self) -> gym.spaces.Discrete:\n return gym.spaces.Discrete(4)\n\n def get_observation(\n self, env: EnvType, task: SubTaskType, *args: Any, **kwargs: Any\n ) -> Any:\n return 0\n\n\nclass VisionSensor(Sensor[EnvType, SubTaskType]):\n def __init__(\n self,\n mean: Optional[np.ndarray] = None,\n stdev: Optional[np.ndarray] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n uuid: str = \"vision\",\n output_shape: Optional[Tuple[int, ...]] = None,\n output_channels: Optional[int] = None,\n unnormalized_infimum: float = -np.inf,\n unnormalized_supremum: float = np.inf,\n scale_first: bool = True,\n **kwargs: Any\n ):\n \"\"\"Initializer.\n\n # Parameters\n\n config : The images will be normalized\n with means `config[\"mean\"]` and standard deviations `config[\"stdev\"]`. If both `config[\"height\"]` and\n `config[\"width\"]` are non-negative integers then\n the image returned from the environment will be rescaled to have\n `config[\"height\"]` rows and `config[\"width\"]` columns using bilinear sampling. The universally unique\n identifier will be set as `config[\"uuid\"]`.\n args : Extra args. Currently unused.\n kwargs : Extra kwargs. 
Currently unused.\n \"\"\"\n\n print(\"Sensor UUID is \", uuid)\n\n def f(x, k, default):\n return x[k] if k in x else default\n\n self._random_crop: Optional[bool] = f(kwargs, \"random_crop\", False)\n self._crop_height: Optional[int] = f(kwargs, \"crop_height\", None)\n self._crop_width: Optional[int] = f(kwargs, \"crop_width\", None)\n self._jitter: Optional[bool] = f(kwargs, \"color_jitter\", False)\n self._tshift: Optional[bool] = f(kwargs, \"random_translate\", False)\n self._daug_mode: Optional[bool] = f(kwargs, \"data_augmentation_mode\", False)\n\n # Parse visual corruption details\n # Provided inputs are\n # - a list of corruptions\n # - a list of severties\n self._corruptions = f(kwargs, \"corruptions\", None)\n self._severities = f(kwargs, \"severities\", None)\n\n print(\"Applied corruptions are \")\n print(self._corruptions)\n print(self._severities)\n\n print(\"Random Crop state \", self._random_crop)\n print(\"Color Jitter state \", self._jitter)\n print(\"Random Translate \", self._tshift)\n\n # Whether to rotate the input observation or not\n # self._sep_rotate: bool = f(kwargs, \"sep_rotate\", False)\n self._rotate: bool = f(kwargs, \"rotate\", False)\n\n self._norm_means = mean\n self._norm_sds = stdev\n assert (self._norm_means is None) == (self._norm_sds is None), (\n \"In VisionSensor's config, \"\n \"either both mean/stdev must be None or neither.\"\n )\n self._should_normalize = self._norm_means is not None\n\n self._height = height\n self._width = width\n assert (self._width is None) == (self._height is None), (\n \"In VisionSensor's config, \"\n \"either both height/width must be None or neither.\"\n )\n\n self._scale_first = scale_first\n\n self.scaler: Optional[ScaleBothSides] = None\n if self._width is not None:\n self.scaler = ScaleBothSides(\n width=cast(int, self._width), height=cast(int, self._height)\n )\n\n # Data augmentation options\n self._random_cropper = (\n None\n if not self._random_crop\n else transforms.RandomCrop((self._crop_height, self._crop_width))\n )\n\n self._color_jitter = (\n None if not self._jitter else transforms.ColorJitter(0.4, 0.4, 0.4, 0.4)\n )\n\n self._random_translate = (\n None\n if not self._tshift\n else transforms.RandomAffine(degrees=0, translate=(0.2, 0.2))\n )\n\n self.to_pil = transforms.ToPILImage() # assumes mode=\"RGB\" for 3 channels\n\n self._observation_space = self._make_observation_space(\n output_shape=output_shape,\n output_channels=output_channels,\n unnormalized_infimum=unnormalized_infimum,\n unnormalized_supremum=unnormalized_supremum,\n )\n\n assert int(PIL.__version__.split(\".\")[0]) != 7, (\n \"We found that Pillow version >=7.* has broken scaling,\"\n \" please downgrade to version 6.2.1 or upgrade to >=8.0.0\"\n )\n\n observation_space = self._get_observation_space()\n\n super().__init__(**prepare_locals_for_super(locals()))\n\n def _make_observation_space(\n self,\n output_shape: Optional[Tuple[int, ...]],\n output_channels: Optional[int],\n unnormalized_infimum: float,\n unnormalized_supremum: float,\n ) -> gym.spaces.Box:\n assert output_shape is None or output_channels is None, (\n \"In VisionSensor's config, \"\n \"only one of output_shape and output_channels can be not None.\"\n )\n\n shape: Optional[Tuple[int, ...]] = None\n if output_shape is not None:\n shape = output_shape\n elif self._height is not None and output_channels is not None:\n shape = (\n cast(int, self._height),\n cast(int, self._width),\n cast(int, output_channels),\n )\n\n if not self._should_normalize or shape is None or 
len(shape) == 1:\n return gym.spaces.Box(\n low=np.float32(unnormalized_infimum),\n high=np.float32(unnormalized_supremum),\n shape=shape,\n )\n else:\n out_shape = shape[:-1] + (1,)\n low = np.tile(\n (unnormalized_infimum - cast(np.ndarray, self._norm_means))\n / cast(np.ndarray, self._norm_sds),\n out_shape,\n )\n high = np.tile(\n (unnormalized_supremum - cast(np.ndarray, self._norm_means))\n / cast(np.ndarray, self._norm_sds),\n out_shape,\n )\n return gym.spaces.Box(low=np.float32(low), high=np.float32(high))\n\n def _get_observation_space(self):\n return self._observation_space\n\n @property\n def height(self) -> Optional[int]:\n \"\"\"Height that input image will be rescale to have.\n\n # Returns\n\n The height as a non-negative integer or `None` if no rescaling is done.\n \"\"\"\n return self._height\n\n @property\n def width(self) -> Optional[int]:\n \"\"\"Width that input image will be rescale to have.\n\n # Returns\n\n The width as a non-negative integer or `None` if no rescaling is done.\n \"\"\"\n return self._width\n\n @abstractmethod\n def frame_from_env(self, env: EnvType, task: Optional[SubTaskType]) -> np.ndarray:\n raise NotImplementedError\n\n def get_observation(\n self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any\n ) -> Any:\n im = self.frame_from_env(env=env, task=task)\n assert (\n im.dtype == np.float32 and (len(im.shape) == 2 or im.shape[-1] == 1)\n ) or (im.shape[-1] == 3 and im.dtype == np.uint8), (\n \"Input frame must either have 3 channels and be of\"\n \" type np.uint8 or have one channel and be of type np.float32\"\n )\n\n # Apply a sequence of corruptions to the RGB frames\n if self._corruptions is not None:\n im = degradations.apply_corruption_sequence(\n np.array(im), self._corruptions, self._severities\n )\n\n # Random translation\n if self._tshift:\n if isinstance(im, np.ndarray):\n im = self.to_pil(im)\n im = self._random_translate(im)\n\n # Random Crop Image\n if self._random_crop:\n if isinstance(im, np.ndarray):\n im = self.to_pil(im)\n im = self._random_cropper(im)\n\n # Color Jitter\n if self._jitter:\n if isinstance(im, np.ndarray):\n im = self.to_pil(im)\n im = self._color_jitter(im)\n\n if self._rotate:\n rot_im = copy.deepcopy(im)\n\n if self._rotate:\n if not isinstance(rot_im, np.ndarray):\n rot_im = np.array(im)\n rot_im, rot_label = degradations.rotate_single(rot_im)\n\n if self._scale_first:\n if not isinstance(im, np.ndarray):\n shape_condition = im.size[:2] != (self._height, self._width)\n else:\n shape_condition = im.shape[:2] != (self._height, self._width)\n im = self.to_pil(im)\n if self.scaler is not None and shape_condition:\n im = np.array(self.scaler(im), dtype=np.uint8) # hwc\n\n if self._rotate:\n if not isinstance(rot_im, np.ndarray):\n shape_condition = rot_im.size[:2] != (self._height, self._width)\n else:\n shape_condition = rot_im.shape[:2] != (self._height, self._width)\n rot_im = self.to_pil(rot_im)\n if self.scaler is not None and shape_condition:\n rot_im = np.array(self.scaler(rot_im), dtype=np.uint8) # hwc\n\n # Original\n if self._scale_first:\n if self.scaler is not None and im.shape[:2] != (self._height, self._width):\n im = np.array(self.scaler(self.to_pil(im)), dtype=im.dtype) # hwc\n\n assert im.dtype in [np.uint8, np.float32]\n\n if im.dtype == np.uint8:\n im = im.astype(np.float32) / 255.0\n\n if self._rotate:\n if rot_im.dtype == np.uint8:\n rot_im = rot_im.astype(np.float32) / 255.0\n\n if self._should_normalize:\n im -= self._norm_means\n im /= self._norm_sds\n\n if 
self._rotate:\n if self._should_normalize:\n rot_im -= self._norm_means\n rot_im /= self._norm_sds\n\n if not self._scale_first:\n if self.scaler is not None and im.shape[:2] != (self._height, self._width):\n im = np.array(self.scaler(self.to_pil(im)), dtype=np.float32) # hwc\n\n if self._rotate:\n if self.scaler is not None and rot_im.shape[:2] != (\n self._height,\n self._width,\n ):\n rot_im = np.array(\n self.scaler(self.to_pil(rot_im)), dtype=np.float32\n ) # hwc\n\n if self._rotate:\n return (rot_im, rot_label)\n else:\n return im\n\n\nclass RGBSensor(VisionSensor[EnvType, SubTaskType], ABC):\n def __init__(\n self,\n use_resnet_normalization: bool = False,\n mean: Optional[np.ndarray] = np.array(\n [[[0.485, 0.456, 0.406]]], dtype=np.float32\n ),\n stdev: Optional[np.ndarray] = np.array(\n [[[0.229, 0.224, 0.225]]], dtype=np.float32\n ),\n height: Optional[int] = None,\n width: Optional[int] = None,\n uuid: str = \"rgb\",\n output_shape: Optional[Tuple[int, ...]] = None,\n output_channels: int = 3,\n unnormalized_infimum: float = 0.0,\n unnormalized_supremum: float = 1.0,\n scale_first: bool = True,\n **kwargs: Any\n ):\n \"\"\"Initializer.\n\n # Parameters\n\n config : If `config[\"use_resnet_normalization\"]` is `True` then the RGB images will be normalized\n with means `[0.485, 0.456, 0.406]` and standard deviations `[0.229, 0.224, 0.225]` (i.e. using the standard\n resnet normalization). If both `config[\"height\"]` and `config[\"width\"]` are non-negative integers then\n the RGB image returned from the environment will be rescaled to have shape\n (config[\"height\"], config[\"width\"], 3) using bilinear sampling.\n args : Extra args. Currently unused.\n kwargs : Extra kwargs. Currently unused.\n \"\"\"\n\n if not use_resnet_normalization:\n mean, stdev = None, None\n\n super().__init__(**prepare_locals_for_super(locals()))\n\n\nclass DepthSensor(VisionSensor[EnvType, SubTaskType], ABC):\n def __init__(\n self,\n use_normalization: bool = False,\n mean: Optional[np.ndarray] = np.array([[0.5]], dtype=np.float32),\n stdev: Optional[np.ndarray] = np.array([[0.25]], dtype=np.float32),\n height: Optional[int] = None,\n width: Optional[int] = None,\n uuid: str = \"depth\",\n output_shape: Optional[Tuple[int, ...]] = None,\n output_channels: int = 1,\n unnormalized_infimum: float = 0.0,\n unnormalized_supremum: float = 5.0,\n scale_first: bool = True,\n **kwargs: Any\n ):\n \"\"\"Initializer.\n\n # Parameters\n\n config : If `config[\"use_normalization\"]` is `True` then the depth images will be normalized\n with mean 0.5 and standard deviation 0.25. If both `config[\"height\"]` and `config[\"width\"]` are\n non-negative integers then the depth image returned from the environment will be rescaled to have shape\n (config[\"height\"], config[\"width\"]) using bilinear sampling.\n args : Extra args. Currently unused.\n kwargs : Extra kwargs. Currently unused.\n \"\"\"\n\n if not use_normalization:\n mean, stdev = None, None\n\n super().__init__(**prepare_locals_for_super(locals()))\n\n def get_observation( # type: ignore\n self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any\n ) -> Any:\n depth = super().get_observation(env, task, *args, **kwargs)\n depth = np.expand_dims(depth, 2)\n\n return depth\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.expand_dims", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
heytitle/MASS-Learning
[ "0d40de5227c94d1a5e4b18e44d16374e12821ad2" ]
[ "start_training.py" ]
[ "import base64\nimport pickle\nimport pprint\nimport sys\nfrom math import ceil\n\nimport numpy as np\nimport torch\n\nimport experiments\nimport models\nfrom models.utils import save_model_kwargs, load_model_from_checkpoint\nfrom utils import get_dataloaders, setup_writer\n\n\ndef train_epoch(writer,\n model,\n train_loader,\n optimizer,\n scheduler,\n epoch,\n total_batches,\n train_experiments):\n for batch_idx, (data, target) in enumerate(train_loader):\n for ex in train_experiments:\n if writer.global_step % ex.run_interval == 0:\n ex.run(batch_idx, epoch)\n model.train()\n data, target = data.to(model.device), target.to(model.device)\n optimizer.zero_grad()\n loss, output = model.net_forward_and_loss(data, target)\n if torch.isnan(loss):\n raise ValueError('Training loss value for {} was NaN'.format(model.__class__.__name__))\n loss.backward()\n optimizer.step()\n if writer.global_step % writer.train_loss_plot_interval == 0:\n writer.add_scalar('Train Loss/Train Loss', loss.item(), writer.global_step)\n writer.global_step += 1\n if total_batches is not None and writer.global_step >= total_batches:\n for ex in train_experiments:\n if writer.global_step % ex.run_interval == 0:\n ex.run(batch_idx, epoch)\n break\n scheduler.step()\n\n\ndef train_model(\n writer,\n seed,\n dataset_name,\n model_class_name,\n model_kwargs,\n normalize_inputs,\n batch_size,\n train_size,\n val_size,\n epochs,\n total_batches,\n optimizer_class_name,\n optimizer_kwargs,\n lr_scheduler_class_name,\n lr_scheduler_kwargs,\n model_logdir=None,\n checkpoint=None,\n train_experiments_and_kwargs=[],\n device_id='cpu'):\n torch.manual_seed(seed)\n np.random.seed(seed)\n device = torch.device(device_id if torch.cuda.is_available() else \"cpu\")\n\n model_class = models.__dict__[model_class_name]\n train_loader, val_loader, _, in_shape, n_classes = get_dataloaders(dataset_name=dataset_name,\n batch_size=batch_size,\n train_size=train_size,\n val_size=val_size,\n device_id=device_id,\n normalize_inputs=normalize_inputs)\n\n if model_logdir or checkpoint:\n model = load_model_from_checkpoint(writer, model_logdir, checkpoint)\n else:\n model_kwargs['n_classes'] = n_classes\n model_kwargs['net_kwargs']['in_shape'] = in_shape\n model = model_class(writer, **model_kwargs)\n save_model_kwargs(writer, model_class_name, model_kwargs)\n\n optimizer = model.get_optimizer(optimizer_class_name, optimizer_kwargs)\n scheduler = torch.optim.lr_scheduler.__dict__[lr_scheduler_class_name](optimizer, **lr_scheduler_kwargs)\n\n train_experiments = []\n for ex in train_experiments_and_kwargs:\n train_experiments.append(experiments.__dict__[ex[0]](writer=writer,\n model=model,\n train_loader=train_loader,\n val_loader=val_loader,\n **ex[1]))\n model.initialize(train_loader)\n model.to(device)\n if epochs is None:\n epochs = ceil(total_batches / len(train_loader))\n for epoch in range(1, epochs + 1):\n train_epoch(writer,\n model,\n train_loader,\n optimizer,\n scheduler,\n epoch,\n total_batches,\n train_experiments)\n\n\ndef main(kwargs):\n # Workaround for pytorch bug where multiple gpu processes all like to use gpu0\n if 'cuda' in kwargs['device_id'] and torch.cuda.is_available():\n torch.cuda.set_device(int(kwargs['device_id'][-1]))\n\n assert kwargs['epochs'] is None or kwargs['total_batches'] is None, \\\n \"Specify either number of epochs to train for, or total batches to train for, not both.\"\n\n writer = setup_writer(kwargs.pop('log_dir'),\n kwargs.pop('debug_network'),\n kwargs.pop('train_loss_plot_interval'),\n 
kwargs.pop('absolute_logdir_path', False))\n writer.add_text('kwargs', pprint.pformat(kwargs).replace('\\n', '\\t\\n'))\n train_model(writer, **kwargs)\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n kwargs = dict(\n log_dir='debug',\n debug_network=False,\n seed=2,\n dataset_name='CIFAR10',\n model_class_name='ReducedJacMASSCE',\n model_kwargs=dict(\n net_name='SmallMLP',\n net_kwargs=dict(\n out_dim=10,\n nonlinearity='elu',\n batch_norm=True,\n dropout=False\n ),\n var_dist_init_strategy='zeros',\n beta=0.001,\n n_mixture_components=2,\n ),\n normalize_inputs=True,\n batch_size=256,\n epochs=None,\n total_batches=1e4,\n val_size=0.1,\n train_size='max',\n optimizer_class_name='Adam',\n optimizer_kwargs=dict(\n lr=3e-4,\n var_dist_optimizer_kwargs=dict(\n lr=5e-4\n )\n ),\n lr_scheduler_class_name='ExponentialLR',\n lr_scheduler_kwargs=dict(\n gamma=1.0\n ),\n train_loss_plot_interval=5,\n train_experiments_and_kwargs=[\n ('ModelLossAndAccuracy', dict(run_interval=1000))\n ],\n device_id='cuda:0')\n else:\n kwargs = pickle.loads(base64.b64decode(sys.argv[1]))\n main(kwargs)\n" ]
[ [ "torch.isnan", "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nherbert25/Computational-Physics
[ "6fc01cf7bee566ca1e095877cc10d63bd678e21f", "6fc01cf7bee566ca1e095877cc10d63bd678e21f", "6fc01cf7bee566ca1e095877cc10d63bd678e21f", "6fc01cf7bee566ca1e095877cc10d63bd678e21f" ]
[ "HW10/problem1 copied.py", "HW1/Cromer Algorithm.py", "HW4/useful.py", "HW11/problem 2 new REAL - Copy.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 15:11:54 2019\n\n@author: Nate\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN1 = int(1e4)\nx10, x20 = 1.1, 2.2\n\ntau = np.arange(1, 10.1, .5)\nr_max = 2\n\n\ndef G2(x1, x2, dt):\n return np.exp(-(x1-x2)**2/(2*dt)-dt/4*(x1*x1+x2*x2)) / np.sqrt(2*np.pi*dt)\n\ndef P1(x1, x2, dt):\n return dt*np.sqrt(4+dt*dt) * G2(x1,x2,dt) * G2(x2,x1,dt)\n\ndef Metropolis2P(DT, X10, X20, P, NITER, RMAX = r_max):\n X1 = np.append(X10, np.zeros(NITER))\n X2 = np.append(X20, np.zeros(NITER))\n NACC = 0\n \n for n in range(NITER):\n dR = RMAX*(np.random.rand(2)-.5)\n ratio = P(X1[n]+dR[0], X2[n]+dR[1], DT)/P(X1[n], X2[n], DT)\n ran = np.random.rand()\n if ratio>1 or ratio>ran:\n X1[n+1] = X1[n]+dR[0]\n X2[n+1] = X2[n]+dR[1]\n NACC += 1\n else:\n X1[n+1] = X1[n]\n X2[n+1] = X2[n]\n \n print(str(NACC/NITER*100)+'% accepted')\n \n return np.array([X1, X2])\n\nEL1_dat = np.zeros((len(tau), N1+1))\n\n\ndef Summand1(X1, X2, DT):\n return (-(X1*X1+X2*X2)*(DT**4+8) + (4*DT+8*X1*X2)*(2+DT*DT)) / (16*DT*DT)\n\nfor i in range(len(tau)):\n x1,x2 = Metropolis2P(tau[i]/2, x10, x20, P1, N1)\n EL1_dat[i] = Summand1(x1, x2, tau[i]/2)\n \nEL1_arr = np.average(EL1_dat, axis=1)\nerr1 = np.average(EL1_dat, axis=1)/np.sqrt(N1)\n\ntau1_x = np.linspace(1,10,100)\nEl1_anal = 1/2 + np.exp(-tau1_x)/(1 - np.exp(-tau1_x))\n\nplt.figure()\nplt.plot(tau1_x, El1_anal, 'y', label='Analytical')\nplt.plot(tau, EL1_arr, label='Monte-Carlo')\nplt.xlabel(r'$\\tau$')\nplt.ylabel(r'$\\langle E_L\\rangle (\\tau)$')\nplt.show()\n", "#Cromer Algorithm\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef cart2pol(x, y):\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\ndef pol2cart(rho, phi):\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return(x, y)\n\n\n################################\n\nx_0 = 10\ny_0 = -50\nv_int = 1/10\n\n#initial conditions\n# r_0 = [(float(10), float(0))]\n# v_0 = [(float(0), float(1/10))]\n\nr_0 = [(x_0, 0)]\nv_0 = [(0, v_int)]\n\nr = r_0\nv = v_0\n\n\n############################################\n#Other Constants\n\nh = x_0*v_int\np = h**2\nE_0 = .5*v_int**2-1/x_0\na = -1/(2*E_0)\ne = (1-p/a)**.5\nP = 2*np.pi*a**(3/2)\nt = P/1000\n\n# print('x_0 =',x_0,'\\r\\nv_int =',v_int,'\\r\\nr_0 =',r_0,'\\r\\nv_0 =',v_0,'\\r\\nh =',h,'\\r\\np =',p)\n# print('E_0 =',E_0,'\\r\\na =',a,'\\r\\ne =',e)\n# print('P =',P,'t =',t)\n\n##############################################\n#Algorithm\n\n\nfor i in range(0,int(P*100)):\n\tr_mag = (r[len(r)-1][0]**2+r[len(r)-1][1]**2)**.5\n\tacc = np.asarray((-r[len(r)-1][0]/r_mag**3,-r[len(r)-1][1]/r_mag**3))\n\t\t\n\tv.append(tuple(map(sum, zip(v[len(v)-1],acc*t))))\n\tv_add = np.asarray(v[len(v)-1])\n\tr.append(tuple(map(sum, zip(r[len(r)-1],v_add*t))))\n\nr = np.asarray(r)\n\nl = []\nfor i in r:\n\tl.append(cart2pol(i[0],i[1]))\n\nx_val = [x[0] for x in l]\ny_val = [x[1] for x in l]\n\n\n##############################################\n#Exact Solution\n\ntheta_ex = np.arange(0, 2*np.pi+.01, 0.1)\nr_ex = p/(1-e*np.cos((theta_ex)))\n\n\n#################################################################\n#Graphing - Polar \n\nfig1, ax = plt.subplots()\nax = plt.subplot(111, projection='polar')\nax.plot(y_val,x_val)\nax.plot(theta_ex, r_ex, 'o', markerfacecolor='none', markeredgecolor='r')\nax.set_rmax(.5)\nax.set_rticks([3, 6, 9, 12]) # less radial ticks\nax.set_rlabel_position(-22.5) # get radial labels away from plotted line\nax.grid(True)\n\nax.set_title(\"Cromer Algorithm\", va='bottom')\n# 
plt.show()\n\n\n###################################################\n#Energy\n\nr_mag = []\nv_mag = []\nE_t = []\ntime = []\ntimePeriod = []\nE_rat = []\nfor i in r:\n\tr_mag.append((i[0]**2+i[1]**2)**.5)\nfor i in v:\n\tv_mag.append((i[0]**2+i[1]**2)**.5)\nfor i in range(len(r_mag)):\n\tE_t.append(.5*v_mag[i]**2-1/r_mag[i])\nfor i in range(0,int(P*100)+1):\n\tx = i*t\n\ttime.append(x)\nfor i in time:\n\tx = i/P\n\ttimePeriod.append(x)\nfor i in range(len(E_t)):\n\tE_rat.append(E_t[i]/E_0-1)\n\n#4.5 to 5.5\n# print(timePeriod[430],timePeriod[440],timePeriod[450],timePeriod[550],timePeriod[570])\n\nE_rat = E_rat[450:550]\ntimePeriod = timePeriod[450:550]\n\nfig2, ax2 = plt.subplots()\nax2.plot(timePeriod,E_rat)\nax2.set_ylabel('E(t)/E_0-1')\nax2.set_xlabel('Time/Period')\nax2.set_title(\"Energy Ratio\")\nplt.show()\n\n\n", "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef cart2pol(x, y):\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\ndef pol2cart(rho, phi):\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return(x, y)\n\t\ndef sum2(x, y):\n\treturn tuple(map(sum, zip(x,y)))\n\ndef sum3(x, y, z):\n\treturn tuple(map(sum, zip(x,y, z)))\n\n\n\n\ndef mag(x, y):\n\treturn(np.sqrt(x**2+y**2))\ndef acc(x, y):\n\treturn(-.5*x/mag(x,y)**3, -.5*y/mag(x,y)**3)\n\n\n\n\ndef mag_array(r):\n\treturn(np.sqrt(r[0]**2+r[1]**2))\ndef acc_array(r):\n\treturn(-.5*r/mag_array(r)**3)\n\n\n\ndef mag_multi(r1,r2):\n\treturn(np.sqrt((r2[0]-r1[0])**2+(r2[1]-r1[1])**2))\ndef acc_multi(r,r1,r2):\n\treturn(-.5*(r-r1)/mag_multi(r1,r)**3-.5*(r-r2)/mag_multi(r2,r)**3)\n\n\n\ndef int_q_array(r,v,dt):\n\tr = r + dt*v\n\treturn(r)\ndef int_v_array(r,r1,r2,v,t):\n\tv = v + t*acc_multi(r,r1,r2)\n\treturn(v)\n\n\ndef v_magnetic_calc(r, v, B, dt):\n\ttheta = B[2]*dt\n\tB_unit = B/np.linalg.norm(B)\n\treturn(v+np.sin(theta)*np.cross(B_unit,v)+(1-np.cos(theta))*np.cross(B_unit,np.cross(B_unit,v)))\n\n\ndef v_damped(r, v, w_0, dt):\n\tv - w_0**2*dt\n\treturn(v)\n\n\n\n\n\n\n\n\n\ndef plotting(x, y):\n\tfig2, ax5 = plt.subplots()\n\tax5.set_ylabel('(E(t)/E_0-1)/t^4')\n\tax5.set_xlabel('Time/Period')\n\tax5.set_title(\"Energy Ratio - Forest Ruth\")\n\tax5.plot(x,y)\n\tax5.legend(('Runge-Kutta', 'Forest Ruth'), loc='upper right')\n\tplt.show()\n\ndef plot_polar(r, theta):\n\tfig1, ax3 = plt.subplots()\n\tax3=fig1.add_subplot(111, projection='polar')\n\tax3=fig1.add_subplot(111)\n\tax3.plot(y_val,x_val)\n\tax3.plot(theta_comp, r_comp, 'o', markerfacecolor='none', markeredgecolor='r')\n\tax3.set_rmax(.5)\n\tax3.set_rticks([3, 6, 9, 12]) # less radial ticks\n\tax3.set_rlabel_position(-22.5) # get radial labels away from plotted line\n\tax3.grid(True)\n\tax3.set_title(\"Forest Ruth\", va='bottom')\n\tplt.show()", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 18 17:38:08 2019\n\n@author: Nate\n\nThe Generalized Metropolis algorithm removed the step-size error by an additional acceptance/rejection step, \nwhich adds substantial overhead. 
To improve on the firstorder Langevin algorithm, can you devise a second-order \nLangevin algorithm to reduce the step-size error dependence to (∆t)2?\n\nRepeat problem 2 of HW10 using this second order Langevin algorithm.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pdb\n\n\n##############################################################\n##############################################################\n#defining constants\n\nN2 = 10000\nalpha = 1.6875\n\ng1 = np.random.randn(N2)\ng2 = np.random.randn(N2)\ng3 = np.random.randn(N2)\ng4 = np.random.randn(N2)\ng5 = np.random.randn(N2)\ng6 = np.random.randn(N2)\n\n\ngauss = np.array([[i,j,k,l,m,n] for i,j,k,l,m,n in zip(g1,g2,g3,g4,g5,g6)])\nx0 = np.array([5,6,2,-2,-1,-8])\ntau = np.append(np.arange(0.001,0.01,0.003), np.arange(0.01, 0.07, 0.01))\nx = np.zeros((len(tau), N2, len(x0)))\nEL2_dat = np.zeros((len(tau), N2))\n\n##############################################################\n##############################################################\n#Function Definitions\n\ndef vel(x):\n r1 = np.sqrt(np.sum(x[:3]**2))\n r2 = np.sqrt(np.sum(x[3:]**2))\n v1 = -alpha*x[:3]/r1\n v2 = -alpha*x[3:]/r2\n return np.append(v1,v2)\n\ndef Langevin2(X0, VEL, GAUSS, DT, N):\n X = np.zeros((N,len(X0)))\n vel1 = VEL(X0)\n Y0 = X0 + VEL(X0+DT/4*vel1)*DT/2 + GAUSS[0]*np.sqrt(DT)\n vel2 = VEL(Y0)\n X[0] = Y0 + DT/2*VEL(Y0 + DT/4*vel2)\n \n for i in range(1,N):\n vel1 = VEL(X[i-1])\n Yi = X[i-1] + VEL(X[i-1] + DT/4*vel1)*DT/2 + GAUSS[i]*np.sqrt(DT)\n vel2 = VEL(Yi)\n X[i] = Yi + DT/2*VEL(Yi + DT/4*vel2)\n \n return X\n\ndef EL2(X1, Y1, Z1, X2, Y2, Z2, ALPHA = alpha):\n \n R1 = np.sqrt(X1**2 + Y1**2 + Z1**2)\n R2 = np.sqrt(X2**2 + Y2**2 + Z2**2)\n R12 = np.sqrt((X2-X1)**2 + (Y2-Y1)**2 + (Z2-Z1)**2)\n \n return ALPHA * (-ALPHA + 1/R1 + 1/R2) - 2/R1 - 2/R2 + 1/R12\n\n##############################################################\n##############################################################\n#Main Loop\n\nfor i in range(len(tau)):\n \n x[i] = Langevin2(x0, vel, gauss, tau[i], N2)\n \n x1 = np.array([x[i,j,0] for j in range(N2)])\n y1 = np.array([x[i,j,1] for j in range(N2)])\n z1 = np.array([x[i,j,2] for j in range(N2)])\n x2 = np.array([x[i,j,3] for j in range(N2)])\n y2 = np.array([x[i,j,4] for j in range(N2)])\n z2 = np.array([x[i,j,5] for j in range(N2)])\n \n EL2_dat[i] = EL2(x1, y1, z1, x2, y2, z2)\n \nEL2_arr = np.average(EL2_dat, axis=1)\nerr2 = np.std(EL2_dat, axis=1)/np.sqrt(N2/48**2)\n\n##############################################################\n##############################################################\n#Plotting\n\nfig1, axes1 = plt.subplots()\naxes1.plot(tau, EL2_arr, 'o-', label = '2nd Order Langevin')\naxes1.hlines(-729/256, 0, np.max(tau), linestyle='dashed', label = 'Theoretical')\naxes1.set_ylabel('Energy')\naxes1.set_xlabel('Time Step Sizes $\\Delta t$')\naxes1.set_title(\"Energy vs $\\Delta t$\", va='bottom')\naxes1.legend()\nplt.show()" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.random.rand", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.average", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.sqrt", "numpy.asarray", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "numpy.arctan2", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ], [ "numpy.sqrt", "numpy.cos", "matplotlib.pyplot.subplots", "numpy.sin", "numpy.arctan2", "numpy.linalg.norm", "numpy.cross", "matplotlib.pyplot.show" ], [ "numpy.sqrt", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.max", "numpy.append", "numpy.std", "numpy.random.randn", "numpy.average", "numpy.array", "numpy.sum", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brianjo/vision
[ "a8bde78130fd8c956780d85693d0f51912013732", "a8bde78130fd8c956780d85693d0f51912013732", "a8bde78130fd8c956780d85693d0f51912013732" ]
[ "test/test_cpp_models.py", "torchvision/models/detection/faster_rcnn.py", "torchvision/models/detection/ssd.py" ]
[ "import os\nimport sys\nimport unittest\n\nimport torch\nimport torchvision.transforms.functional as F\nfrom PIL import Image\nfrom torchvision import models\n\ntry:\n from torchvision import _C_tests\nexcept ImportError:\n _C_tests = None\n\n\ndef process_model(model, tensor, func, name):\n model.eval()\n traced_script_module = torch.jit.trace(model, tensor)\n traced_script_module.save(\"model.pt\")\n\n py_output = model.forward(tensor)\n cpp_output = func(\"model.pt\", tensor)\n\n assert torch.allclose(py_output, cpp_output), \"Output mismatch of \" + name + \" models\"\n\n\ndef read_image1():\n image_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"assets\", \"encode_jpeg\", \"grace_hopper_517x606.jpg\"\n )\n image = Image.open(image_path)\n image = image.resize((224, 224))\n x = F.pil_to_tensor(image)\n x = F.convert_image_dtype(x)\n return x.view(1, 3, 224, 224)\n\n\ndef read_image2():\n image_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"assets\", \"encode_jpeg\", \"grace_hopper_517x606.jpg\"\n )\n image = Image.open(image_path)\n image = image.resize((299, 299))\n x = F.pil_to_tensor(image)\n x = F.convert_image_dtype(x)\n x = x.view(1, 3, 299, 299)\n return torch.cat([x, x], 0)\n\n\[email protected](\n sys.platform == \"darwin\" or True,\n \"C++ models are broken on OS X at the moment, and there's a BC breakage on main; \"\n \"see https://github.com/pytorch/vision/issues/1191\",\n)\nclass Tester(unittest.TestCase):\n pretrained = False\n image = read_image1()\n\n def test_alexnet(self):\n process_model(models.alexnet(self.pretrained), self.image, _C_tests.forward_alexnet, \"Alexnet\")\n\n def test_vgg11(self):\n process_model(models.vgg11(self.pretrained), self.image, _C_tests.forward_vgg11, \"VGG11\")\n\n def test_vgg13(self):\n process_model(models.vgg13(self.pretrained), self.image, _C_tests.forward_vgg13, \"VGG13\")\n\n def test_vgg16(self):\n process_model(models.vgg16(self.pretrained), self.image, _C_tests.forward_vgg16, \"VGG16\")\n\n def test_vgg19(self):\n process_model(models.vgg19(self.pretrained), self.image, _C_tests.forward_vgg19, \"VGG19\")\n\n def test_vgg11_bn(self):\n process_model(models.vgg11_bn(self.pretrained), self.image, _C_tests.forward_vgg11bn, \"VGG11BN\")\n\n def test_vgg13_bn(self):\n process_model(models.vgg13_bn(self.pretrained), self.image, _C_tests.forward_vgg13bn, \"VGG13BN\")\n\n def test_vgg16_bn(self):\n process_model(models.vgg16_bn(self.pretrained), self.image, _C_tests.forward_vgg16bn, \"VGG16BN\")\n\n def test_vgg19_bn(self):\n process_model(models.vgg19_bn(self.pretrained), self.image, _C_tests.forward_vgg19bn, \"VGG19BN\")\n\n def test_resnet18(self):\n process_model(models.resnet18(self.pretrained), self.image, _C_tests.forward_resnet18, \"Resnet18\")\n\n def test_resnet34(self):\n process_model(models.resnet34(self.pretrained), self.image, _C_tests.forward_resnet34, \"Resnet34\")\n\n def test_resnet50(self):\n process_model(models.resnet50(self.pretrained), self.image, _C_tests.forward_resnet50, \"Resnet50\")\n\n def test_resnet101(self):\n process_model(models.resnet101(self.pretrained), self.image, _C_tests.forward_resnet101, \"Resnet101\")\n\n def test_resnet152(self):\n process_model(models.resnet152(self.pretrained), self.image, _C_tests.forward_resnet152, \"Resnet152\")\n\n def test_resnext50_32x4d(self):\n process_model(models.resnext50_32x4d(), self.image, _C_tests.forward_resnext50_32x4d, \"ResNext50_32x4d\")\n\n def test_resnext101_32x8d(self):\n 
process_model(models.resnext101_32x8d(), self.image, _C_tests.forward_resnext101_32x8d, \"ResNext101_32x8d\")\n\n def test_wide_resnet50_2(self):\n process_model(models.wide_resnet50_2(), self.image, _C_tests.forward_wide_resnet50_2, \"WideResNet50_2\")\n\n def test_wide_resnet101_2(self):\n process_model(models.wide_resnet101_2(), self.image, _C_tests.forward_wide_resnet101_2, \"WideResNet101_2\")\n\n def test_squeezenet1_0(self):\n process_model(\n models.squeezenet1_0(self.pretrained), self.image, _C_tests.forward_squeezenet1_0, \"Squeezenet1.0\"\n )\n\n def test_squeezenet1_1(self):\n process_model(\n models.squeezenet1_1(self.pretrained), self.image, _C_tests.forward_squeezenet1_1, \"Squeezenet1.1\"\n )\n\n def test_densenet121(self):\n process_model(models.densenet121(self.pretrained), self.image, _C_tests.forward_densenet121, \"Densenet121\")\n\n def test_densenet169(self):\n process_model(models.densenet169(self.pretrained), self.image, _C_tests.forward_densenet169, \"Densenet169\")\n\n def test_densenet201(self):\n process_model(models.densenet201(self.pretrained), self.image, _C_tests.forward_densenet201, \"Densenet201\")\n\n def test_densenet161(self):\n process_model(models.densenet161(self.pretrained), self.image, _C_tests.forward_densenet161, \"Densenet161\")\n\n def test_mobilenet_v2(self):\n process_model(models.mobilenet_v2(self.pretrained), self.image, _C_tests.forward_mobilenetv2, \"MobileNet\")\n\n def test_googlenet(self):\n process_model(models.googlenet(self.pretrained), self.image, _C_tests.forward_googlenet, \"GoogLeNet\")\n\n def test_mnasnet0_5(self):\n process_model(models.mnasnet0_5(self.pretrained), self.image, _C_tests.forward_mnasnet0_5, \"MNASNet0_5\")\n\n def test_mnasnet0_75(self):\n process_model(models.mnasnet0_75(self.pretrained), self.image, _C_tests.forward_mnasnet0_75, \"MNASNet0_75\")\n\n def test_mnasnet1_0(self):\n process_model(models.mnasnet1_0(self.pretrained), self.image, _C_tests.forward_mnasnet1_0, \"MNASNet1_0\")\n\n def test_mnasnet1_3(self):\n process_model(models.mnasnet1_3(self.pretrained), self.image, _C_tests.forward_mnasnet1_3, \"MNASNet1_3\")\n\n def test_inception_v3(self):\n self.image = read_image2()\n process_model(models.inception_v3(self.pretrained), self.image, _C_tests.forward_inceptionv3, \"Inceptionv3\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "import torch.nn.functional as F\nfrom torch import nn\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom ...ops import misc as misc_nn_ops\nfrom ..mobilenetv3 import mobilenet_v3_large\nfrom ..resnet import resnet50\nfrom ._utils import overwrite_eps\nfrom .anchor_utils import AnchorGenerator\nfrom .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers, _mobilenet_extractor\nfrom .generalized_rcnn import GeneralizedRCNN\nfrom .roi_heads import RoIHeads\nfrom .rpn import RPNHead, RegionProposalNetwork\nfrom .transform import GeneralizedRCNNTransform\n\n\n__all__ = [\n \"FasterRCNN\",\n \"fasterrcnn_resnet50_fpn\",\n \"fasterrcnn_mobilenet_v3_large_320_fpn\",\n \"fasterrcnn_mobilenet_v3_large_fpn\",\n]\n\n\nclass FasterRCNN(GeneralizedRCNN):\n \"\"\"\n Implements Faster R-CNN.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. 
Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores or each prediction\n\n Args:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain a out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or and OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing\n rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n rpn_score_thresh (float): during inference, only return proposals with a classification score\n greater than rpn_score_thresh\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding 
boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n\n Example::\n\n >>> import torch\n >>> import torchvision\n >>> from torchvision.models.detection import FasterRCNN\n >>> from torchvision.models.detection.rpn import AnchorGenerator\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # FasterRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define what are the feature maps that we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be ['0']. 
More generally, the backbone should return an\n >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n >>> # feature maps to use.\n >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n >>> output_size=7,\n >>> sampling_ratio=2)\n >>>\n >>> # put the pieces together inside a FasterRCNN model\n >>> model = FasterRCNN(backbone,\n >>> num_classes=2,\n >>> rpn_anchor_generator=anchor_generator,\n >>> box_roi_pool=roi_pooler)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n \"\"\"\n\n def __init__(\n self,\n backbone,\n num_classes=None,\n # transform parameters\n min_size=800,\n max_size=1333,\n image_mean=None,\n image_std=None,\n # RPN parameters\n rpn_anchor_generator=None,\n rpn_head=None,\n rpn_pre_nms_top_n_train=2000,\n rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000,\n rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7,\n rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_image=256,\n rpn_positive_fraction=0.5,\n rpn_score_thresh=0.0,\n # Box parameters\n box_roi_pool=None,\n box_head=None,\n box_predictor=None,\n box_score_thresh=0.05,\n box_nms_thresh=0.5,\n box_detections_per_img=100,\n box_fg_iou_thresh=0.5,\n box_bg_iou_thresh=0.5,\n box_batch_size_per_image=512,\n box_positive_fraction=0.25,\n bbox_reg_weights=None,\n ):\n\n if not hasattr(backbone, \"out_channels\"):\n raise ValueError(\n \"backbone should contain an attribute out_channels \"\n \"specifying the number of output channels (assumed to be the \"\n \"same for all the levels)\"\n )\n\n assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))\n assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))\n\n if num_classes is not None:\n if box_predictor is not None:\n raise ValueError(\"num_classes should be None when box_predictor is specified\")\n else:\n if box_predictor is None:\n raise ValueError(\"num_classes should not be None when box_predictor is not specified\")\n\n out_channels = backbone.out_channels\n\n if rpn_anchor_generator is None:\n anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)\n if rpn_head is None:\n rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])\n\n rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)\n rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)\n\n rpn = RegionProposalNetwork(\n rpn_anchor_generator,\n rpn_head,\n rpn_fg_iou_thresh,\n rpn_bg_iou_thresh,\n rpn_batch_size_per_image,\n rpn_positive_fraction,\n rpn_pre_nms_top_n,\n rpn_post_nms_top_n,\n rpn_nms_thresh,\n score_thresh=rpn_score_thresh,\n )\n\n if box_roi_pool is None:\n box_roi_pool = MultiScaleRoIAlign(featmap_names=[\"0\", \"1\", \"2\", \"3\"], output_size=7, sampling_ratio=2)\n\n if box_head is None:\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)\n\n if box_predictor is None:\n representation_size = 1024\n box_predictor = FastRCNNPredictor(representation_size, num_classes)\n\n roi_heads = RoIHeads(\n # Box\n box_roi_pool,\n box_head,\n box_predictor,\n box_fg_iou_thresh,\n box_bg_iou_thresh,\n box_batch_size_per_image,\n box_positive_fraction,\n bbox_reg_weights,\n box_score_thresh,\n box_nms_thresh,\n box_detections_per_img,\n 
)\n\n if image_mean is None:\n image_mean = [0.485, 0.456, 0.406]\n if image_std is None:\n image_std = [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n\n super().__init__(backbone, rpn, roi_heads, transform)\n\n\nclass TwoMLPHead(nn.Module):\n \"\"\"\n Standard heads for FPN-based models\n\n Args:\n in_channels (int): number of input channels\n representation_size (int): size of the intermediate representation\n \"\"\"\n\n def __init__(self, in_channels, representation_size):\n super().__init__()\n\n self.fc6 = nn.Linear(in_channels, representation_size)\n self.fc7 = nn.Linear(representation_size, representation_size)\n\n def forward(self, x):\n x = x.flatten(start_dim=1)\n\n x = F.relu(self.fc6(x))\n x = F.relu(self.fc7(x))\n\n return x\n\n\nclass FastRCNNPredictor(nn.Module):\n \"\"\"\n Standard classification + bounding box regression layers\n for Fast R-CNN.\n\n Args:\n in_channels (int): number of input channels\n num_classes (int): number of output classes (including background)\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n super().__init__()\n self.cls_score = nn.Linear(in_channels, num_classes)\n self.bbox_pred = nn.Linear(in_channels, num_classes * 4)\n\n def forward(self, x):\n if x.dim() == 4:\n assert list(x.shape[2:]) == [1, 1]\n x = x.flatten(start_dim=1)\n scores = self.cls_score(x)\n bbox_deltas = self.bbox_pred(x)\n\n return scores, bbox_deltas\n\n\nmodel_urls = {\n \"fasterrcnn_resnet50_fpn_coco\": \"https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth\",\n \"fasterrcnn_mobilenet_v3_large_320_fpn_coco\": \"https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth\",\n \"fasterrcnn_mobilenet_v3_large_fpn_coco\": \"https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth\",\n}\n\n\ndef fasterrcnn_resnet50_fpn(\n pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs\n):\n \"\"\"\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n Reference: `\"Faster R-CNN: Towards Real-Time Object Detection with\n Region Proposal Networks\" <https://arxiv.org/abs/1506.01497>`_.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. 
The fields of the ``Dict`` are as\n follows, where ``N`` is the number of detections:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the predicted labels for each detection\n - scores (``Tensor[N]``): the scores of each detection\n\n For more details on the output, you may refer to :ref:`instance_seg_output`.\n\n Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n >>> # For training\n >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)\n >>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]\n >>> labels = torch.randint(1, 91, (4, 11))\n >>> images = list(image for image in images)\n >>> targets = []\n >>> for i in range(len(images)):\n >>> d = {}\n >>> d['boxes'] = boxes[i]\n >>> d['labels'] = labels[i]\n >>> targets.append(d)\n >>> output = model(images, targets)\n >>> # For inference\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n >>>\n >>> # optionally, if you want to export the model to ONNX:\n >>> torch.onnx.export(model, x, \"faster_rcnn.onnx\", opset_version = 11)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. 
If ``None`` is\n passed (the default) this value is set to 3.\n \"\"\"\n is_trained = pretrained or pretrained_backbone\n trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)\n norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d\n\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n\n backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)\n backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)\n model = FasterRCNN(backbone, num_classes, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[\"fasterrcnn_resnet50_fpn_coco\"], progress=progress)\n model.load_state_dict(state_dict)\n overwrite_eps(model, 0.0)\n return model\n\n\ndef _fasterrcnn_mobilenet_v3_large_fpn(\n weights_name,\n pretrained=False,\n progress=True,\n num_classes=91,\n pretrained_backbone=True,\n trainable_backbone_layers=None,\n **kwargs,\n):\n is_trained = pretrained or pretrained_backbone\n trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3)\n norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d\n\n if pretrained:\n pretrained_backbone = False\n\n backbone = mobilenet_v3_large(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)\n backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers)\n\n anchor_sizes = (\n (\n 32,\n 64,\n 128,\n 256,\n 512,\n ),\n ) * 3\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n\n model = FasterRCNN(\n backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs\n )\n if pretrained:\n if model_urls.get(weights_name, None) is None:\n raise ValueError(f\"No checkpoint is available for model {weights_name}\")\n state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef fasterrcnn_mobilenet_v3_large_320_fpn(\n pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs\n):\n \"\"\"\n Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tunned for mobile use-cases.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. 
If ``None`` is\n passed (the default) this value is set to 3.\n \"\"\"\n weights_name = \"fasterrcnn_mobilenet_v3_large_320_fpn_coco\"\n defaults = {\n \"min_size\": 320,\n \"max_size\": 640,\n \"rpn_pre_nms_top_n_test\": 150,\n \"rpn_post_nms_top_n_test\": 150,\n \"rpn_score_thresh\": 0.05,\n }\n\n kwargs = {**defaults, **kwargs}\n return _fasterrcnn_mobilenet_v3_large_fpn(\n weights_name,\n pretrained=pretrained,\n progress=progress,\n num_classes=num_classes,\n pretrained_backbone=pretrained_backbone,\n trainable_backbone_layers=trainable_backbone_layers,\n **kwargs,\n )\n\n\ndef fasterrcnn_mobilenet_v3_large_fpn(\n pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs\n):\n \"\"\"\n Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n \"\"\"\n weights_name = \"fasterrcnn_mobilenet_v3_large_fpn_coco\"\n defaults = {\n \"rpn_score_thresh\": 0.05,\n }\n\n kwargs = {**defaults, **kwargs}\n return _fasterrcnn_mobilenet_v3_large_fpn(\n weights_name,\n pretrained=pretrained,\n progress=progress,\n num_classes=num_classes,\n pretrained_backbone=pretrained_backbone,\n trainable_backbone_layers=trainable_backbone_layers,\n **kwargs,\n )\n", "import warnings\nfrom collections import OrderedDict\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom ...ops import boxes as box_ops\nfrom ...utils import _log_api_usage_once\nfrom .. import vgg\nfrom . import _utils as det_utils\nfrom .anchor_utils import DefaultBoxGenerator\nfrom .backbone_utils import _validate_trainable_layers\nfrom .transform import GeneralizedRCNNTransform\n\n__all__ = [\"SSD\", \"ssd300_vgg16\"]\n\nmodel_urls = {\n \"ssd300_vgg16_coco\": \"https://download.pytorch.org/models/ssd300_vgg16_coco-b556d3b4.pth\",\n}\n\nbackbone_urls = {\n # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses the\n # same input standardization method as the paper. 
Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth\n # Only the `features` weights have proper values, those on the `classifier` module are filled with nans.\n \"vgg16_features\": \"https://download.pytorch.org/models/vgg16_features-amdegroot-88682ab5.pth\"\n}\n\n\ndef _xavier_init(conv: nn.Module):\n for layer in conv.modules():\n if isinstance(layer, nn.Conv2d):\n torch.nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n torch.nn.init.constant_(layer.bias, 0.0)\n\n\nclass SSDHead(nn.Module):\n def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):\n super().__init__()\n self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes)\n self.regression_head = SSDRegressionHead(in_channels, num_anchors)\n\n def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:\n return {\n \"bbox_regression\": self.regression_head(x),\n \"cls_logits\": self.classification_head(x),\n }\n\n\nclass SSDScoringHead(nn.Module):\n def __init__(self, module_list: nn.ModuleList, num_columns: int):\n super().__init__()\n self.module_list = module_list\n self.num_columns = num_columns\n\n def _get_result_from_module_list(self, x: Tensor, idx: int) -> Tensor:\n \"\"\"\n This is equivalent to self.module_list[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = len(self.module_list)\n if idx < 0:\n idx += num_blocks\n out = x\n for i, module in enumerate(self.module_list):\n if i == idx:\n out = module(x)\n return out\n\n def forward(self, x: List[Tensor]) -> Tensor:\n all_results = []\n\n for i, features in enumerate(x):\n results = self._get_result_from_module_list(features, i)\n\n # Permute output from (N, A * K, H, W) to (N, HWA, K).\n N, _, H, W = results.shape\n results = results.view(N, -1, self.num_columns, H, W)\n results = results.permute(0, 3, 4, 1, 2)\n results = results.reshape(N, -1, self.num_columns) # Size=(N, HWA, K)\n\n all_results.append(results)\n\n return torch.cat(all_results, dim=1)\n\n\nclass SSDClassificationHead(SSDScoringHead):\n def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int):\n cls_logits = nn.ModuleList()\n for channels, anchors in zip(in_channels, num_anchors):\n cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1))\n _xavier_init(cls_logits)\n super().__init__(cls_logits, num_classes)\n\n\nclass SSDRegressionHead(SSDScoringHead):\n def __init__(self, in_channels: List[int], num_anchors: List[int]):\n bbox_reg = nn.ModuleList()\n for channels, anchors in zip(in_channels, num_anchors):\n bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1))\n _xavier_init(bbox_reg)\n super().__init__(bbox_reg, 4)\n\n\nclass SSD(nn.Module):\n \"\"\"\n Implements SSD architecture from `\"SSD: Single Shot MultiBox Detector\" <https://arxiv.org/abs/1512.02325>`_.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. 
Different images can have different sizes but they will be resized\n to a fixed size before passing it to the backbone.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows, where ``N`` is the number of detections:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the predicted labels for each detection\n - scores (Tensor[N]): the scores for each detection\n\n Args:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain an out_channels attribute with the list of the output channels of\n each feature map. The backbone should return a single Tensor or an OrderedDict[Tensor].\n anchor_generator (DefaultBoxGenerator): module that generates the default boxes for a\n set of feature maps.\n size (Tuple[int, int]): the width and height to which images will be rescaled before feeding them\n to the backbone.\n num_classes (int): number of output classes of the model (including the background).\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n head (nn.Module, optional): Module run on top of the backbone features. Defaults to a module containing\n a classification and regression module.\n score_thresh (float): Score threshold used for postprocessing the detections.\n nms_thresh (float): NMS threshold used for postprocessing the detections.\n detections_per_img (int): Number of best detections to keep after NMS.\n iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training.\n topk_candidates (int): Number of best detections to keep before NMS.\n positive_fraction (float): a number between 0 and 1 which indicates the proportion of positive\n proposals used during the training of the classification head. 
It is used to estimate the negative to\n positive ratio.\n \"\"\"\n\n __annotations__ = {\n \"box_coder\": det_utils.BoxCoder,\n \"proposal_matcher\": det_utils.Matcher,\n }\n\n def __init__(\n self,\n backbone: nn.Module,\n anchor_generator: DefaultBoxGenerator,\n size: Tuple[int, int],\n num_classes: int,\n image_mean: Optional[List[float]] = None,\n image_std: Optional[List[float]] = None,\n head: Optional[nn.Module] = None,\n score_thresh: float = 0.01,\n nms_thresh: float = 0.45,\n detections_per_img: int = 200,\n iou_thresh: float = 0.5,\n topk_candidates: int = 400,\n positive_fraction: float = 0.25,\n ):\n super().__init__()\n _log_api_usage_once(self)\n\n self.backbone = backbone\n\n self.anchor_generator = anchor_generator\n\n self.box_coder = det_utils.BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))\n\n if head is None:\n if hasattr(backbone, \"out_channels\"):\n out_channels = backbone.out_channels\n else:\n out_channels = det_utils.retrieve_out_channels(backbone, size)\n\n assert len(out_channels) == len(anchor_generator.aspect_ratios)\n\n num_anchors = self.anchor_generator.num_anchors_per_location()\n head = SSDHead(out_channels, num_anchors, num_classes)\n self.head = head\n\n self.proposal_matcher = det_utils.SSDMatcher(iou_thresh)\n\n if image_mean is None:\n image_mean = [0.485, 0.456, 0.406]\n if image_std is None:\n image_std = [0.229, 0.224, 0.225]\n self.transform = GeneralizedRCNNTransform(\n min(size), max(size), image_mean, image_std, size_divisible=1, fixed_size=size\n )\n\n self.score_thresh = score_thresh\n self.nms_thresh = nms_thresh\n self.detections_per_img = detections_per_img\n self.topk_candidates = topk_candidates\n self.neg_to_pos_ratio = (1.0 - positive_fraction) / positive_fraction\n\n # used only on torchscript mode\n self._has_warned = False\n\n @torch.jit.unused\n def eager_outputs(\n self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]]\n ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:\n if self.training:\n return losses\n\n return detections\n\n def compute_loss(\n self,\n targets: List[Dict[str, Tensor]],\n head_outputs: Dict[str, Tensor],\n anchors: List[Tensor],\n matched_idxs: List[Tensor],\n ) -> Dict[str, Tensor]:\n bbox_regression = head_outputs[\"bbox_regression\"]\n cls_logits = head_outputs[\"cls_logits\"]\n\n # Match original targets with default boxes\n num_foreground = 0\n bbox_loss = []\n cls_targets = []\n for (\n targets_per_image,\n bbox_regression_per_image,\n cls_logits_per_image,\n anchors_per_image,\n matched_idxs_per_image,\n ) in zip(targets, bbox_regression, cls_logits, anchors, matched_idxs):\n # produce the matching between boxes and targets\n foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0]\n foreground_matched_idxs_per_image = matched_idxs_per_image[foreground_idxs_per_image]\n num_foreground += foreground_matched_idxs_per_image.numel()\n\n # Calculate regression loss\n matched_gt_boxes_per_image = targets_per_image[\"boxes\"][foreground_matched_idxs_per_image]\n bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :]\n anchors_per_image = anchors_per_image[foreground_idxs_per_image, :]\n target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image)\n bbox_loss.append(\n torch.nn.functional.smooth_l1_loss(bbox_regression_per_image, target_regression, reduction=\"sum\")\n )\n\n # Estimate ground truth for class targets\n gt_classes_target = torch.zeros(\n (cls_logits_per_image.size(0),),\n 
dtype=targets_per_image[\"labels\"].dtype,\n device=targets_per_image[\"labels\"].device,\n )\n gt_classes_target[foreground_idxs_per_image] = targets_per_image[\"labels\"][\n foreground_matched_idxs_per_image\n ]\n cls_targets.append(gt_classes_target)\n\n bbox_loss = torch.stack(bbox_loss)\n cls_targets = torch.stack(cls_targets)\n\n # Calculate classification loss\n num_classes = cls_logits.size(-1)\n cls_loss = F.cross_entropy(cls_logits.view(-1, num_classes), cls_targets.view(-1), reduction=\"none\").view(\n cls_targets.size()\n )\n\n # Hard Negative Sampling\n foreground_idxs = cls_targets > 0\n num_negative = self.neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True)\n # num_negative[num_negative < self.neg_to_pos_ratio] = self.neg_to_pos_ratio\n negative_loss = cls_loss.clone()\n negative_loss[foreground_idxs] = -float(\"inf\") # use -inf to detect positive values that creeped in the sample\n values, idx = negative_loss.sort(1, descending=True)\n # background_idxs = torch.logical_and(idx.sort(1)[1] < num_negative, torch.isfinite(values))\n background_idxs = idx.sort(1)[1] < num_negative\n\n N = max(1, num_foreground)\n return {\n \"bbox_regression\": bbox_loss.sum() / N,\n \"classification\": (cls_loss[foreground_idxs].sum() + cls_loss[background_idxs].sum()) / N,\n }\n\n def forward(\n self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None\n ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n\n if self.training:\n assert targets is not None\n for target in targets:\n boxes = target[\"boxes\"]\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(f\"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.\")\n else:\n raise ValueError(f\"Expected target boxes to be of type Tensor, got {type(boxes)}.\")\n\n # get the original image sizes\n original_image_sizes: List[Tuple[int, int]] = []\n for img in images:\n val = img.shape[-2:]\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n\n # transform the input\n images, targets = self.transform(images, targets)\n\n # Check for degenerate boxes\n if targets is not None:\n for target_idx, target in enumerate(targets):\n boxes = target[\"boxes\"]\n degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n if degenerate_boxes.any():\n bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]\n degen_bb: List[float] = boxes[bb_idx].tolist()\n raise ValueError(\n \"All bounding boxes should have positive height and width.\"\n f\" Found invalid box {degen_bb} for target at index {target_idx}.\"\n )\n\n # get the features from the backbone\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict([(\"0\", features)])\n\n features = list(features.values())\n\n # compute the ssd heads outputs using the features\n head_outputs = self.head(features)\n\n # create the set of anchors\n anchors = self.anchor_generator(images, features)\n\n losses = {}\n detections: List[Dict[str, Tensor]] = []\n if self.training:\n assert targets is not None\n\n matched_idxs = []\n for anchors_per_image, targets_per_image in zip(anchors, targets):\n if targets_per_image[\"boxes\"].numel() == 0:\n matched_idxs.append(\n torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)\n )\n continue\n\n match_quality_matrix = box_ops.box_iou(targets_per_image[\"boxes\"], 
anchors_per_image)\n matched_idxs.append(self.proposal_matcher(match_quality_matrix))\n\n losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs)\n else:\n detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes)\n detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)\n\n if torch.jit.is_scripting():\n if not self._has_warned:\n warnings.warn(\"SSD always returns a (Losses, Detections) tuple in scripting\")\n self._has_warned = True\n return losses, detections\n return self.eager_outputs(losses, detections)\n\n def postprocess_detections(\n self, head_outputs: Dict[str, Tensor], image_anchors: List[Tensor], image_shapes: List[Tuple[int, int]]\n ) -> List[Dict[str, Tensor]]:\n bbox_regression = head_outputs[\"bbox_regression\"]\n pred_scores = F.softmax(head_outputs[\"cls_logits\"], dim=-1)\n\n num_classes = pred_scores.size(-1)\n device = pred_scores.device\n\n detections: List[Dict[str, Tensor]] = []\n\n for boxes, scores, anchors, image_shape in zip(bbox_regression, pred_scores, image_anchors, image_shapes):\n boxes = self.box_coder.decode_single(boxes, anchors)\n boxes = box_ops.clip_boxes_to_image(boxes, image_shape)\n\n image_boxes = []\n image_scores = []\n image_labels = []\n for label in range(1, num_classes):\n score = scores[:, label]\n\n keep_idxs = score > self.score_thresh\n score = score[keep_idxs]\n box = boxes[keep_idxs]\n\n # keep only topk scoring predictions\n num_topk = det_utils._topk_min(score, self.topk_candidates, 0)\n score, idxs = score.topk(num_topk)\n box = box[idxs]\n\n image_boxes.append(box)\n image_scores.append(score)\n image_labels.append(torch.full_like(score, fill_value=label, dtype=torch.int64, device=device))\n\n image_boxes = torch.cat(image_boxes, dim=0)\n image_scores = torch.cat(image_scores, dim=0)\n image_labels = torch.cat(image_labels, dim=0)\n\n # non-maximum suppression\n keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh)\n keep = keep[: self.detections_per_img]\n\n detections.append(\n {\n \"boxes\": image_boxes[keep],\n \"scores\": image_scores[keep],\n \"labels\": image_labels[keep],\n }\n )\n return detections\n\n\nclass SSDFeatureExtractorVGG(nn.Module):\n def __init__(self, backbone: nn.Module, highres: bool):\n super().__init__()\n\n _, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d))\n\n # Patch ceil_mode for maxpool3 to get the same WxH output sizes as the paper\n backbone[maxpool3_pos].ceil_mode = True\n\n # parameters used for L2 regularization + rescaling\n self.scale_weight = nn.Parameter(torch.ones(512) * 20)\n\n # Multiple Feature maps - page 4, Fig 2 of SSD paper\n self.features = nn.Sequential(*backbone[:maxpool4_pos]) # until conv4_3\n\n # SSD300 case - page 4, Fig 2 of SSD paper\n extra = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(1024, 256, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2), # conv8_2\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(512, 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2), # conv9_2\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 256, kernel_size=3), # conv10_2\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 256, kernel_size=3), # conv11_2\n 
nn.ReLU(inplace=True),\n ),\n ]\n )\n if highres:\n # Additional layers for the SSD512 case. See page 11, footernote 5.\n extra.append(\n nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 256, kernel_size=4), # conv12_2\n nn.ReLU(inplace=True),\n )\n )\n _xavier_init(extra)\n\n fc = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=False), # add modified maxpool5\n nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6), # FC6 with atrous\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1), # FC7\n nn.ReLU(inplace=True),\n )\n _xavier_init(fc)\n extra.insert(\n 0,\n nn.Sequential(\n *backbone[maxpool4_pos:-1], # until conv5_3, skip maxpool5\n fc,\n ),\n )\n self.extra = extra\n\n def forward(self, x: Tensor) -> Dict[str, Tensor]:\n # L2 regularization + Rescaling of 1st block's feature map\n x = self.features(x)\n rescaled = self.scale_weight.view(1, -1, 1, 1) * F.normalize(x)\n output = [rescaled]\n\n # Calculating Feature maps for the rest blocks\n for block in self.extra:\n x = block(x)\n output.append(x)\n\n return OrderedDict([(str(i), v) for i, v in enumerate(output)])\n\n\ndef _vgg_extractor(backbone: vgg.VGG, highres: bool, trainable_layers: int):\n backbone = backbone.features\n # Gather the indices of maxpools. These are the locations of output blocks.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if isinstance(b, nn.MaxPool2d)][:-1]\n num_stages = len(stage_indices)\n\n # find the index of the layer from which we wont freeze\n assert 0 <= trainable_layers <= num_stages\n freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]\n\n for b in backbone[:freeze_before]:\n for parameter in b.parameters():\n parameter.requires_grad_(False)\n\n return SSDFeatureExtractorVGG(backbone, highres)\n\n\ndef ssd300_vgg16(\n pretrained: bool = False,\n progress: bool = True,\n num_classes: int = 91,\n pretrained_backbone: bool = True,\n trainable_backbone_layers: Optional[int] = None,\n **kwargs: Any,\n):\n \"\"\"Constructs an SSD model with input size 300x300 and a VGG16 backbone.\n\n Reference: `\"SSD: Single Shot MultiBox Detector\" <https://arxiv.org/abs/1512.02325>`_.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes but they will be resized\n to a fixed size before passing it to the backbone.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. 
The fields of the Dict are as\n follows, where ``N`` is the number of detections:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (Int64Tensor[N]): the predicted labels for each detection\n - scores (Tensor[N]): the scores for each detection\n\n Example:\n\n >>> model = torchvision.models.detection.ssd300_vgg16(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 4.\n \"\"\"\n if \"size\" in kwargs:\n warnings.warn(\"The size of the model is already fixed; ignoring the argument.\")\n\n trainable_backbone_layers = _validate_trainable_layers(\n pretrained or pretrained_backbone, trainable_backbone_layers, 5, 4\n )\n\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n\n # Use custom backbones more appropriate for SSD\n backbone = vgg.vgg16(pretrained=False, progress=progress)\n if pretrained_backbone:\n state_dict = load_state_dict_from_url(backbone_urls[\"vgg16_features\"], progress=progress)\n backbone.load_state_dict(state_dict)\n\n backbone = _vgg_extractor(backbone, False, trainable_backbone_layers)\n anchor_generator = DefaultBoxGenerator(\n [[2], [2, 3], [2, 3], [2, 3], [2], [2]],\n scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],\n steps=[8, 16, 32, 64, 100, 300],\n )\n\n defaults = {\n # Rescale the input in a way compatible to the backbone\n \"image_mean\": [0.48235, 0.45882, 0.40784],\n \"image_std\": [1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0], # undo the 0-1 scaling of toTensor\n }\n kwargs = {**defaults, **kwargs}\n model = SSD(backbone, anchor_generator, (300, 300), num_classes, **kwargs)\n if pretrained:\n weights_name = \"ssd300_vgg16_coco\"\n if model_urls.get(weights_name, None) is None:\n raise ValueError(f\"No checkpoint is available for model {weights_name}\")\n state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)\n model.load_state_dict(state_dict)\n return model\n" ]
[ [ "torch.allclose", "torch.jit.trace", "torch.cat" ], [ "torch.nn.Linear" ], [ "torch.nn.functional.normalize", "torch.nn.Sequential", "torch.nn.functional.softmax", "torch.ones", "torch.cat", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.full_like", "torch.jit.is_scripting", "torch.where", "torch.nn.init.xavier_uniform_", "torch.stack", "torch.nn.functional.smooth_l1_loss", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmis-lab/BERN2
[ "0eaf635672b6c952984e16a165ce7e7f7805c675" ]
[ "multi_ner/training/run_eval.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Fine-tuning the library models for named entity recognition on CoNLL-2003. \"\"\"\n\n\nimport logging\nimport os\nimport sys\nimport pdb\nimport subprocess\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom seqeval.metrics import f1_score, precision_score, recall_score\nfrom torch import nn\n\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoModel,\n AutoTokenizer,\n EvalPrediction,\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n set_seed,\n)\nimport wandb\nfrom utils_ner import *\nfrom modeling import *\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n use_fast: bool = field(default=False, metadata={\"help\": \"Set this flag to use fast tokenization.\"})\n # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,\n # or just modify its tokenizer_config.json.\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n data_dir: str = field(\n metadata={\"help\": \"The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.\"}\n )\n labels: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\"},\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n wandb_name: str = field(\n default=None, metadata={'help': \"Name of Wandb runs\"},\n )\n is_pmi: bool = field(\n default=False, metadata={'help': \"To use Pointwise Mututal Information in debiasing\"}\n )\n eval_data_name: str = field(\n default=\"mem\", metadata={'help': \"Evaluation oo Memorization, Synonym, Concept, Seen, and Unseen\"}\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n wandb.init(project=\"bio-saliency\", name=data_args.wandb_name)\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n # Prepare CONLL-2003 task\n entity_name = data_args.data_dir.split('/')[-1]\n if entity_name in [\"CoNLL2003NER\", \"OntoNotes5.0\", \"WNUT2017\"]:\n labels = get_labels(data_args.labels)\n else:\n labels = get_bio_labels(data_args.labels)\n label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}\n num_labels = len(labels)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n id2label=label_map,\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast,\n )\n model = NER.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n num_labels=num_labels,\n cache_dir=model_args.cache_dir,\n )\n '''\n model_to_save = AutoModel.from_pretrained(\n model_args.model_name_or_path,\n 
from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n model_to_save.save_pretrained(training_args.output_dir)\n tokenizer.save_pretrained(training_args.output_dir)\n import pdb; pdb.set_trace()\n '''\n\n if data_args.eval_data_name == 'mem':\n Split = Split_mem\n elif data_args.eval_data_name == 'syn':\n Split = Split_syn\n elif data_args.eval_data_name == 'con':\n Split = Split_con\n elif data_args.eval_data_name == 'seen':\n Split = Split_seen\n elif data_args.eval_data_name == 'unseen':\n Split = Split_unseen\n elif data_args.eval_data_name == 'len1':\n Split = Split_len1\n elif data_args.eval_data_name == 'len2':\n Split = Split_len2\n elif data_args.eval_data_name == 'len3':\n Split = Split_len3\n elif data_args.eval_data_name == 'len4':\n Split = Split_len4\n elif data_args.eval_data_name == 'len5':\n Split = Split_len5\n elif data_args.eval_data_name == 'len6':\n Split = Split_len6\n elif data_args.eval_data_name == 'len7':\n Split = Split_len7\n elif data_args.eval_data_name == 'len8':\n Split = Split_len8\n\n \n # Get datasets\n train_dataset = (\n NerDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.train,\n is_pmi=data_args.is_pmi,\n )\n if training_args.do_train\n else None\n )\n eval_dataset = (\n NerDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.dev,\n is_pmi=data_args.is_pmi,\n eval_data_name=data_args.eval_data_name,\n )\n if training_args.do_eval\n else None\n )\n\n def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:\n preds = np.argmax(predictions, axis=2)\n\n batch_size, seq_len = preds.shape\n\n out_label_list = [[] for _ in range(batch_size)]\n preds_list = [[] for _ in range(batch_size)]\n \n for i in range(batch_size):\n for j in range(seq_len):\n if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:\n out_label_list[i].append(label_map[label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n return preds_list, out_label_list\n\n def compute_metrics(p: EvalPrediction) -> Dict:\n preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)\n \n return {\n \"precision\": precision_score(out_label_list, preds_list),\n \"recall\": recall_score(out_label_list, preds_list),\n \"f1\": f1_score(out_label_list, preds_list),\n }\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None\n )\n # trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n result = trainer.evaluate()\n \n output_eval_file = os.path.join(training_args.output_dir, \"eval_results.txt\")\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n 
logger.info(\"***** Eval results *****\")\n for key, value in result.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n results.update(result)\n \n \n # Predict\n if training_args.do_predict:\n test_dataset = NerDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.test,\n is_pmi=data_args.is_pmi,\n eval_data_name=data_args.eval_data_name,\n )\n\n predictions, label_ids, metrics = trainer.predict(test_dataset)\n preds_list, _ = align_predictions(predictions, label_ids)\n \n # Save predictions\n output_test_results_file = os.path.join(training_args.output_dir, \"test_results.txt\")\n if trainer.is_world_master():\n with open(output_test_results_file, \"w\") as writer:\n logger.info(\"***** Test results *****\")\n for key, value in metrics.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n \n if data_args.eval_data_name in ['len1', 'len2', 'len3', 'len4', 'len5', 'len6', 'len7', 'len8']:\n wandb.log({\"test_precision\": metrics['eval_precision'],\n \"test_recall\": metrics['eval_recall'],\n \"test_f1\": metrics['eval_f1']})\n else:\n wandb.log({\"test_recall\": metrics['eval_recall']})\n \n output_test_predictions_file = os.path.join(training_args.output_dir, \"test_predictions.txt\")\n if trainer.is_world_master():\n with open(output_test_predictions_file, \"w\") as writer:\n with open(os.path.join(data_args.data_dir, \"test_%s.txt\" % data_args.eval_data_name), \"r\") as f:\n example_id = 0\n for line in f:\n if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n writer.write(line)\n if not preds_list[example_id]:\n example_id += 1\n elif preds_list[example_id]:\n entity_label = preds_list[example_id].pop(0)\n if entity_name == 'WNUT2017':\n output_line = line.split()[0] + \"\\t\" + line.split()[1] + \"\\t\" + entity_label + \"\\n\"\n else:\n output_line = line.split()[0] + \" \" + entity_label + \"\\n\"\n writer.write(output_line)\n else:\n logger.warning(\n \"Maximum sequence length exceeded: No prediction for '%s'.\", line.split()[0]\n )\n \n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cosunae/postproc_np_products
[ "90d66f8e6bed1cae10f4b1b5ff21bf4e4aad26a1" ]
[ "thetavi.py" ]
[ "#!/usr/bin/python\nimport numpy as np\nimport cfgrib\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport cProfile\nimport pstats\nimport io\nimport time\nfrom pstats import SortKey\nimport dask\n\n\ndask.config.set(scheduler=\"threads\", num_workers=8)\n\n# pr = cProfile.Profile()\n# pr.enable()\n\npc_g = 9.80665\n\n\ndef destagger(u, du):\n du[1:-1, :] += u[2:, :] + u[0:-2, :]\n\n\ndef level_range(index, short_name):\n # print(index.header_values)\n levels = index.subindex(\n filter_by_keys={\"shortName\": short_name, \"typeOfLevel\": \"generalVerticalLayer\"}\n ).header_values[\"level:float\"]\n\n return (min(levels), max(levels))\n\n\ndef fthetav(p, t, qv):\n pc_r_d = 287.05\n pc_r_v = 461.51 # Gas constant for water vapour[J kg-1 K-1]\n pc_cp_d = 1005.0\n pc_rvd = pc_r_v / pc_r_d\n\n pc_rdocp = pc_r_d / pc_cp_d\n pc_rvd_o = pc_rvd - 1.0\n\n # Reference surface pressure for computation of potential temperature\n p0 = 1.0e5\n return (p0 / p) ** pc_rdocp * t * (1.0 + (pc_rvd_o * qv / (1.0 - qv)))\n\n\ndef fbrn(p, t, qv, u, v, hhl, hsurf):\n thetav = fthetav(p, t, qv)\n # thetav.data.visualize(filename='thetav.svg')\n\n thetav_sum = thetav.isel(generalVerticalLayer=slice(None, None, -1)).cumsum(\n dim=\"generalVerticalLayer\"\n )\n\n # dask.delayed(thetav_sum.data).visualize(filename='thetasum.svg')\n\n nlevels_xr = xr.DataArray(\n data=np.arange(nlevels, 0, -1), dims=[\"generalVerticalLayer\"]\n )\n\n brn = (\n pc_g\n * (hhl - hsurf)\n * (thetav - thetav.isel(generalVerticalLayer=79))\n / ((thetav_sum / nlevels_xr) * (u ** 2 + v ** 2))\n )\n return brn\n\n\ndef fbrn2(p, t, qv, u, v, hhl, hsurf):\n thetav = fthetav(p, t, qv)\n # thetav.data.visualize(filename='thetav.svg')\n\n # thetav_sum = thetav.isel(generalVerticalLayer=slice(None, None, -1)).cumsum(\n # dim=\"generalVerticalLayer\"\n # )\n\n # dask.delayed(thetav_sum.data).visualize(filename='thetasum.svg')\n\n # nlevels_xr = xr.DataArray(\n # data=np.arange(nlevels, 0, -1), dims=[\"generalVerticalLayer\"]\n # )\n\n brn = (\n pc_g\n * (hhl - hsurf)\n * (thetav - thetav.isel(generalVerticalLayer=79))\n / (u ** 2 + v ** 2)\n )\n return brn\n\n\ndef profile(pr):\n pr.disable()\n s = io.StringIO()\n sortby = SortKey.CUMULATIVE\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n\n\ndef load_data(fields, chunk_size=10):\n\n chunk_arg = {}\n if chunk_size:\n chunk_arg = {\"chunks\": {\"generalVerticalLayer\": chunk_size}}\n\n dss = cfgrib.open_datasets(\n data_dir + \"/lfff00000000\",\n backend_kwargs={\n \"read_keys\": [\"typeOfLevel\", \"gridType\"],\n \"filter_by_keys\": {\"typeOfLevel\": \"generalVerticalLayer\"},\n },\n encode_cf=(\"time\", \"geography\", \"vertical\"),\n **chunk_arg\n )\n\n massds = dss[0]\n uds = cfgrib.open_dataset(\n data_dir + \"/lfff00000000\",\n backend_kwargs={\n \"read_keys\": [\"cfVarName\"],\n \"filter_by_keys\": {\"cfVarName\": \"u\"},\n },\n encode_cf=(\"time\", \"geography\", \"vertical\"),\n **chunk_arg\n )\n vds = cfgrib.open_dataset(\n data_dir + \"/lfff00000000\",\n backend_kwargs={\n \"read_keys\": [\"cfVarName\"],\n \"filter_by_keys\": {\"cfVarName\": \"v\"},\n },\n encode_cf=(\"time\", \"geography\", \"vertical\"),\n **chunk_arg\n )\n hsurf_ds = cfgrib.open_dataset(\n data_dir + \"/lfff00000000c\",\n backend_kwargs={\n \"read_keys\": [\"shortName\"],\n \"filter_by_keys\": {\"shortName\": \"HSURF\"},\n },\n encode_cf=(\"time\", \"geography\", \"vertical\"),\n **chunk_arg\n )\n if chunk_size:\n chunk_arg = {\"chunks\": {\"generalVertical\": 
chunk_size}}\n\n cds = cfgrib.open_dataset(\n data_dir + \"/lfff00000000c\",\n backend_kwargs={\n \"read_keys\": [\"typeOfLevel\", \"gridType\"],\n \"filter_by_keys\": {\"typeOfLevel\": \"generalVertical\"},\n },\n encode_cf=(\"time\", \"geography\", \"vertical\"),\n **chunk_arg\n )\n hhl = cds[\"HHL\"].rename({\"generalVertical\": \"generalVerticalLayer\"})\n\n return (\n massds[\"P\"],\n massds[\"T\"],\n massds[\"QV\"],\n hhl,\n hsurf_ds[\"HSURF\"],\n uds[\"U\"],\n vds[\"V\"],\n )\n\n\nif __name__ == \"__main__\":\n\n scheduler = \"synchronous\"\n cluster = None\n if scheduler == \"distributed\":\n from dask.distributed import Client\n\n from dask_jobqueue import SLURMCluster\n\n cluster = SLURMCluster(\n queue=\"postproc\",\n cores=2,\n memory=\"24GB\",\n job_extra=[\"--exclusive\"],\n )\n cluster.scale(jobs=4)\n\n client = None\n client = Client(cluster)\n elif scheduler == \"localcluster\":\n from dask.distributed import Client, LocalCluster\n\n cluster = LocalCluster(n_workers=16, threads_per_worker=2)\n client = Client(cluster)\n elif scheduler == \"threads\":\n from multiprocessing.pool import ThreadPool\n\n dask.config.set(pool=ThreadPool(1))\n # dask.config.set(scheduler=\"threads\")\n elif scheduler == \"synchronous\":\n dask.config.set(\n scheduler=\"synchronous\"\n ) # overwrite default with single-threaded scheduler\n elif scheduler == \"processes\":\n from multiprocessing.pool import Pool\n\n dask.config.set(pool=Pool(2))\n\n data_dir = \"/scratch/cosuna/postproc_np_products/grib_files/cosmo-1e/\"\n\n index = cfgrib.open_fileindex(\n \"grib_files/cosmo-1e/lfff00000000\",\n index_keys=cfgrib.dataset.INDEX_KEYS\n + [\"time\", \"step\"]\n + [\"shortName\", \"paramId\"],\n )\n\n if cluster:\n while cluster.status != dask.distributed.core.Status.running:\n time.sleep(1)\n print(\"CLUSTER ALLOCATED\", cluster.status, cluster.workers)\n\n import sys\n\n sys.stdin.read(1)\n\n levels = level_range(index, \"T\")\n nlevels = int(levels[1]) - int(levels[0]) + 1\n\n start = time.time()\n # with dask.distributed.get_task_stream(\n # plot=\"save\", filename=\"task-stream_localc_p16_2t_chunk4.html\"\n # ) as ts:\n # pr = cProfile.Profile()\n # pr.enable()\n p, t, qv, hhl, hsurf, u, v = load_data([], chunk_size=10)\n # profile(pr)\n\n end = time.time()\n print(\"Time elapsed (load data):\", end - start)\n\n thetav_ds = [\n xr.Dataset(data_vars={\"thetav\": fthetav(p * (1 + i * 0.01), t, qv)})\n for i in range(10)\n ]\n\n paths = [\"thetav_\" + str(i) + \".nc\" for i in range(len(thetav_ds))]\n xr.save_mfdataset(thetav_ds, paths=paths, format=\"NETCDF4\")\n\n # client.profile(filename=\"dask-profile.html\")\n # history = ts.data\n end = time.time()\n print(\"Time elapsed (compute):\", end - start)\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chris-tng/minitorch
[ "f5facd366aac6a4d3a437796c43ac02a9b7069ff" ]
[ "tests/strategies.py" ]
[ "# +\nimport sys\nsys.path.append(\"..\")\n\nimport minitorch\nfrom hypothesis import settings\nfrom hypothesis.strategies import composite, floats, integers, lists\nimport numpy as np\n# -\n\nsettings.register_profile(\"ci\", deadline=None)\nsettings.load_profile(\"ci\")\n\n\nsmall_ints = integers(min_value=1, max_value=3)\nsmall_floats = floats(min_value=-100, max_value=100)\n\n\n@composite\ndef vals(draw, size, number):\n pts = draw(lists(number, min_size=size, max_size=size,))\n return minitorch.tensor(pts)\n\n\n@composite\ndef scalars(draw, min_value=-100000, max_value=100000):\n val = draw(floats(min_value=min_value, max_value=max_value))\n return minitorch.Scalar(val)\n\n\n@composite\ndef shapes(draw):\n lsize = draw(lists(small_ints, min_size=1, max_size=4))\n return tuple(lsize)\n\n\n@composite\ndef tensor_data(draw, numbers=floats()):\n shape = draw(shapes())\n size = int(minitorch.prod(shape))\n data = draw(lists(numbers, min_size=size, max_size=size))\n return minitorch.TensorData(data, shape)\n\n\n@composite\ndef indices(draw, layout):\n return tuple((draw(integers(min_value=0, max_value=s - 1)) for s in layout.shape))\n\n\n@composite\ndef tensors(draw, numbers=floats(allow_nan=False, min_value=-100, max_value=100)):\n td = draw(tensor_data(numbers))\n return minitorch.Tensor(td)\n\n\n@composite\ndef shaped_tensors(\n draw, n, numbers=floats(allow_nan=False, min_value=-100, max_value=100)\n):\n td = draw(tensor_data(numbers))\n values = []\n for i in range(n):\n data = draw(lists(numbers, min_size=td.size, max_size=td.size))\n values.append(minitorch.Tensor(minitorch.TensorData(data, td.shape)))\n return values\n\n\n@composite\ndef matmul_tensors(\n draw, numbers=floats(allow_nan=False, min_value=-100, max_value=100)\n):\n\n i, j, k = [draw(integers(min_value=1, max_value=10)) for _ in range(3)]\n\n l1 = (i, j)\n l2 = (j, k)\n values = []\n for shape in [l1, l2]:\n size = int(minitorch.prod(shape))\n data = draw(lists(numbers, min_size=size, max_size=size))\n values.append(minitorch.Tensor(minitorch.TensorData(data, shape)))\n return values\n\n\ndef assert_isclose(a, b):\n return np.testing.assert_allclose(a, b, 1e-2, 1e-2)\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MentorEmbedded/autobahn-python
[ "299a6560cc8bd8e3ad7c02acf6cf15cf15cee87b" ]
[ "autobahn/xbr/_config.py" ]
[ "###############################################################################\n#\n# The MIT License (MIT)\n#\n# Copyright (c) Crossbar.io Technologies GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n###############################################################################\n\nimport os\nimport io\nimport sys\nimport uuid\nimport struct\nimport binascii\nimport configparser\nfrom typing import Optional, List, Dict\n\nimport click\nimport nacl\nimport web3\nimport numpy as np\nfrom time import time_ns\nfrom eth_utils.conversions import hexstr_if_str, to_hex\n\nfrom autobahn.websocket.util import parse_url\nfrom autobahn.xbr._wallet import pkm_from_argon2_secret\n\n_HAS_COLOR_TERM = False\ntry:\n import colorama\n\n # https://github.com/tartley/colorama/issues/48\n term = None\n if sys.platform == 'win32' and 'TERM' in os.environ:\n term = os.environ.pop('TERM')\n\n colorama.init()\n _HAS_COLOR_TERM = True\n\n if term:\n os.environ['TERM'] = term\n\nexcept ImportError:\n pass\n\n\nclass Profile(object):\n \"\"\"\n User profile, stored as named section in ``${HOME}/.xbrnetwork/config.ini``:\n\n .. code-block:: INI\n\n [default]\n # username used with this profile\n username=joedoe\n\n # user email used with the profile (e.g. 
for verification emails)\n [email protected]\n\n # XBR network node used as a directory server and gateway to XBR smart contracts\n network_url=ws://localhost:8090/ws\n\n # WAMP realm on network node, usually \"xbrnetwork\"\n network_realm=xbrnetwork\n\n # user private WAMP-cryptosign key (for client authentication)\n cskey=0xb18bbe88ca0e189689e99f87b19addfb179d46aab3d59ec5d93a15286b949eb6\n\n # user private Ethereum key (for signing transactions and e2e data encryption)\n ethkey=0xfbada363e724d4db2faa2eeaa7d7aca37637b1076dd8cf6fefde13983abaa2ef\n \"\"\"\n\n def __init__(self,\n path: Optional[str] = None,\n name: Optional[str] = None,\n member_adr: Optional[str] = None,\n ethkey: Optional[bytes] = None,\n cskey: Optional[bytes] = None,\n username: Optional[str] = None,\n email: Optional[str] = None,\n network_url: Optional[str] = None,\n network_realm: Optional[str] = None,\n member_oid: Optional[uuid.UUID] = None,\n vaction_oid: Optional[uuid.UUID] = None,\n vaction_requested: Optional[np.datetime64] = None,\n vaction_verified: Optional[np.datetime64] = None,\n data_url: Optional[str] = None,\n data_realm: Optional[str] = None,\n infura_url: Optional[str] = None,\n infura_network: Optional[str] = None,\n infura_key: Optional[str] = None,\n infura_secret: Optional[str] = None):\n \"\"\"\n\n :param path:\n :param name:\n :param member_adr:\n :param ethkey:\n :param cskey:\n :param username:\n :param email:\n :param network_url:\n :param network_realm:\n :param member_oid:\n :param vaction_oid:\n :param vaction_requested:\n :param vaction_verified:\n :param data_url:\n :param data_realm:\n :param infura_url:\n :param infura_network:\n :param infura_key:\n :param infura_secret:\n \"\"\"\n from txaio import make_logger\n self.log = make_logger()\n\n self.path = path\n self.name = name\n\n self.member_adr = member_adr\n self.ethkey = ethkey\n self.cskey = cskey\n self.username = username\n self.email = email\n self.network_url = network_url\n self.network_realm = network_realm\n self.member_oid = member_oid\n self.vaction_oid = vaction_oid\n self.vaction_requested = vaction_requested\n self.vaction_verified = vaction_verified\n self.data_url = data_url\n self.data_realm = data_realm\n self.infura_url = infura_url\n self.infura_network = infura_network\n self.infura_key = infura_key\n self.infura_secret = infura_secret\n\n def marshal(self):\n obj = {}\n obj['member_adr'] = self.member_adr or ''\n obj['ethkey'] = '0x{}'.format(binascii.b2a_hex(self.ethkey).decode()) if self.ethkey else ''\n obj['cskey'] = '0x{}'.format(binascii.b2a_hex(self.cskey).decode()) if self.cskey else ''\n obj['username'] = self.username or ''\n obj['email'] = self.email or ''\n obj['network_url'] = self.network_url or ''\n obj['network_realm'] = self.network_realm or ''\n obj['member_oid'] = str(self.member_oid) if self.member_oid else ''\n obj['vaction_oid'] = str(self.vaction_oid) if self.vaction_oid else ''\n obj['vaction_requested'] = str(self.vaction_requested) if self.vaction_requested else ''\n obj['vaction_verified'] = str(self.vaction_verified) if self.vaction_verified else ''\n obj['data_url'] = self.data_url or ''\n obj['data_realm'] = self.data_realm or ''\n obj['infura_url'] = self.infura_url or ''\n obj['infura_network'] = self.infura_network or ''\n obj['infura_key'] = self.infura_key or ''\n obj['infura_secret'] = self.infura_secret or ''\n return obj\n\n @staticmethod\n def parse(path, name, items):\n member_adr = None\n ethkey = None\n cskey = None\n username = None\n email = None\n network_url = 
None\n network_realm = None\n member_oid = None\n vaction_oid = None\n vaction_requested = None\n vaction_verified = None\n data_url = None\n data_realm = None\n infura_network = None\n infura_key = None\n infura_secret = None\n infura_url = None\n for k, v in items:\n if k == 'network_url':\n network_url = str(v)\n elif k == 'network_realm':\n network_realm = str(v)\n elif k == 'vaction_oid':\n if type(v) == str and v != '':\n vaction_oid = uuid.UUID(v)\n else:\n vaction_oid = None\n elif k == 'member_adr':\n if type(v) == str and v != '':\n member_adr = v\n else:\n member_adr = None\n elif k == 'member_oid':\n if type(v) == str and v != '':\n member_oid = uuid.UUID(v)\n else:\n member_oid = None\n elif k == 'vaction_requested':\n if type(v) == int and v:\n vaction_requested = np.datetime64(v, 'ns')\n else:\n vaction_requested = v\n elif k == 'vaction_verified':\n if type(v) == int:\n vaction_verified = np.datetime64(v, 'ns')\n else:\n vaction_verified = v\n elif k == 'data_url':\n data_url = str(v)\n elif k == 'data_realm':\n data_realm = str(v)\n elif k == 'ethkey':\n ethkey = binascii.a2b_hex(v[2:])\n elif k == 'cskey':\n cskey = binascii.a2b_hex(v[2:])\n elif k == 'username':\n username = str(v)\n elif k == 'email':\n email = str(v)\n elif k == 'infura_network':\n infura_network = str(v)\n elif k == 'infura_key':\n infura_key = str(v)\n elif k == 'infura_secret':\n infura_secret = str(v)\n elif k == 'infura_url':\n infura_url = str(v)\n elif k in ['path', 'name']:\n pass\n else:\n # skip unknown attribute\n print('unprocessed config attribute \"{}\"'.format(k))\n\n return Profile(path, name,\n member_adr, ethkey, cskey,\n username, email,\n network_url, network_realm,\n member_oid,\n vaction_oid, vaction_requested, vaction_verified,\n data_url, data_realm,\n infura_url, infura_network, infura_key, infura_secret)\n\n\nclass UserConfig(object):\n \"\"\"\n Local user configuration file. The data is either a plain text (unencrypted)\n .ini file, or such a file encrypted with XSalsa20-Poly1305, and with a\n binary file header of 48 octets.\n \"\"\"\n def __init__(self, config_path):\n \"\"\"\n\n :param config_path: The user configuration file path.\n \"\"\"\n from txaio import make_logger\n self.log = make_logger()\n self._config_path = os.path.abspath(config_path)\n self._profiles = {}\n\n @property\n def config_path(self) -> List[str]:\n \"\"\"\n Return the path to the user configuration file exposed by this object.,\n\n :return: Local filesystem path.\n \"\"\"\n return self._config_path\n\n @property\n def profiles(self) -> Dict[str, object]:\n \"\"\"\n Access to a map of user profiles in this user configuration.\n\n :return: Map of user profiles.\n \"\"\"\n return self._profiles\n\n def save(self, password: Optional[str] = None):\n \"\"\"\n Save this user configuration to the underlying configuration file. 
The user\n configuration file can be encrypted using Argon2id when a ``password`` is given.\n\n :param password: The optional Argon2id password.\n\n :return: Number of octets written to the user configuration file.\n \"\"\"\n written = 0\n config = configparser.ConfigParser()\n for profile_name, profile in self._profiles.items():\n if profile_name not in config.sections():\n config.add_section(profile_name)\n written += 1\n pd = profile.marshal()\n for option, value in pd.items():\n config.set(profile_name, option, value)\n\n with io.StringIO() as fp1:\n config.write(fp1)\n config_data = fp1.getvalue().encode('utf8')\n\n if password:\n # binary file format header (48 bytes):\n #\n # * 8 bytes: 0xdeadbeef 0x00000666 magic number (big endian)\n # * 4 bytes: 0x00000001 encryption type 1 for \"argon2id\"\n # * 4 bytes data length (big endian)\n # * 8 bytes created timestamp ns (big endian)\n # * 8 bytes unused (filled 0x00 currently)\n # * 16 bytes salt\n #\n salt = os.urandom(16)\n context = 'xbrnetwork-config'\n priv_key = pkm_from_argon2_secret(email='', password=password, context=context, salt=salt)\n box = nacl.secret.SecretBox(priv_key)\n config_data_ciphertext = box.encrypt(config_data)\n dl = [\n b'\\xde\\xad\\xbe\\xef',\n b'\\x00\\x00\\x06\\x66',\n b'\\x00\\x00\\x00\\x01',\n struct.pack('>L', len(config_data_ciphertext)),\n struct.pack('>Q', time_ns()),\n b'\\x00' * 8,\n salt,\n config_data_ciphertext,\n ]\n data = b''.join(dl)\n else:\n data = config_data\n\n with open(self._config_path, 'wb') as fp2:\n fp2.write(data)\n\n self.log.debug('configuration with {sections} sections, {bytes_written} bytes written to {written_to}',\n sections=written, bytes_written=len(data), written_to=self._config_path)\n\n return len(data)\n\n def load(self, cb_get_password=None) -> List[str]:\n \"\"\"\n Load this object from the underlying user configuration file. 
When the\n file is encrypted, call back into ``cb_get_password`` to get the user password.\n\n :param cb_get_password: Callback called when password is needed.\n\n :return: List of profiles loaded.\n \"\"\"\n if not os.path.exists(self._config_path) or not os.path.isfile(self._config_path):\n raise RuntimeError('config path \"{}\" cannot be loaded: so such file'.format(self._config_path))\n\n with open(self._config_path, 'rb') as fp:\n data = fp.read()\n\n if len(data) >= 48 and data[:8] == b'\\xde\\xad\\xbe\\xef\\x00\\x00\\x06\\x66':\n # binary format detected\n header = data[:48]\n body = data[48:]\n\n algo = struct.unpack('>L', header[8:12])[0]\n data_len = struct.unpack('>L', header[12:16])[0]\n created = struct.unpack('>Q', header[16:24])[0]\n # created_ts = np.datetime64(created, 'ns')\n\n assert algo in [0, 1]\n assert data_len == len(body)\n assert created < time_ns()\n\n salt = header[32:48]\n context = 'xbrnetwork-config'\n if cb_get_password:\n password = cb_get_password()\n else:\n password = ''\n priv_key = pkm_from_argon2_secret(email='', password=password, context=context, salt=salt)\n box = nacl.secret.SecretBox(priv_key)\n body = box.decrypt(body)\n\n else:\n header = None\n body = data\n\n config = configparser.ConfigParser()\n config.read_string(body.decode('utf8'))\n\n profiles = {}\n for profile_name in config.sections():\n citems = config.items(profile_name)\n profile = Profile.parse(self._config_path, profile_name, citems)\n profiles[profile_name] = profile\n self._profiles = profiles\n\n loaded_profiles = sorted(self.profiles.keys())\n return loaded_profiles\n\n\nif 'CROSSBAR_FABRIC_URL' in os.environ:\n _DEFAULT_CFC_URL = os.environ['CROSSBAR_FABRIC_URL']\nelse:\n _DEFAULT_CFC_URL = u'wss://master.xbr.network/ws'\n\n\ndef style_error(text):\n if _HAS_COLOR_TERM:\n return click.style(text, fg='red', bold=True)\n else:\n return text\n\n\ndef style_ok(text):\n if _HAS_COLOR_TERM:\n return click.style(text, fg='green', bold=True)\n else:\n return text\n\n\nclass WampUrl(click.ParamType):\n \"\"\"\n WAMP transport URL validator.\n \"\"\"\n\n name = 'WAMP transport URL'\n\n def __init__(self):\n click.ParamType.__init__(self)\n\n def convert(self, value, param, ctx):\n try:\n parse_url(value)\n except Exception as e:\n self.fail(style_error(str(e)))\n else:\n return value\n\n\ndef prompt_for_wamp_url(msg, default=None):\n \"\"\"\n Prompt user for WAMP transport URL (eg \"wss://planet.xbr.network/ws\").\n \"\"\"\n value = click.prompt(msg, type=WampUrl(), default=default)\n return value\n\n\nclass EthereumAddress(click.ParamType):\n \"\"\"\n Ethereum address validator.\n \"\"\"\n\n name = 'Ethereum address'\n\n def __init__(self):\n click.ParamType.__init__(self)\n\n def convert(self, value, param, ctx):\n try:\n value = web3.Web3.toChecksumAddress(value)\n adr = binascii.a2b_hex(value[2:])\n if len(value) != 20:\n raise ValueError('Ethereum addres must be 20 bytes (160 bit), but was {} bytes'.format(len(adr)))\n except Exception as e:\n self.fail(style_error(str(e)))\n else:\n return value\n\n\ndef prompt_for_ethereum_address(msg):\n \"\"\"\n Prompt user for an Ethereum (public) address.\n \"\"\"\n value = click.prompt(msg, type=EthereumAddress())\n return value\n\n\nclass PrivateKey(click.ParamType):\n \"\"\"\n Private key (32 bytes in HEX) validator.\n \"\"\"\n\n name = 'Private key'\n\n def __init__(self, key_len):\n click.ParamType.__init__(self)\n self._key_len = key_len\n\n def convert(self, value, param, ctx):\n try:\n value = hexstr_if_str(to_hex, value)\n if 
value[:2] in ['0x', '\\\\x']:\n key = binascii.a2b_hex(value[2:])\n else:\n key = binascii.a2b_hex(value)\n if len(key) != self._key_len:\n raise ValueError('key length must be {} bytes, but was {} bytes'.format(self._key_len, len(key)))\n except Exception as e:\n self.fail(style_error(str(e)))\n else:\n return value\n\n\ndef prompt_for_key(msg, key_len, default=None):\n \"\"\"\n Prompt user for a binary key of given length (in HEX).\n \"\"\"\n value = click.prompt(msg, type=PrivateKey(key_len), default=default)\n return value\n\n\n# default configuration stored in $HOME/.xbrnetwork/config.ini\n_DEFAULT_CONFIG = \"\"\"[default]\n# username used with this profile\nusername={username}\n\n# user email used with the profile (e.g. for verification emails)\nemail={email}\n\n# XBR network node used as a directory server and gateway to XBR smart contracts\nnetwork_url={network_url}\n\n# WAMP realm on network node, usually \"xbrnetwork\"\nnetwork_realm={network_realm}\n\n# user private WAMP-cryptosign key (for client authentication)\ncskey={cskey}\n\n# user private Ethereum key (for signing transactions and e2e data encryption)\nethkey={ethkey}\n\"\"\"\n\n# # default XBR market URL to connect to\n# market_url={market_url}\n# market_realm={market_realm}\n# # Infura blockchain gateway configuration\n# infura_url={infura_url}\n# infura_network={infura_network}\n# infura_key={infura_key}\n# infura_secret={infura_secret}\n\n\ndef load_or_create_profile(dotdir=None, profile=None, default_url=None, default_realm=None, default_email=None, default_username=None):\n dotdir = dotdir or '~/.xbrnetwork'\n profile = profile or 'default'\n default_url = default_url or 'wss://planet.xbr.network/ws'\n default_realm = default_realm or 'xbrnetwork'\n\n config_dir = os.path.expanduser(dotdir)\n if not os.path.isdir(config_dir):\n os.mkdir(config_dir)\n click.echo('created new local user directory {}'.format(style_ok(config_dir)))\n\n config_path = os.path.join(config_dir, 'config.ini')\n if not os.path.isfile(config_path):\n click.echo('creating new user profile \"{}\"'.format(style_ok(profile)))\n with open(config_path, 'w') as f:\n network_url = prompt_for_wamp_url('enter the WAMP router URL of the network directory node', default=default_url)\n network_realm = click.prompt('enter the WAMP realm to join on the network directory node', type=str, default=default_realm)\n cskey = prompt_for_key('your private WAMP client key', 32, default='0x' + binascii.b2a_hex(os.urandom(32)).decode())\n ethkey = prompt_for_key('your private Etherum key', 32, default='0x' + binascii.b2a_hex(os.urandom(32)).decode())\n email = click.prompt('user email used for with profile', type=str, default=default_email)\n username = click.prompt('user name used with this profile', type=str, default=default_username)\n f.write(_DEFAULT_CONFIG.format(network_url=network_url, network_realm=network_realm, ethkey=ethkey,\n cskey=cskey, email=email, username=username))\n click.echo('created new local user configuration {}'.format(style_ok(config_path)))\n\n config_obj = UserConfig(config_path)\n config_obj.load()\n profile_obj = config_obj.profiles.get(profile, None)\n\n if not profile_obj:\n raise click.ClickException('no such profile: \"{}\"'.format(profile))\n\n return profile_obj\n" ]
[ [ "numpy.datetime64" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dungxibo123/NeuralCDE
[ "19f7ed24223f5822142c676127c92d818d290903" ]
[ "experiments/models/metamodel.py" ]
[ "import pathlib\nimport sys\nimport torch\n\nhere = pathlib.Path(__file__).resolve().parent\nsys.path.append(str(here / '..' / '..'))\n\nimport controldiffeq\n\n\nclass NeuralCDE(torch.nn.Module):\n \"\"\"A Neural CDE model. Provides a wrapper around the lower-level cdeint function, to get a flexible Neural CDE\n model.\n\n Specifically, considering the CDE\n ```\n z_t = z_{t_0} + \\int_{t_0}^t f(z_s)dX_s\n ```\n where X is determined by the data, and given some terminal time t_N, then this model first computes z_{t_N}, then\n performs a linear function on it, and then outputs the result.\n\n It's known that linear functions on CDEs are universal approximators, so this is a very general type of model.\n \"\"\"\n def __init__(self, func, input_channels, hidden_channels, output_channels, initial=True):\n \"\"\"\n Arguments:\n func: As cdeint.\n input_channels: How many channels there are in the input.\n hidden_channels: The number of hidden channels, i.e. the size of z_t.\n output_channels: How many channels to perform a linear map to at the end.\n initial: Whether to automatically construct the initial value from data (in which case z0 must not be passed\n during forward()), or to use the one supplied during forward (in which case z0 must be passed during\n forward()).\n \"\"\"\n if isinstance(func, ContinuousRNNConverter): # ugly hack\n hidden_channels = hidden_channels + input_channels\n\n super(NeuralCDE, self).__init__()\n self.input_channels = input_channels\n self.hidden_channels = hidden_channels\n self.output_channels = output_channels\n\n self.func = func\n self.initial = initial\n if initial and not isinstance(func, ContinuousRNNConverter): # very ugly hack\n self.initial_network = torch.nn.Linear(input_channels, hidden_channels)\n self.linear = torch.nn.Linear(hidden_channels, output_channels)\n self.sigmoid = nn.Sigmoid()\n def extra_repr(self):\n return \"input_channels={}, hidden_channels={}, output_channels={}, initial={}\" \\\n \"\".format(self.input_channels, self.hidden_channels, self.output_channels, self.initial)\n\n def forward(self, times, coeffs, final_index, z0=None, stream=False, **kwargs):\n \"\"\"\n Arguments:\n times: The times of the observations for the input path X, e.g. as passed as an argument to\n `controldiffeq.natural_cubic_spline_coeffs`.\n coeffs: The coefficients describing the input path X, e.g. as returned by\n `controldiffeq.natural_cubic_spline_coeffs`.\n final_index: Each batch element may have a different final time. This defines the index within the tensor\n `times` of where the final time for each batch element is.\n z0: See the 'initial' argument to __init__.\n stream: Whether to return the result of the Neural CDE model at all times (True), or just the final time\n (False). Defaults to just the final time. The `final_index` argument is ignored if stream is True.\n **kwargs: Will be passed to cdeint.\n\n Returns:\n If stream is False, then this will return the terminal time z_T. If stream is True, then this will return\n all intermediate times z_t, for those t for which there was data.\n \"\"\"\n\n # Extract the sizes of the batch dimensions from the coefficients\n coeff, _, _, _ = coeffs\n batch_dims = coeff.shape[:-2]\n if not stream:\n assert batch_dims == final_index.shape, \"coeff.shape[:-2] must be the same as final_index.shape. 
\" \\\n \"coeff.shape[:-2]={}, final_index.shape={}\" \\\n \"\".format(batch_dims, final_index.shape)\n\n cubic_spline = controldiffeq.NaturalCubicSpline(times, coeffs)\n\n if z0 is None:\n assert self.initial, \"Was not expecting to be given no value of z0.\"\n if isinstance(self.func, ContinuousRNNConverter): # still an ugly hack\n z0 = torch.zeros(*batch_dims, self.hidden_channels, dtype=coeff.dtype, device=coeff.device)\n else:\n z0 = self.initial_network(cubic_spline.evaluate(times[0]))\n else:\n assert not self.initial, \"Was expecting to be given a value of z0.\"\n if isinstance(self.func, ContinuousRNNConverter): # continuing adventures in ugly hacks\n z0_extra = torch.zeros(*batch_dims, self.input_channels, dtype=z0.dtype, device=z0.device)\n z0 = torch.cat([z0_extra, z0], dim=-1)\n\n # Figure out what times we need to solve for\n if stream:\n t = times\n else:\n # faff around to make sure that we're outputting at all the times we need for final_index.\n sorted_final_index, inverse_final_index = final_index.unique(sorted=True, return_inverse=True)\n if 0 in sorted_final_index:\n sorted_final_index = sorted_final_index[1:]\n final_index = inverse_final_index\n else:\n final_index = inverse_final_index + 1\n if len(times) - 1 in sorted_final_index:\n sorted_final_index = sorted_final_index[:-1]\n t = torch.cat([times[0].unsqueeze(0), times[sorted_final_index], times[-1].unsqueeze(0)])\n\n # Switch default solver\n if 'method' not in kwargs:\n kwargs['method'] = 'rk4'\n if kwargs['method'] == 'rk4':\n if 'options' not in kwargs:\n kwargs['options'] = {}\n options = kwargs['options']\n if 'step_size' not in options and 'grid_constructor' not in options:\n time_diffs = times[1:] - times[:-1]\n options['step_size'] = time_diffs.min().item()\n\n # Actually solve the CDE\n z_t = controldiffeq.cdeint(dX_dt=cubic_spline.derivative,\n z0=z0,\n func=self.func,\n t=t,\n **kwargs)\n\n # Organise the output\n if stream:\n # z_t is a tensor of shape (times, ..., channels), so change this to (..., times, channels)\n for i in range(len(z_t.shape) - 2, 0, -1):\n z_t = z_t.transpose(0, i)\n else:\n # final_index is a tensor of shape (...)\n # z_t is a tensor of shape (times, ..., channels)\n final_index_indices = final_index.unsqueeze(-1).expand(z_t.shape[1:]).unsqueeze(0)\n z_t = z_t.gather(dim=0, index=final_index_indices).squeeze(0)\n\n # Linear map and return\n pred_y = self.linear(z_t)\n pred_y = self.sigmoid(pred_y)\n return pred_y\n\n\n# Note that this relies on the first channel being time\nclass ContinuousRNNConverter(torch.nn.Module):\n def __init__(self, input_channels, hidden_channels, model):\n super(ContinuousRNNConverter, self).__init__()\n\n self.input_channels = input_channels\n self.hidden_channels = hidden_channels\n self.model = model\n\n out_base = torch.zeros(self.input_channels + self.hidden_channels, self.input_channels)\n for i in range(self.input_channels):\n out_base[i, i] = 1\n self.register_buffer('out_base', out_base)\n\n def extra_repr(self):\n return \"input_channels: {}, hidden_channels: {}\".format(self.input_channels, self.hidden_channels)\n\n def forward(self, z):\n # z is a tensor of shape (..., input_channels + hidden_channels)\n x = z[..., :self.input_channels]\n h = z[..., self.input_channels:]\n # In theory the hidden state must lie in this region. And most of the time it does anyway! Very occasionally\n # it escapes this and breaks everything, though. (Even when using adaptive solvers or small step sizes.) 
Which\n # is kind of surprising given how similar the GRU-ODE is to a standard negative exponential problem, we'd\n # expect to get absolute stability without too much difficulty. Maybe there's a bug in the implementation\n # somewhere, but not that I've been able to find... (and h does only escape this region quite rarely.)\n h = h.clamp(-1, 1)\n # model_out is a tensor of shape (..., hidden_channels)\n model_out = self.model(x, h)\n batch_dims = model_out.shape[:-1]\n out = self.out_base.repeat(*batch_dims, 1, 1).clone()\n out[..., self.input_channels:, 0] = model_out\n return out\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dendisuhubdy/pytorch-saliency
[ "dcb3499be127637435a577cb42161b3e096aa28d", "dcb3499be127637435a577cb42161b3e096aa28d" ]
[ "saliency/deconv/saliency.py", "saliency/lime/saliency.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom saliency.saliency import Saliency\n\nclass DeconvSaliency(Saliency):\n \"\"\"docstring for DeconvSaliency.\"\"\"\n def __init__(self, model):\n super(DeconvSaliency, self).__init__(model)\n\n\n def guided_relu_hook(self, module, grad_in, grad_out):\n return (torch.nn.functional.relu(grad_in[0]), )\n\n\n def generate_saliency(self, input, target):\n input.requires_grad = True\n\n self.model.zero_grad()\n\n for module in self.model.modules():\n if type(module) == nn.ReLU:\n module.register_backward_hook(self.guided_relu_hook)\n\n output = self.model(input)\n\n grad_outputs = torch.zeros_like(output)\n\n grad_outputs[:, target] = 1\n\n output.backward(gradient = grad_outputs)\n\n input.requires_grad = False\n #print(input)\n\n return (input.grad.clone()[0] * input)\n", "import torch\nfrom saliency.saliency import Saliency\nimport numpy as np\nfrom scipy.ndimage import label\nimport torchvision\nfrom torch.autograd import Variable\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport random\nimport copy\n\n\nclass LimeSaliency(Saliency):\n\n def __init__(self, model):\n super(LimeSaliency, self).__init__(model)\n\n\n def generate_saliency(self, input, target):\n\n #self.model.zero_grad()\n\n output = self.model(input)\n\n #index: 0 layer: HP\n # index: 1 layer: Ship\n # index: 2 layer: Small Towers\n # index: 3 layer: Big Towers\n # index: 4 layer: Small Cities\n # index: 5 layer: Big Cities\n # index: 6 layer: Friend\n # index: 7 layer: Enemy\n\n\n #return (input.grad.clone()[0] * input)\n input2 = input.clone()\n image = np.zeros((40, 40))\n input2 = input2.view(40, 40, 8)\n input3 = input.clone()\n\n #logical or of the input images to get the original image\n #only do or over small big cities and towers to get proper coordinates of cities and towers\n #otherwise it will include enemy ship too if enemy channel is included which will cause object to\n #be merged with tower or city object\n for i in range(2,6):\n image = np.logical_or(image, input2[:, :, i].numpy())*1\n\n #get the number of objects in the image\n labeled_array, num_objects = label(image)\n\n indices = []\n for i in range(num_objects):\n indices.append(np.argwhere(labeled_array == i+1))\n #self.generate_file(labeled_array, 'labeled_array')\n\n # print('object 1\\n')\n # print(indices[0])\n #print('object 2\\n')\n #print(indices[1])\n #print('object 3\\n')\n #print(indices[2])\n #print('object 4\\n')\n #print(indices[3])\n #print('object 5\\n')\n #print(indices[4])\n\n #hp\n hp_array, hp = label(input2[:, :, 0].numpy())\n hp_indices = []\n for i in range(hp):\n hp_indices.append(np.argwhere(hp_array == i+1))\n #self.generate_file(hp_array, 'hp_array')\n\n #ship\n #remove agent because we don't want to perturb that\n ship_image = input2[:, :, 1].clone().numpy()\n ship_image[19][20] = 0\n ship_image[20][20] = 0\n ship_image[21][20] = 0\n ship_array, ships = label(ship_image)\n # print('ships ', ships )\n ship_indices = []\n for i in range(ships):\n ship_indices.append(np.argwhere(ship_array == i+1))\n #self.generate_file(ship_array, 'ship_array')\n\n\n\n values = torch.zeros(40*40*5)\n values = values.view(40, 40, 5)\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n features = self.generate_features(input2)\n #print(features)\n outputs = output[:, target].data.numpy()\n\n # index: 0 layer: HP\n # index: 
1 layer: Ship\n # index: 2 layer: Small Towers\n # index: 3 layer: Big Towers\n # index: 4 layer: Small Cities\n # index: 5 layer: Big Cities\n # index: 6 layer: Friend\n # index: 7 layer: Enemy\n\n #output layers:\n #0 HP\n #1 agent\n #2 size\n #3 type\n #4 friend/enemy\n\n #here i refers to the output salient layers\n #print('num_objects', num_objects)\n\n for i in range(5):\n if i==0:# output HP\n for j in range(hp):\n for k in range(hp_indices[j].shape[0]):\n x = hp_indices[j][k][0]\n y = hp_indices[j][k][1]\n temp = 0.3*input2[:, :, 0][x][y]\n input2[:, :, 0][x][y] += temp\n perturbed_output = self.model(input2.view(1, 12800))\n feature = self.generate_features(input2)\n features = np.concatenate((features, feature))\n outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n\n elif i==1:#output agent\n for j in range(ships):\n for k in range(ship_indices[j].shape[0]):\n x = ship_indices[j][k][0]\n y = ship_indices[j][k][1]\n if input2[:, :, 1][x][y] == 1:\n input2[:, :, 1][x][y] = 0\n perturbed_output = self.model(input2.view(1, 12800))\n feature = self.generate_features(input2)\n features = np.concatenate((features, feature))\n outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n\n elif i==2: #output size\n #print('in i== 2')\n for l in range(2, 6):\n #print('layer: ',l)\n for j in range(num_objects):\n # print('object: ',j)\n s = 0\n for k in range(indices[j].shape[0]):\n x = indices[j][k][0]\n y = indices[j][k][1]\n # print('x: '+str(x)+' y: '+str(y))\n # print('Value of input: '+str(input2[:, :, i][x][y]))\n # print(input2[:, :, l][x][y])\n if l == 2 or l==4: #small tower/city\n if input2[:, :, l][x][y] == 1:\n s = 1\n input2[:, :, l][x][y] = 0\n input2[:, :, l+1][x][y] = 1\n else: #big tower/city\n if input2[:, :, l ][x][y] == 1:\n s = 1\n input2[:, :, l][x][y] = 0\n input2[:, :, l-1][x][y] = 1\n\n\n\n #print(saliency)\n\n if s==1:\n perturbed_output = self.model(input2.view(1, 12800))\n feature = self.generate_features(input2)\n features = np.concatenate((features, feature))\n outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n #print(saliency[0][target])\n elif i==3: #output type\n for l in range(2, 6):\n for j in range(num_objects):\n s = 0\n for k in range(indices[j].shape[0]):\n x = indices[j][k][0]\n y = indices[j][k][1]\n # print('x: '+str(x)+' y: '+str(y))\n # print('Value of input: '+str(input2[:, :, i][x][y]))\n if l == 2 or l == 3: #small tower/city\n if input2[:, :, l][x][y] == 1:\n s = 1\n input2[:, :, l][x][y] = 0\n input2[:, :, l+2][x][y] = 1\n else: #big tower/city\n if input2[:, :, l ][x][y] == 1:\n s = 1\n input2[:, :, l][x][y] = 0\n input2[:, :, l-2][x][y] = 1\n\n\n #print(saliency)\n\n if s==1:\n perturbed_output = self.model(input2.view(1, 12800))\n feature = self.generate_features(input2)\n features = np.concatenate((features, feature))\n outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n\n else:# output frenemy\n for l in range(6, 8):\n for j in range(num_objects):\n s = 0\n for k in range(indices[j].shape[0]):\n x = indices[j][k][0]\n y = indices[j][k][1]\n\n if l == 6:\n if input2[:, :, l][x][y] == 1:\n s = 1\n input2[:, :, l][x][y] = 0\n input2[:, :, l+1][x][y] = 1\n else:\n if input2[:, :, l][x][y] == 1:\n s = 1\n 
input2[:, :, l][x][y] = 0\n input2[:, :, l-1][x][y] = 1\n\n if s==1:\n perturbed_output = self.model(input2.view(1, 12800))\n feature = self.generate_features(input2)\n features = np.concatenate((features, feature))\n outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))\n input2 = input.clone()\n input2 = input2.view(40, 40, 8)\n\n\n #print(features)\n #print(outputs)\n linear_model = LinearRegressionModel(21, 1)\n linear_model.train()\n criterion = nn.L1Loss()\n optimiser = torch.optim.SGD(linear_model.parameters(), lr = 0.01)\n epochs = 5000\n for epoch in range(epochs):\n inputs = Variable(torch.from_numpy(features).float())\n labels = Variable(torch.from_numpy(outputs))\n optimiser.zero_grad()\n pred = linear_model.forward(inputs)\n loss = criterion(pred, labels)\n loss.backward()\n optimiser.step()\n #print('epoch {}, loss {}'.format(epoch,loss.item()))\n #train_loss = eval_net(features, outputs, linear_model)\n # print('train_loss: %.5f ' %\n # (train_loss))\n\n # weights_ = []\n # for name, param in linear_model.parameters() :\n # weights_.append(param.data.numpy())\n #new_model = copy.deepcopy(linear_model)\n weights = linear_model.linear.weight.clone()\n weights = weights.data.numpy()\n #print(weights)\n\n\n\n # print('weights')\n # print(weights_)\n # weights_ = np.asarray(weights_)\n\n values = self.plot_saliency(weights, input3)\n\n return (values.view(1, 40*40*5))\n\n\n#0 HP\n#1 enemy ship\n#2 size\n#3 type\n#4 friend/enemy\n\n def generate_features(self, input):\n hp_array, hp = label(input[:, :, 0].numpy())\n hp_indices = []\n for i in range(hp):\n hp_indices.append(np.argwhere(hp_array == i+1))\n image = np.zeros((40, 40))\n for i in range(2,6):\n image = np.logical_or(image, input[:, :, i].numpy())*1\n feature = np.zeros((1, 21))\n #put hp of agent. agent will always be object 3 - 1\n feature[0][20] = input[:, :, 0][hp_indices[2][0][0]][hp_indices[2][0][1]]\n #print(feature[0][20])\n #self.generate_file(hp_array, 'feature_hp_array')\n #zero out the agent\n ship_image = input[:, :, 1].clone().numpy()\n ship_image[19][20] = 0\n ship_image[20][20] = 0\n ship_image[21][20] = 0\n ship_image, _ = label(ship_image)\n #self.generate_file(ship_array, 'mod_ship_array')\n #self.generate_file(image, 'image')\n #self.generate_file(ship_image, 'ship_image')\n counter = 0\n #slicing the hp_array quadrant vise\n for i in range(2):\n for j in range(2):\n #array = hp_array[0 + 20*i :19 + 20*i, 0 + 20*j :19 + 20*j]\n #labeled_array, num_objects = label(array)\n indices = np.argwhere(image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j] > 0)\n # print(indices)\n # print('\\n\\n')\n # print(indices[0][0])\n # print(indices[0][1])\n x = indices[0][0] + 20*i\n y = indices[0][1] + 20*j\n # print('x ',x)\n # print('y ',y)\n #first feature will be HP\n feature[0][counter + 0] = input[:, :, 0][x][y]\n #second feature will be checking prescence of enemy ship\n _, objs = label(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j])\n feature[0][counter + 1] = (1 if objs>0 else 0)\n #third feature check size 1 if big 0 if small\n feature[0][counter + 2] = (1 if input[:, :, 3][x][y] == 1\n or input[:, :, 5][x][y] == 1 else 0)\n #fourth feature will check type. 1 if city 0 if tower\n feature[0][counter + 3] = (1 if input[:, :, 4][x][y] == 1\n or input[:, :, 5][x][y] == 1 else 0)\n #fifth feature will check frie\\nd/enemy. 
1 if friend 0 if enemy\n feature[0][counter + 4] = (1 if input[:, :, 6][x][y] == 1 else 0)\n\n\n counter += 5\n return feature\n\n def generate_file(self, array, name):\n f = open(str(name)+'.txt', 'w')\n f.write('\\n\\n\\n')\n\n for i in range(array.shape[0]):\n for j in range(array.shape[1]):\n f.write(str(array[i,j]))\n f.write('\\n')\n f.close()\n\n def plot_saliency(self, feature, input3):\n print('in plot ')\n values = torch.zeros(40*40*5)\n values = values.view(40, 40, 5)\n input3 = input3.view(40,40,8)\n feature = torch.from_numpy(feature).float()\n print('feature: ')\n print(feature)\n #this will give you dimensions of only objects\n image = np.zeros((40, 40))\n for i in range(2,6):\n image = np.logical_or(image, input3[:, :, i].numpy())*1\n labeled_array, num_objects = label(image)\n self.generate_file(image, 'image')\n ship_image = input3[:, :, 1].clone().numpy()\n ship_image[19][20] = 0\n ship_image[20][20] = 0\n ship_image[21][20] = 0\n ship_image, _ = label(ship_image)\n self.generate_file(ship_image, 'ship_image')\n counter = 0\n #slicing the hp_array quadrant vise\n for i in range(2):\n for j in range(2):\n #array = hp_array[0 + 20*i :19 + 20*i, 0 + 20*j :19 + 20*j]\n #labeled_array, num_objects = label(array)\n indices = np.argwhere(image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j] > 0)\n #second feature will be checking prescence of enemy ship\n print('i ',i)\n print('j ',j)\n print(indices)\n print('\\n\\n')\n # print(indices[0][0])\n # print(indices[0][1])\n #first take care of HP\n for k in range(indices.shape[0]):\n x = indices[k][0] + 20*i\n y = indices[k][1] + 20*j\n print('x ',x)\n print('y ',y)\n #first feature will be HP\n values[:, :, 0][x][y] = feature[0][counter + 0]\n values[:, :, 2][x][y] = feature[0][counter + 2]\n values[:, :, 3][x][y] = feature[0][counter + 3]\n values[:, :, 4][x][y] = feature[0][counter + 4]\n\n #second feature will be checking prescence of enemy ship\n _, objs = label(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j])\n enemytank_indices = np.argwhere(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j]>0)\n if objs > 0:\n print('objs ')\n print(objs)\n for k in range(enemytank_indices.shape[0]):\n x = enemytank_indices[k][0] + 20*i\n y = enemytank_indices[k][1] + 20*j\n print('x ',x)\n print('y ',y)\n values[:, :, 1][x][y] = feature[0][counter + 1]\n values[:, :, 0][x][y] = feature[0][counter + 0]\n\n\n # #third feature check size 1 if big 0 if small\n # feature[0][counter + 2] = (1 if input[:, :, 3][x][y] == 1\n # or input[:, :, 5][x][y] == 1 else 0)\n # #fourth feature will check type. 1 if city 0 if tower\n # feature[0][counter + 3] = (1 if input[:, :, 4][x][y] == 1\n # or input[:, :, 5][x][y] == 1 else 0)\n # #fifth feature will check friend/enemy. 
1 if friend 0 if enemy\n # feature[0][counter + 4] = (1 if input[:, :, 6][x][y] == 1 else 0)\n\n\n counter += 5\n\n values[:, :, 0][19][20] = feature[0][20]\n values[:, :, 0][20][20] = feature[0][20]\n values[:, :, 0][21][20] = feature[0][20]\n return values\n\n\nclass LinearRegressionModel(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LinearRegressionModel, self).__init__()\n self.linear = nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n out = self.linear(x)\n return out\n\ndef eval_net(x, y, model):\n correct = 0\n total = 0\n total_loss = 0\n model.eval() # set model to evaluation mode\n criterion = nn.L1Loss()\n for i, (x1, y1) in enumerate(zip(x, y)):\n inputs = Variable(torch.from_numpy(x1).float())\n labels = Variable(torch.from_numpy(y1))\n pred = model.forward(inputs)\n total += labels.size(0)\n #correct += (pred == labels.data).sum()\n loss = criterion(pred, labels)\n total_loss += loss.item()\n #total_loss += loss.item()\n model.train() # set model back to train mode\n return total_loss / total\n" ]
[ [ "torch.nn.functional.relu", "torch.zeros_like" ], [ "torch.zeros", "torch.from_numpy", "numpy.argwhere", "numpy.concatenate", "torch.nn.Linear", "scipy.ndimage.label", "numpy.zeros", "torch.nn.L1Loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Jsakkos/microtools
[ "5800cb86631d2533def5d040f076295aab9536f1" ]
[ "Scripts/imgfileutils.py" ]
[ "# -*- coding: utf-8 -*-\n\n#################################################################\n# File : imgfileutils.py\n# Version : 1.4.5\n# Author : czsrh\n# Date : 10.12.2020\n# Institution : Carl Zeiss Microscopy GmbH\n# Location : https://github.com/zeiss-microscopy/OAD/blob/master/jupyter_notebooks/Read_CZI_and_OMETIFF_and_Napari/modules/imgfileutils.py\n# Copyright (c) 2020 Carl Zeiss AG, Germany. All Rights Reserved.\n#################################################################\n\n\nimport czifile as zis\nfrom apeer_ometiff_library import omexmlClass\nimport os\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt, cm, use\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport xmltodict\nimport numpy as np\nfrom collections import Counter\nfrom lxml import etree as ET\nimport time\nimport re\nimport sys\nfrom aicsimageio import AICSImage, imread, imread_dask\nfrom aicsimageio.writers import ome_tiff_writer\nfrom aicspylibczi import CziFile\nimport dask.array as da\nimport pandas as pd\nimport tifffile\nimport pydash\n\ntry:\n import javabridge as jv\n import bioformats\nexcept (ImportError, ModuleNotFoundError) as error:\n # Output expected ImportErrors.\n print(error.__class__.__name__ + \": \" + error.msg)\n print('Python-BioFormats cannot be used')\n\ntry:\n import napari\nexcept ModuleNotFoundError as error:\n print(error.__class__.__name__ + \": \" + error.msg)\n\nfrom PyQt5.QtWidgets import (\n\n QHBoxLayout,\n QVBoxLayout,\n QFileSystemModel,\n QFileDialog,\n QTreeView,\n QDialogButtonBox,\n QWidget,\n QTableWidget,\n QTableWidgetItem,\n QAbstractItemView\n\n)\nfrom PyQt5.QtCore import Qt, QDir, QSortFilterProxyModel\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QFont\n\n\ndef get_imgtype(imagefile):\n \"\"\"Returns the type of the image based on the file extension - no magic\n\n :param imagefile: filename of the image\n :type imagefile: str\n :return: string specifying the image type\n :rtype: str\n \"\"\"\n\n imgtype = None\n\n if imagefile.lower().endswith('.ome.tiff') or imagefile.lower().endswith('.ome.tif'):\n # it is on OME-TIFF based on the file extension ... :-)\n imgtype = 'ometiff'\n\n elif imagefile.lower().endswith('.tiff') or imagefile.lower().endswith('.tif'):\n # it is on OME-TIFF based on the file extension ... :-)\n imgtype = 'tiff'\n\n elif imagefile.lower().endswith('.czi'):\n # it is on CZI based on the file extension ... :-)\n imgtype = 'czi'\n\n elif imagefile.lower().endswith('.png'):\n # it is on CZI based on the file extension ... :-)\n imgtype = 'png'\n\n elif imagefile.lower().endswith('.jpg') or imagefile.lower().endswith('.jpeg'):\n # it is on OME-TIFF based on the file extension ... 
:-)\n imgtype = 'jpg'\n\n return imgtype\n\n\ndef create_metadata_dict():\n \"\"\"A Python dictionary will be created to hold the relevant metadata.\n\n :return: dictionary with keys for the relevant metadata\n :rtype: dict\n \"\"\"\n\n metadata = {'Directory': None,\n 'Filename': None,\n 'Extension': None,\n 'ImageType': None,\n 'AcqDate': None,\n 'TotalSeries': None,\n 'SizeX': None,\n 'SizeY': None,\n 'SizeZ': 1,\n 'SizeC': 1,\n 'SizeT': 1,\n 'SizeS': 1,\n 'SizeB': 1,\n 'SizeM': 1,\n 'Sizes BF': None,\n 'DimOrder BF': None,\n 'DimOrder BF Array': None,\n 'Axes_czifile': None,\n 'Shape_czifile': None,\n 'czi_isRGB': None,\n 'czi_isMosaic': None,\n 'ObjNA': [],\n 'ObjMag': [],\n 'ObjID': [],\n 'ObjName': [],\n 'ObjImmersion': [],\n 'TubelensMag': [],\n 'ObjNominalMag': [],\n 'XScale': None,\n 'YScale': None,\n 'ZScale': None,\n 'XScaleUnit': None,\n 'YScaleUnit': None,\n 'ZScaleUnit': None,\n 'DetectorModel': [],\n 'DetectorName': [],\n 'DetectorID': [],\n 'DetectorType': [],\n 'InstrumentID': [],\n 'Channels': [],\n 'ChannelNames': [],\n 'ChannelColors': [],\n 'ImageIDs': [],\n 'NumPy.dtype': None\n }\n\n return metadata\n\n\ndef get_metadata(imagefile,\n omeseries=0,\n round_values=False):\n \"\"\"Returns a dictionary with metadata depending on the image type.\n Only CZI and OME-TIFF are currently supported.\n\n :param imagefile: filename of the image\n :type imagefile: str\n :param omeseries: series of OME-TIFF file, , defaults to 0\n :type omeseries: int, optional\n :param round_values: option to round some values, defaults to TrueFalse\n :type round_values: bool, optional\n :return: metadata - dict with the metainformation\n :rtype: dict\n :return: additional_mdczi - dict with additional the metainformation for CZI only\n :rtype: dict\n \"\"\"\n\n # get the image type\n imgtype = get_imgtype(imagefile)\n print('Detected Image Type (based on extension): ', imgtype)\n\n md = {}\n additional_md = {}\n\n if imgtype == 'ometiff':\n\n # parse the OME-XML and return the metadata dictionary and additional info\n md = get_metadata_ometiff(imagefile, series=omeseries)\n\n elif imgtype == 'czi':\n\n # parse the CZI metadata return the metadata dictionary and additional info\n md = get_metadata_czi(imagefile, dim2none=False)\n additional_md = get_additional_metadata_czi(imagefile)\n\n # TODO - Remove this when issue is fixed\n if round_values:\n # temporary workaround for slider / floating point issue in Napari viewer\n # https://forum.image.sc/t/problem-with-dimension-slider-when-adding-array-as-new-layer-for-ome-tiff/39092/2?u=sebi06\n\n md['XScale'] = np.round(md['XScale'], 3)\n md['YScale'] = np.round(md['YScale'], 3)\n md['ZScale'] = np.round(md['ZScale'], 3)\n else:\n # no metadate will be returned\n print('Scales will not be rounded.')\n\n return md, additional_md\n\n\ndef get_metadata_ometiff(filename, series=0):\n \"\"\"Returns a dictionary with OME-TIFF metadata.\n\n :param filename: filename of the OME-TIFF image\n :type filename: str\n :param series: Image Series, defaults to 0\n :type series: int, optional\n :return: dictionary with the relevant OME-TIFF metainformation\n :rtype: dict\n \"\"\"\n\n with tifffile.TiffFile(filename) as tif:\n try:\n # get OME-XML metadata as string the old way\n omexml_string = tif[0].image_description.decode('utf-8')\n except TypeError as e:\n print(e)\n omexml_string = tif.ome_metadata\n\n # get the OME-XML using the apeer-ometiff-library\n omemd = omexmlClass.OMEXML(omexml_string)\n\n # create dictionary for metadata and get OME-XML data\n metadata 
= create_metadata_dict()\n\n # get directory and filename etc.\n metadata['Directory'] = os.path.dirname(filename)\n metadata['Filename'] = os.path.basename(filename)\n metadata['Extension'] = 'ome.tiff'\n metadata['ImageType'] = 'ometiff'\n metadata['AcqDate'] = omemd.image(series).AcquisitionDate\n metadata['Name'] = omemd.image(series).Name\n\n # get image dimensions TZCXY\n metadata['SizeT'] = omemd.image(series).Pixels.SizeT\n metadata['SizeZ'] = omemd.image(series).Pixels.SizeZ\n metadata['SizeC'] = omemd.image(series).Pixels.SizeC\n metadata['SizeX'] = omemd.image(series).Pixels.SizeX\n metadata['SizeY'] = omemd.image(series).Pixels.SizeY\n\n # get number of image series\n metadata['TotalSeries'] = omemd.get_image_count()\n metadata['Sizes BF'] = [metadata['TotalSeries'],\n metadata['SizeT'],\n metadata['SizeZ'],\n metadata['SizeC'],\n metadata['SizeY'],\n metadata['SizeX']]\n\n # get dimension order\n metadata['DimOrder BF'] = omemd.image(series).Pixels.DimensionOrder\n\n # reverse the order to reflect later the array shape\n metadata['DimOrder BF Array'] = metadata['DimOrder BF'][::-1]\n\n # get the scaling\n metadata['XScale'] = omemd.image(series).Pixels.PhysicalSizeX\n metadata['XScale'] = np.round(metadata['XScale'], 3)\n # metadata['XScaleUnit'] = omemd.image(series).Pixels.PhysicalSizeXUnit\n metadata['YScale'] = omemd.image(series).Pixels.PhysicalSizeY\n metadata['YScale'] = np.round(metadata['YScale'], 3)\n # metadata['YScaleUnit'] = omemd.image(series).Pixels.PhysicalSizeYUnit\n metadata['ZScale'] = omemd.image(series).Pixels.PhysicalSizeZ\n metadata['ZScale'] = np.round(metadata['ZScale'], 3)\n # metadata['ZScaleUnit'] = omemd.image(series).Pixels.PhysicalSizeZUnit\n\n # get all image IDs\n for i in range(omemd.get_image_count()):\n metadata['ImageIDs'].append(i)\n\n # get information about the instrument and objective\n try:\n metadata['InstrumentID'] = omemd.instrument(series).get_ID()\n except (KeyError, AttributeError) as e:\n print('Key not found:', e)\n metadata['InstrumentID'] = None\n\n try:\n metadata['DetectorModel'] = omemd.instrument(series).Detector.get_Model()\n metadata['DetectorID'] = omemd.instrument(series).Detector.get_ID()\n metadata['DetectorModel'] = omemd.instrument(series).Detector.get_Type()\n except (KeyError, AttributeError) as e:\n print('Key not found:', e)\n metadata['DetectorModel'] = None\n metadata['DetectorID'] = None\n metadata['DetectorModel'] = None\n\n try:\n metadata['ObjNA'] = omemd.instrument(series).Objective.get_LensNA()\n metadata['ObjID'] = omemd.instrument(series).Objective.get_ID()\n metadata['ObjMag'] = omemd.instrument(series).Objective.get_NominalMagnification()\n except (KeyError, AttributeError) as e:\n print('Key not found:', e)\n metadata['ObjNA'] = None\n metadata['ObjID'] = None\n metadata['ObjMag'] = None\n\n # get channel names\n for c in range(metadata['SizeC']):\n metadata['Channels'].append(omemd.image(series).Pixels.Channel(c).Name)\n\n # add axes and shape information using aicsimageio package\n ometiff_aics = AICSImage(filename)\n metadata['Axes_aics'] = ometiff_aics.dims\n metadata['Shape_aics'] = ometiff_aics.shape\n metadata['SizeX_aics'] = ometiff_aics.size_x\n metadata['SizeY_aics'] = ometiff_aics.size_y\n metadata['SizeC_aics'] = ometiff_aics.size_c\n metadata['SizeZ_aics'] = ometiff_aics.size_t\n metadata['SizeT_aics'] = ometiff_aics.size_t\n metadata['SizeS_aics'] = ometiff_aics.size_s\n\n # close AICSImage object\n ometiff_aics.close()\n\n # check for None inside Scaling to avoid issues later one 
...\n metadata = checkmdscale_none(metadata,\n tocheck=['XScale', 'YScale', 'ZScale'],\n replace=[1.0, 1.0, 1.0])\n\n return metadata\n\n\ndef checkmdscale_none(md, tocheck=['ZScale'], replace=[1.0]):\n \"\"\"Check scaling entries for None to avoid issues later on\n\n :param md: original metadata\n :type md: dict\n :param tocheck: list with entries to check for None, defaults to ['ZScale']\n :type tocheck: list, optional\n :param replace: list with values replacing the None, defaults to [1.0]\n :type replace: list, optional\n :return: modified metadata where None entries where replaces by\n :rtype: [type]\n \"\"\"\n for tc, rv in zip(tocheck, replace):\n if md[tc] is None:\n md[tc] = rv\n\n return md\n\n\ndef get_metadata_czi(filename, dim2none=False,\n forceDim=False,\n forceDimname='SizeC',\n forceDimvalue=2,\n convert_scunit=True):\n \"\"\"\n Returns a dictionary with CZI metadata.\n\n Information CZI Dimension Characters:\n - '0': 'Sample', # e.g. RGBA\n - 'X': 'Width',\n - 'Y': 'Height',\n - 'C': 'Channel',\n - 'Z': 'Slice', # depth\n - 'T': 'Time',\n - 'R': 'Rotation',\n - 'S': 'Scene', # contiguous regions of interest in a mosaic image\n - 'I': 'Illumination', # direction\n - 'B': 'Block', # acquisition\n - 'M': 'Mosaic', # index of tile for compositing a scene\n - 'H': 'Phase', # e.g. Airy detector fibers\n - 'V': 'View', # e.g. for SPIM\n\n :param filename: filename of the CZI image\n :type filename: str\n :param dim2none: option to set non-existing dimension to None, defaults to False\n :type dim2none: bool, optional\n :param forceDim: option to force to not read certain dimensions, defaults to False\n :type forceDim: bool, optional\n :param forceDimname: name of the dimension not to read, defaults to SizeC\n :type forceDimname: str, optional\n :param forceDimvalue: index of the dimension not to read, defaults to 2\n :type forceDimvalue: int, optional\n :param convert_scunit: convert scale unit string from 'µm' to 'micron', defaults to False\n :type convert_scunit: bool, optional\n :return: metadata - dictionary with the relevant CZI metainformation\n :rtype: dict\n \"\"\"\n\n # get CZI object\n czi = zis.CziFile(filename)\n\n # parse the XML into a dictionary\n metadatadict_czi = czi.metadata(raw=False)\n\n # initialize metadata dictionary\n metadata = create_metadata_dict()\n\n # get directory and filename etc.\n metadata['Directory'] = os.path.dirname(filename)\n metadata['Filename'] = os.path.basename(filename)\n metadata['Extension'] = 'czi'\n metadata['ImageType'] = 'czi'\n\n # add axes and shape information using czifile package\n metadata['Axes_czifile'] = czi.axes\n metadata['Shape_czifile'] = czi.shape\n\n # add axes and shape information using aicsimageio package\n czi_aics = AICSImage(filename)\n metadata['Axes_aics'] = czi_aics.dims\n try:\n metadata['Shape_aics'] = czi_aics.shape\n metadata['SizeX_aics'] = czi_aics.size_x\n metadata['SizeY_aics'] = czi_aics.size_y\n metadata['SizeC_aics'] = czi_aics.size_c\n metadata['SizeZ_aics'] = czi_aics.size_t\n metadata['SizeT_aics'] = czi_aics.size_t\n metadata['SizeS_aics'] = czi_aics.size_s\n except KeyError as e:\n metadata['Shape_aics'] = None\n metadata['SizeX_aics'] = None\n metadata['SizeY_aics'] = None\n metadata['SizeC_aics'] = None\n metadata['SizeZ_aics'] = None\n metadata['SizeT_aics'] = None\n metadata['SizeS_aics'] = None\n\n # get additional data by using pylibczi directly\n # Get the shape of the data, the coordinate pairs are (start index, size)\n aics_czi = CziFile(filename)\n metadata['dims_aicspylibczi'] = 
aics_czi.dims_shape()[0]\n metadata['dimorder_aicspylibczi'] = aics_czi.dims\n metadata['size_aicspylibczi'] = aics_czi.size\n metadata['czi_isMosaic'] = aics_czi.is_mosaic()\n\n # determine pixel type for CZI array\n metadata['NumPy.dtype'] = czi.dtype\n\n # check if the CZI image is an RGB image depending\n # on the last dimension entry of axes\n if czi.shape[-1] == 3:\n metadata['czi_isRGB'] = True\n\n try:\n metadata['PixelType'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['PixelType']\n except KeyError as e:\n print('Key not found:', e)\n metadata['PixelType'] = None\n try:\n metadata['SizeX'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeX'])\n except KeyError as e:\n metadata['SizeX'] = None\n try:\n metadata['SizeY'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeY'])\n except KeyError as e:\n metadata['SizeY'] = None\n\n try:\n metadata['SizeZ'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeZ'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeZ'] = None\n if not dim2none:\n metadata['SizeZ'] = 1\n\n # for special cases do not read the SizeC from the metadata\n if forceDim and forceDimname == 'SizeC':\n metadata[forceDimname] = forceDimvalue\n\n if not forceDim:\n\n try:\n metadata['SizeC'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeC'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeC'] = None\n if not dim2none:\n metadata['SizeC'] = 1\n\n # create empty lists for channel related information\n channels = []\n channels_names = []\n channels_colors = []\n\n # in case of only one channel\n if metadata['SizeC'] == 1:\n # get name for dye\n try:\n channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel']['ShortName'])\n except KeyError as e:\n print('Exception:', e)\n try:\n channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel']['DyeName'])\n except KeyError as e:\n print('Exception:', e)\n channels.append('Dye-CH1')\n\n # get channel name\n try:\n channels_names.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel']['Name'])\n except KeyError as e:\n print('Exception:', e)\n channels_names.append['CH1']\n\n # get channel color\n try:\n channels_colors.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel']['Color'])\n except KeyError as e:\n print('Exception:', e)\n channels_colors.append('#80808000')\n\n # in case of two or more channels\n if metadata['SizeC'] > 1:\n # loop over all channels\n for ch in range(metadata['SizeC']):\n # get name for dyes\n try:\n channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel'][ch]['ShortName'])\n except KeyError as e:\n print('Exception:', e)\n try:\n channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel'][ch]['DyeName'])\n except KeyError as e:\n print('Exception:', e)\n channels.append('Dye-CH' + str(ch))\n\n # get channel names\n try:\n channels_names.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel'][ch]['Name'])\n except KeyError as e:\n print('Exception:', e)\n channels_names.append('CH' + str(ch))\n\n # get channel colors\n try:\n 
channels_colors.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n ['Channels']['Channel'][ch]['Color'])\n except KeyError as e:\n print('Exception:', e)\n # use grayscale instead\n channels_colors.append('80808000')\n\n # write channels information (as lists) into metadata dictionary\n metadata['Channels'] = channels\n metadata['ChannelNames'] = channels_names\n metadata['ChannelColors'] = channels_colors\n\n try:\n metadata['SizeT'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeT'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeT'] = None\n if not dim2none:\n metadata['SizeT'] = 1\n\n try:\n metadata['SizeM'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeM'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeM'] = None\n if not dim2none:\n metadata['SizeM'] = 1\n\n try:\n metadata['SizeB'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeB'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeB'] = None\n if not dim2none:\n metadata['SizeB'] = 1\n\n try:\n metadata['SizeS'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeS'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeS'] = None\n if not dim2none:\n metadata['SizeS'] = 1\n\n try:\n metadata['SizeH'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeH'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeH'] = None\n if not dim2none:\n metadata['SizeH'] = 1\n\n try:\n metadata['SizeI'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeI'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeI'] = None\n if not dim2none:\n metadata['SizeI'] = 1\n\n try:\n metadata['SizeV'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeV'])\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['SizeV'] = None\n if not dim2none:\n metadata['SizeV'] = 1\n\n # get the scaling information\n try:\n # metadata['Scaling'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']\n metadata['XScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][0]['Value']) * 1000000\n metadata['YScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][1]['Value']) * 1000000\n metadata['XScale'] = np.round(metadata['XScale'], 3)\n metadata['YScale'] = np.round(metadata['YScale'], 3)\n try:\n metadata['XScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][0]['DefaultUnitFormat']\n metadata['YScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][1]['DefaultUnitFormat']\n except KeyError as e:\n print('Key not found:', e)\n metadata['XScaleUnit'] = None\n metadata['YScaleUnit'] = None\n try:\n metadata['ZScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][2]['Value']) * 1000000\n metadata['ZScale'] = np.round(metadata['ZScale'], 3)\n # additional check for faulty z-scaling\n if metadata['ZScale'] == 0.0:\n metadata['ZScale'] = 1.0\n try:\n metadata['ZScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][2]['DefaultUnitFormat']\n except KeyError as e:\n 
print('Key not found:', e)\n metadata['ZScaleUnit'] = metadata['XScaleUnit']\n except Exception as e:\n # print('Exception:', e)\n if dim2none:\n metadata['ZScale'] = None\n metadata['ZScaleUnit'] = None\n if not dim2none:\n # set to isotropic scaling if it was single plane only\n metadata['ZScale'] = metadata['XScale']\n metadata['ZScaleUnit'] = metadata['XScaleUnit']\n except Exception as e:\n print('Exception:', e)\n print('Scaling Data could not be found.')\n\n # try to get software version\n try:\n metadata['SW-Name'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Application']['Name']\n metadata['SW-Version'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Application']['Version']\n except (KeyError, TypeError) as e:\n print(e)\n metadata['SW-Name'] = None\n metadata['SW-Version'] = None\n\n try:\n metadata['AcqDate'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['AcquisitionDateAndTime']\n except (KeyError, TypeError) as e:\n print(e)\n metadata['AcqDate'] = None\n\n # get objective data\n try:\n if isinstance(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective'], list):\n num_obj = len(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective'])\n else:\n num_obj = 1\n except (KeyError, TypeError) as e:\n print(e)\n num_obj = 1\n\n # if there is only one objective found\n if num_obj == 1:\n try:\n metadata['ObjName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective']['Name'])\n except (KeyError, TypeError) as e:\n print(e)\n metadata['ObjName'].append(None)\n\n try:\n metadata['ObjImmersion'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective']['Immersion']\n except (KeyError, TypeError) as e:\n print(e)\n metadata['ObjImmersion'] = None\n\n try:\n metadata['ObjNA'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective']['LensNA'])\n except (KeyError, TypeError) as e:\n print(e)\n metadata['ObjNA'] = None\n\n try:\n metadata['ObjID'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective']['Id']\n except (KeyError, TypeError) as e:\n print(e)\n metadata['ObjID'] = None\n\n try:\n metadata['TubelensMag'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['TubeLenses']['TubeLens']['Magnification'])\n except (KeyError, TypeError) as e:\n print(e, 'Using Default Value = 1.0 for Tublens Magnification.')\n metadata['TubelensMag'] = 1.0\n\n try:\n metadata['ObjNominalMag'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective']['NominalMagnification'])\n except (KeyError, TypeError) as e:\n print(e, 'Using Default Value = 1.0 for Nominal Magnification.')\n metadata['ObjNominalMag'] = 1.0\n\n try:\n if metadata['TubelensMag'] is not None:\n metadata['ObjMag'] = metadata['ObjNominalMag'] * metadata['TubelensMag']\n if metadata['TubelensMag'] is None:\n print('No TublensMag found. 
Use 1 instead')\n metadata['ObjMag'] = metadata['ObjNominalMag'] * 1.0\n\n except (KeyError, TypeError) as e:\n print(e)\n metadata['ObjMag'] = None\n\n if num_obj > 1:\n for o in range(num_obj):\n\n try:\n metadata['ObjName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective'][o]['Name'])\n except KeyError as e:\n print('Key not found:', e)\n metadata['ObjName'].append(None)\n\n try:\n metadata['ObjImmersion'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective'][o]['Immersion'])\n except KeyError as e:\n print('Key not found:', e)\n metadata['ObjImmersion'].append(None)\n\n try:\n metadata['ObjNA'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective'][o]['LensNA']))\n except KeyError as e:\n print('Key not found:', e)\n metadata['ObjNA'].append(None)\n\n try:\n metadata['ObjID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective'][o]['Id'])\n except KeyError as e:\n print('Key not found:', e)\n metadata['ObjID'].append(None)\n\n try:\n metadata['TubelensMag'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['TubeLenses']['TubeLens'][o]['Magnification']))\n except KeyError as e:\n print('Key not found:', e, 'Using Default Value = 1.0 for Tublens Magnification.')\n metadata['TubelensMag'].append(1.0)\n\n try:\n metadata['ObjNominalMag'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Objectives']['Objective'][o]['NominalMagnification']))\n except KeyError as e:\n print('Key not found:', e, 'Using Default Value = 1.0 for Nominal Magnification.')\n metadata['ObjNominalMag'].append(1.0)\n\n try:\n if metadata['TubelensMag'] is not None:\n metadata['ObjMag'].append(metadata['ObjNominalMag'][o] * metadata['TubelensMag'][o])\n if metadata['TubelensMag'] is None:\n print('No TublensMag found. 
Use 1 instead')\n metadata['ObjMag'].append(metadata['ObjNominalMag'][o] * 1.0)\n\n except KeyError as e:\n print('Key not found:', e)\n metadata['ObjMag'].append(None)\n\n # get detector information\n\n # check if there are any detector entries inside the dictionary\n if pydash.objects.has(metadatadict_czi, ['ImageDocument', 'Metadata', 'Information', 'Instrument', 'Detectors']):\n\n if isinstance(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Detectors']['Detector'], list):\n num_detectors = len(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Detectors']['Detector'])\n else:\n num_detectors = 1\n\n # if there is only one detector found\n if num_detectors == 1:\n\n # check for detector ID\n try:\n metadata['DetectorID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector']['Id'])\n except KeyError as e:\n metadata['DetectorID'].append(None)\n\n # check for detector Name\n try:\n metadata['DetectorName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector']['Name'])\n except KeyError as e:\n metadata['DetectorName'].append(None)\n\n # check for detector model\n try:\n metadata['DetectorModel'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector']['Manufacturer']['Model'])\n except KeyError as e:\n metadata['DetectorModel'].append(None)\n\n # check for detector type\n try:\n metadata['DetectorType'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector']['Type'])\n except KeyError as e:\n metadata['DetectorType'].append(None)\n\n if num_detectors > 1:\n for d in range(num_detectors):\n\n # check for detector ID\n try:\n metadata['DetectorID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector'][d]['Id'])\n except KeyError as e:\n metadata['DetectorID'].append(None)\n\n # check for detector Name\n try:\n metadata['DetectorName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector'][d]['Name'])\n except KeyError as e:\n metadata['DetectorName'].append(None)\n\n # check for detector model\n try:\n metadata['DetectorModel'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector'][d]['Manufacturer']['Model'])\n except KeyError as e:\n metadata['DetectorModel'].append(None)\n\n # check for detector type\n try:\n metadata['DetectorType'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']\n ['Instrument']['Detectors']['Detector'][d]['Type'])\n except KeyError as e:\n metadata['DetectorType'].append(None)\n\n # check for well information\n metadata['Well_ArrayNames'] = []\n metadata['Well_Indices'] = []\n metadata['Well_PositionNames'] = []\n metadata['Well_ColId'] = []\n metadata['Well_RowId'] = []\n metadata['WellCounter'] = None\n metadata['SceneStageCenterX'] = []\n metadata['SceneStageCenterY'] = []\n\n try:\n print('Trying to extract Scene and Well information if existing ...')\n # extract well information from the dictionary\n allscenes = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['Dimensions']['S']['Scenes']['Scene']\n\n # loop over all detected scenes\n for s in range(metadata['SizeS']):\n\n if metadata['SizeS'] == 1:\n well = allscenes\n try:\n metadata['Well_ArrayNames'].append(allscenes['ArrayName'])\n 
except KeyError as e:\n # print('Key not found in Metadata Dictionary:', e)\n try:\n metadata['Well_ArrayNames'].append(well['Name'])\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e, 'Using A1 instead')\n metadata['Well_ArrayNames'].append('A1')\n\n try:\n metadata['Well_Indices'].append(allscenes['Index'])\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_Indices'].append(1)\n\n try:\n metadata['Well_PositionNames'].append(allscenes['Name'])\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_PositionNames'].append('P1')\n\n try:\n metadata['Well_ColId'].append(np.int(allscenes['Shape']['ColumnIndex']))\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_ColId'].append(0)\n\n try:\n metadata['Well_RowId'].append(np.int(allscenes['Shape']['RowIndex']))\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_RowId'].append(0)\n\n try:\n # count the content of the list, e.g. how many time a certain well was detected\n metadata['WellCounter'] = Counter(metadata['Well_ArrayNames'])\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['WellCounter'].append(Counter({'A1': 1}))\n\n try:\n # get the SceneCenter Position\n sx = allscenes['CenterPosition'].split(',')[0]\n sy = allscenes['CenterPosition'].split(',')[1]\n metadata['SceneStageCenterX'].append(np.double(sx))\n metadata['SceneStageCenterY'].append(np.double(sy))\n except KeyError as e:\n metadata['SceneStageCenterX'].append(0.0)\n metadata['SceneStageCenterY'].append(0.0)\n\n if metadata['SizeS'] > 1:\n try:\n well = allscenes[s]\n metadata['Well_ArrayNames'].append(well['ArrayName'])\n except KeyError as e:\n # print('Key not found in Metadata Dictionary:', e)\n try:\n metadata['Well_ArrayNames'].append(well['Name'])\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e, 'Using A1 instead')\n metadata['Well_ArrayNames'].append('A1')\n\n # get the well information\n try:\n metadata['Well_Indices'].append(well['Index'])\n except KeyError as e:\n # print('Key not found in Metadata Dictionary:', e)\n metadata['Well_Indices'].append(None)\n try:\n metadata['Well_PositionNames'].append(well['Name'])\n except KeyError as e:\n # print('Key not found in Metadata Dictionary:', e)\n metadata['Well_PositionNames'].append(None)\n\n try:\n metadata['Well_ColId'].append(np.int(well['Shape']['ColumnIndex']))\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_ColId'].append(None)\n\n try:\n metadata['Well_RowId'].append(np.int(well['Shape']['RowIndex']))\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['Well_RowId'].append(None)\n\n # count the content of the list, e.g. 
how many time a certain well was detected\n metadata['WellCounter'] = Counter(metadata['Well_ArrayNames'])\n\n # try:\n if isinstance(allscenes, list):\n try:\n # get the SceneCenter Position\n sx = allscenes[s]['CenterPosition'].split(',')[0]\n sy = allscenes[s]['CenterPosition'].split(',')[1]\n metadata['SceneStageCenterX'].append(np.double(sx))\n metadata['SceneStageCenterY'].append(np.double(sy))\n except KeyError as e:\n print('Key not found in Metadata Dictionary:', e)\n metadata['SceneCenterX'].append(0.0)\n metadata['SceneCenterY'].append(0.0)\n if not isinstance(allscenes, list):\n metadata['SceneStageCenterX'].append(0.0)\n metadata['SceneStageCenterY'].append(0.0)\n\n # count the number of different wells\n metadata['NumWells'] = len(metadata['WellCounter'].keys())\n\n except (KeyError, TypeError) as e:\n print('No valid Scene or Well information found:', e)\n\n # close CZI file\n czi.close()\n\n # close AICSImage object\n czi_aics.close()\n\n # convert scale unit tom avoid encoding problems\n if convert_scunit:\n if metadata['XScaleUnit'] == 'µm':\n metadata['XScaleUnit'] = 'micron'\n if metadata['YScaleUnit'] == 'µm':\n metadata['YScaleUnit'] = 'micron'\n if metadata['ZScaleUnit'] == 'µm':\n metadata['ZScaleUnit'] = 'micron'\n\n # imwrite(filename, data, description='micron \\xB5'.encode('latin-1')))\n\n return metadata\n\n\ndef get_additional_metadata_czi(filename):\n \"\"\"\n Returns a dictionary with additional CZI metadata.\n\n :param filename: filename of the CZI image\n :type filename: str\n :return: additional_czimd - dictionary with additional CZI metainformation\n :rtype: dict\n \"\"\"\n\n # get CZI object and read array\n czi = zis.CziFile(filename)\n\n # parse the XML into a dictionary\n metadatadict_czi = xmltodict.parse(czi.metadata())\n additional_czimd = {}\n\n try:\n additional_czimd['Experiment'] = metadatadict_czi['ImageDocument']['Metadata']['Experiment']\n except KeyError as e:\n print('Key not found:', e)\n additional_czimd['Experiment'] = None\n\n try:\n additional_czimd['HardwareSetting'] = metadatadict_czi['ImageDocument']['Metadata']['HardwareSetting']\n except KeyError as e:\n print('Key not found:', e)\n additional_czimd['HardwareSetting'] = None\n\n try:\n additional_czimd['CustomAttributes'] = metadatadict_czi['ImageDocument']['Metadata']['CustomAttributes']\n except KeyError as e:\n print('Key not found:', e)\n additional_czimd['CustomAttributes'] = None\n\n try:\n additional_czimd['DisplaySetting'] = metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']\n except KeyError as e:\n print('Key not found:', e)\n additional_czimd['DisplaySetting'] = None\n\n try:\n additional_czimd['Layers'] = metadatadict_czi['ImageDocument']['Metadata']['Layers']\n except KeyError as e:\n print('Key not found:', e)\n additional_czimd['Layers'] = None\n\n # close CZI file\n czi.close()\n\n return additional_czimd\n\n\ndef md2dataframe(metadata, paramcol='Parameter', keycol='Value'):\n \"\"\"Convert the metadata dictionary to a Pandas DataFrame.\n\n :param metadata: MeteData dictionary\n :type metadata: dict\n :param paramcol: Name of Columns for the MetaData Parameters, defaults to 'Parameter'\n :type paramcol: str, optional\n :param keycol: Name of Columns for the MetaData Values, defaults to 'Value'\n :type keycol: str, optional\n :return: Pandas DataFrame containing all the metadata\n :rtype: Pandas.DataFrame\n \"\"\"\n mdframe = pd.DataFrame(columns=[paramcol, keycol])\n\n for k in metadata.keys():\n d = {'Parameter': k, 'Value': metadata[k]}\n df = 
pd.DataFrame([d], index=[0])\n mdframe = pd.concat([mdframe, df], ignore_index=True)\n\n return mdframe\n\n\ndef get_dimorder(dimstring):\n \"\"\"Get the order of dimensions from dimension string\n\n :param dimstring: string containing the dimensions\n :type dimstring: str\n :return: dims_dict - dictionary with the dimensions and its positions\n :rtype: dict\n :return: dimindex_list - list with indices of dimensions\n :rtype: list\n :return: numvalid_dims - number of valid dimensions\n :rtype: integer\n \"\"\"\n\n dimindex_list = []\n dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', 'T', 'C', 'Z', 'Y', 'X', '0']\n dims_dict = {}\n\n # loop over all dimensions and find the index\n for d in dims:\n\n dims_dict[d] = dimstring.find(d)\n dimindex_list.append(dimstring.find(d))\n\n # check if a dimension really exists\n numvalid_dims = sum(i > 0 for i in dimindex_list)\n\n return dims_dict, dimindex_list, numvalid_dims\n\n\ndef get_array_czi(filename,\n replace_value=False,\n remove_HDim=True,\n return_addmd=False,\n forceDim=False,\n forceDimname='SizeC',\n forceDimvalue=2):\n \"\"\"Get the pixel data of the CZI file as multidimensional NumPy.Array\n\n :param filename: filename of the CZI file\n :type filename: str\n :param replacevalue: replace arrays entries with a specific value with NaN, defaults to False\n :type replacevalue: bool, optional\n :param remove_HDim: remove the H-Dimension (Airy Scan Detectors), defaults to True\n :type remove_HDim: bool, optional\n :param return_addmd: read the additional metadata, defaults to False\n :type return_addmd: bool, optional\n :param forceDim: force a specfic dimension to have a specif value, defaults to False\n :type forceDim: bool, optional\n :param forceDimname: name of the dimension, defaults to 'SizeC'\n :type forceDimname: str, optional\n :param forceDimvalue: value of the dimension, defaults to 2\n :type forceDimvalue: int, optional\n :return: cziarray - dictionary with the dimensions and its positions\n :rtype: NumPy.Array\n :return: metadata - dictionary with CZI metadata\n :rtype: dict\n :return: additional_metadata_czi - dictionary with additional CZI metadata\n :rtype: dict\n \"\"\"\n\n metadata = get_metadata_czi(filename,\n forceDim=forceDim,\n forceDimname=forceDimname,\n forceDimvalue=forceDimvalue)\n\n # get additional metainformation\n additional_metadata_czi = get_additional_metadata_czi(filename)\n\n # get CZI object and read array\n czi = zis.CziFile(filename)\n cziarray = czi.asarray()\n\n # check for H dimension and remove\n if remove_HDim and metadata['Axes_czifile'][0] == 'H':\n # metadata['Axes'] = metadata['Axes_czifile'][1:]\n metadata['Axes_czifile'] = metadata['Axes_czifile'].replace('H', '')\n cziarray = np.squeeze(cziarray, axis=0)\n\n # get additional information about dimension order etc.\n dim_dict, dim_list, numvalid_dims = get_dimorder(metadata['Axes_czifile'])\n metadata['DimOrder CZI'] = dim_dict\n\n if cziarray.shape[-1] == 3:\n pass\n else:\n # remove the last dimension from the end\n cziarray = np.squeeze(cziarray, axis=len(metadata['Axes_czifile']) - 1)\n metadata['Axes_czifile'] = metadata['Axes_czifile'].replace('0', '')\n\n if replace_value:\n cziarray = replace_value(cziarray, value=0)\n\n # close czi file\n czi.close()\n\n return cziarray, metadata, additional_metadata_czi\n\n\ndef replace_value(data, value=0):\n \"\"\"Replace specifc values in array with NaN\n\n :param data: Array where values should be replaced\n :type data: NumPy.Array\n :param value: value inside array to be replaced with NaN, defaults 
to 0\n :type value: int, optional\n :return: array with new values\n :rtype: NumPy.Array\n \"\"\"\n\n data = data.astype('float')\n data[data == value] = np.nan\n\n return data\n\n\ndef get_scalefactor(metadata):\n \"\"\"Add scaling factors to the metadata dictionary\n\n :param metadata: dictionary with CZI or OME-TIFF metadata\n :type metadata: dict\n :return: dictionary with additional keys for scling factors\n :rtype: dict\n \"\"\"\n\n # set default scale factor to 1.0\n scalefactors = {'xy': 1.0,\n 'zx': 1.0\n }\n\n try:\n # get the factor between XY scaling\n scalefactors['xy'] = np.round(metadata['XScale'] / metadata['YScale'], 3)\n # get the scalefactor between XZ scaling\n scalefactors['zx'] = np.round(metadata['ZScale'] / metadata['YScale'], 3)\n except KeyError as e:\n print('Key not found: ', e, 'Using defaults = 1.0')\n\n return scalefactors\n\n\ndef calc_scaling(data, corr_min=1.0,\n offset_min=0,\n corr_max=0.85,\n offset_max=0):\n \"\"\"[summary]\n\n :param data: Calculate min / max scaling\n :type data: Numpy.Array\n :param corr_min: correction factor for minvalue, defaults to 1.0\n :type corr_min: float, optional\n :param offset_min: offset for min value, defaults to 0\n :type offset_min: int, optional\n :param corr_max: correction factor for max value, defaults to 0.85\n :type corr_max: float, optional\n :param offset_max: offset for max value, defaults to 0\n :type offset_max: int, optional\n :return: list with [minvalue, maxvalue]\n :rtype: list\n \"\"\"\n\n # get min-max values for initial scaling\n minvalue = np.round((data.min() + offset_min) * corr_min)\n maxvalue = np.round((data.max() + offset_max) * corr_max)\n print('Scaling: ', minvalue, maxvalue)\n\n return [minvalue, maxvalue]\n\n\ndef show_napari(array, metadata,\n blending='additive',\n gamma=0.85,\n add_mdtable=True,\n rename_sliders=False,\n use_BFdims=False):\n \"\"\"Show the multidimensional array using the Napari viewer\n\n :param array: multidimensional NumPy.Array containing the pixeldata\n :type array: NumPy.Array\n :param metadata: dictionary with CZI or OME-TIFF metadata\n :type metadata: dict\n :param blending: NapariViewer option for blending, defaults to 'additive'\n :type blending: str, optional\n :param gamma: NapariViewer value for Gamma, defaults to 0.85\n :type gamma: float, optional\n :param rename_sliders: name slider with correct labels output, defaults to False\n :type verbose: bool, optional\n :param use_BFdims: if True use the 5D dimension string from BioFormats or apeer-ometiff library\n and if False use 6D dimension string from AICSImageIO.\n Only use when the image is read via apeer-ometiff-library etc., defaults to False\n :type verbose: bool, optional\n \"\"\"\n\n # create list for the napari layers\n napari_layers = []\n\n with napari.gui_qt():\n\n # create scalefcator with all ones\n scalefactors = [1.0] * len(array.shape)\n\n # extra check for czi to avoid user mistakes\n if metadata['ImageType'] == 'czi':\n use_BFdims = False\n\n if use_BFdims:\n # use the dimension string from BioFormats 5D\n dimpos = get_dimpositions(metadata['DimOrder BF Array'])\n\n if not use_BFdims:\n # use the dimension string from AICSImageIO 6D (default)\n dimpos = get_dimpositions(metadata['Axes_aics'])\n\n # get the scalefactors from the metadata\n scalef = get_scalefactor(metadata)\n\n # modify the tuple for the scales for napari\n scalefactors[dimpos['Z']] = scalef['zx']\n # remove C dimension from scalefactor\n scalefactors_ch = scalefactors.copy()\n del scalefactors_ch[dimpos['C']]\n\n # 
initialize the napari viewer\n print('Initializing Napari Viewer ...')\n\n # create a viewer and add some images\n viewer = napari.Viewer()\n\n # add widget for metadata\n if add_mdtable:\n\n # create widget for the metadata\n mdbrowser = TableWidget()\n\n viewer.window.add_dock_widget(mdbrowser,\n name='mdbrowser',\n area='right')\n\n # add the metadata and adapt the table display\n mdbrowser.update_metadata(metadata)\n mdbrowser.update_style()\n\n if metadata['SizeC'] > 1:\n\n # add all channels as layers\n for ch in range(metadata['SizeC']):\n\n try:\n # get the channel name\n chname = metadata['Channels'][ch]\n except KeyError as e:\n print(e)\n # or use CH1 etc. as string for the name\n chname = 'CH' + str(ch + 1)\n\n # cut out channel\n # use dask if array is a dask.array\n if isinstance(array, da.Array):\n print('Extract Channel as Dask.Array')\n channel = array.compute().take(ch, axis=dimpos['C'])\n #new_dimstring = metadata['Axes_aics'].replace('C', '')\n\n else:\n # use normal numpy if not\n print('Extract Channel as NumPy.Array')\n channel = array.take(ch, axis=dimpos['C'])\n if use_BFdims:\n new_dimstring = metadata['DimOrder BF Array'].replace('C', '')\n\n if not use_BFdims:\n new_dimstring = metadata['Axes_aics'].replace('C', '')\n\n # actually show the image array\n print('Adding Channel : ', chname)\n print('Shape Channel : ', ch, channel.shape)\n print('Scaling Factors : ', scalefactors_ch)\n\n # get min-max values for initial scaling\n clim = calc_scaling(channel,\n corr_min=1.0,\n offset_min=0,\n corr_max=0.85,\n offset_max=0)\n\n # add channel to napari viewer\n new_layer = viewer.add_image(channel,\n name=chname,\n scale=scalefactors_ch,\n contrast_limits=clim,\n blending=blending,\n gamma=gamma)\n\n napari_layers.append(new_layer)\n\n if metadata['SizeC'] == 1:\n\n # just add one channel as a layer\n try:\n # get the channel name\n chname = metadata['Channels'][0]\n except KeyError:\n # or use CH1 etc. 
as string for the name\n chname = 'CH' + str(ch + 1)\n\n # actually show the image array\n print('Adding Channel: ', chname)\n print('Scaling Factors: ', scalefactors)\n\n # use dask if array is a dask.array\n if isinstance(array, da.Array):\n print('Extract Channel using Dask.Array')\n array = array.compute()\n\n # get min-max values for initial scaling\n clim = calc_scaling(array)\n\n # add layer to Napari viewer\n new_layer = viewer.add_image(array,\n name=chname,\n scale=scalefactors,\n contrast_limits=clim,\n blending=blending,\n gamma=gamma)\n\n napari_layers.append(new_layer)\n\n if rename_sliders:\n\n print('Renaming the Sliders based on the Dimension String ....')\n\n if metadata['SizeC'] == 1:\n\n # get the position of dimension entries after removing C dimension\n dimpos_viewer = get_dimpositions(metadata['Axes_aics'])\n\n # get the label of the sliders\n sliders = viewer.dims.axis_labels\n\n # update the labels with the correct dimension strings\n slidernames = ['B', 'S', 'T', 'Z', 'C']\n\n if metadata['SizeC'] > 1:\n\n new_dimstring = metadata['Axes_aics'].replace('C', '')\n\n # get the position of dimension entries after removing C dimension\n dimpos_viewer = get_dimpositions(new_dimstring)\n\n # get the label of the sliders\n sliders = viewer.dims.axis_labels\n\n # update the labels with the correct dimension strings\n slidernames = ['B', 'S', 'T', 'Z']\n\n for s in slidernames:\n if dimpos_viewer[s] >= 0:\n sliders[dimpos_viewer[s]] = s\n\n # apply the new labels to the viewer\n viewer.dims.axis_labels = sliders\n\n return napari_layers\n\n\ndef check_for_previewimage(czi):\n \"\"\"Check if the CZI contains an image from a prescan camera\n\n :param czi: CZI imagefile object\n :type metadata: CziFile object\n :return: has_attimage - Boolean if CZI image contains prescan image\n :rtype: bool\n \"\"\"\n\n att = []\n\n # loop over the attachments\n for attachment in czi.attachments():\n entry = attachment.attachment_entry\n print(entry.name)\n att.append(entry.name)\n\n has_attimage = False\n\n # check for the entry \"SlidePreview\"\n if 'SlidePreview' in att:\n has_attimage = True\n\n return has_attimage\n\n\ndef writexml_czi(filename, xmlsuffix='_CZI_MetaData.xml'):\n \"\"\"Write XML imformation of CZI to disk\n\n :param filename: CZI image filename\n :type filename: str\n :param xmlsuffix: suffix for the XML file that will be created, defaults to '_CZI_MetaData.xml'\n :type xmlsuffix: str, optional\n :return: filename of the XML file\n :rtype: str\n \"\"\"\n\n # open czi file and get the metadata\n czi = zis.CziFile(filename)\n mdczi = czi.metadata()\n czi.close()\n\n # change file name\n xmlfile = filename.replace('.czi', xmlsuffix)\n\n # get tree from string\n tree = ET.ElementTree(ET.fromstring(mdczi))\n\n # write XML file to same folder\n tree.write(xmlfile, encoding='utf-8', method='xml')\n\n return xmlfile\n\n\ndef writexml_ometiff(filename, xmlsuffix='_OMETIFF_MetaData.xml'):\n \"\"\"Write XML imformation of OME-TIFF to disk\n\n :param filename: OME-TIFF image filename\n :type filename: str\n :param xmlsuffix: suffix for the XML file that will be created, defaults to '_OMETIFF_MetaData.xml'\n :type xmlsuffix: str, optional\n :return: filename of the XML file\n :rtype: str\n \"\"\"\n\n if filename.lower().endswith('.ome.tiff'):\n ext = '.ome.tiff'\n if filename.lower().endswith('.ome.tif'):\n ext = '.ome.tif'\n\n with tifffile.TiffFile(filename) as tif:\n omexml_string = tif.ome_metadata\n\n # get tree from string\n tree = 
ET.ElementTree(ET.fromstring(omexml_string.encode('utf-8')))\n\n # change file name\n xmlfile = filename.replace(ext, xmlsuffix)\n\n tree.write(xmlfile, encoding='utf-8', method='xml', pretty_print=True)\n print('Created OME-XML file for testdata: ', filename)\n\n return xmlfile\n\n\ndef getImageSeriesIDforWell(welllist, wellID):\n \"\"\"\n Returns all ImageSeries (for OME-TIFF) indicies for a specific wellID\n\n :param welllist: list containing all wellIDs as stringe, e.g. '[B4, B4, B4, B4, B5, B5, B5, B5]'\n :type welllist: list\n :param wellID: string specifying the well, eg.g. 'B4'\n :type wellID: str\n :return: imageseriesindices - list containing all ImageSeries indices, which correspond the the well\n :rtype: list\n \"\"\"\n\n imageseries_indices = [i for i, x in enumerate(welllist) if x == wellID]\n\n return imageseries_indices\n\n\ndef addzeros(number):\n \"\"\"Convert a number into a string and add leading zeros.\n Typically used to construct filenames with equal lengths.\n\n :param number: the number\n :type number: int\n :return: zerostring - string with leading zeros\n :rtype: str\n \"\"\"\n\n if number < 10:\n zerostring = '0000' + str(number)\n if number >= 10 and number < 100:\n zerostring = '000' + str(number)\n if number >= 100 and number < 1000:\n zerostring = '00' + str(number)\n if number >= 1000 and number < 10000:\n zerostring = '0' + str(number)\n\n return zerostring\n\n\ndef write_ometiff(filepath, img,\n scalex=0.1,\n scaley=0.1,\n scalez=1.0,\n dimorder='TZCYX',\n pixeltype=np.uint16,\n swapxyaxes=True,\n series=1):\n \"\"\"ONLY FOR INTERNAL TESTING - DO NOT USE!\n\n This function will write an OME-TIFF file to disk.\n The out 6D array has the following dimension order:\n\n [T, Z, C, Y, X] if swapxyaxes = True\n\n [T, Z, C, X, Y] if swapxyaxes = False\n \"\"\"\n\n # Dimension STZCXY\n if swapxyaxes:\n # swap xy to write the OME-Stack with the correct shape\n SizeT = img.shape[0]\n SizeZ = img.shape[1]\n SizeC = img.shape[2]\n SizeX = img.shape[4]\n SizeY = img.shape[3]\n\n if not swapxyaxes:\n SizeT = img.shape[0]\n SizeZ = img.shape[1]\n SizeC = img.shape[2]\n SizeX = img.shape[3]\n SizeY = img.shape[4]\n\n # Getting metadata info\n omexml = bioformats.omexml.OMEXML()\n omexml.image(series - 1).Name = filepath\n\n for s in range(series):\n p = omexml.image(s).Pixels\n p.ID = str(s)\n p.SizeX = SizeX\n p.SizeY = SizeY\n p.SizeC = SizeC\n p.SizeT = SizeT\n p.SizeZ = SizeZ\n p.PhysicalSizeX = np.float(scalex)\n p.PhysicalSizeY = np.float(scaley)\n p.PhysicalSizeZ = np.float(scalez)\n if pixeltype == np.uint8:\n p.PixelType = 'uint8'\n if pixeltype == np.uint16:\n p.PixelType = 'uint16'\n p.channel_count = SizeC\n p.plane_count = SizeZ * SizeT * SizeC\n p = writeOMETIFFplanes(p, SizeT=SizeT, SizeZ=SizeZ, SizeC=SizeC, order=dimorder)\n\n for c in range(SizeC):\n # if pixeltype == 'unit8':\n if pixeltype == np.uint8:\n p.Channel(c).SamplesPerPixel = 1\n\n if pixeltype == np.uint16:\n p.Channel(c).SamplesPerPixel = 2\n\n omexml.structured_annotations.add_original_metadata(bioformats.omexml.OM_SAMPLES_PER_PIXEL, str(SizeC))\n\n # Converting to omexml\n xml = omexml.to_xml(encoding='utf-8')\n\n # write file and save OME-XML as description\n tifffile.imwrite(filepath, img, metadata={'axes': dimorder}, description=xml)\n\n return filepath\n\n\ndef writeOMETIFFplanes(pixel, SizeT=1, SizeZ=1, SizeC=1, order='TZCXY', verbose=False):\n \"\"\"ONLY FOR INTERNAL TESTING - DO NOT USE!\n\n \"\"\"\n if order == 'TZCYX' or order == 'TZCXY':\n\n pixel.DimensionOrder = 
bioformats.omexml.DO_XYCZT\n counter = 0\n for t in range(SizeT):\n for z in range(SizeZ):\n for c in range(SizeC):\n\n if verbose:\n print('Write PlaneTable: ', t, z, c),\n sys.stdout.flush()\n\n pixel.Plane(counter).TheT = t\n pixel.Plane(counter).TheZ = z\n pixel.Plane(counter).TheC = c\n counter = counter + 1\n\n return pixel\n\n\ndef write_ometiff_aicsimageio(savepath, imgarray, metadata,\n reader='aicsimageio',\n overwrite=False):\n \"\"\"Write an OME-TIFF file from an image array based on the metadata.\n\n :param filepath: savepath of the OME-TIFF stack\n :type filepath: str\n :param imgarray: multi-dimensional image array\n :type imgarray: NumPy.Array\n :param metadata: metadata dictionary with the required information\n to create an correct OME-TIFF file\n :type metadata: dict\n :param reader: string (aicsimagio or czifile) specifying\n the used reader, defaults to aicsimageio\n :type metadata: str\n :param overwrite: option to overwrite an existing OME-TIFF, defaults to False\n :type overwrite: bool, optional\n \"\"\"\n\n # define scaling from metadata or use defualt scaling\n try:\n pixels_physical_size = [metadata['XScale'],\n metadata['YScale'],\n metadata['ZScale']]\n except KeyError as e:\n print('Key not found:', e)\n print('Use default scaling XYZ=1.0')\n pixels_physical_size = [1.0, 1.0, 1.0]\n\n # define channel names list from metadata\n try:\n channel_names = []\n for ch in metadata['Channels']:\n channel_names.append(ch)\n except KeyError as e:\n print('Key not found:', e)\n channel_names = None\n\n # get the dimensions and their position inside the dimension string\n if reader == 'aicsimageio':\n\n dims_dict, dimindex_list, numvalid_dims = get_dimorder(metadata['Axes_aics'])\n\n # if the array has more than 5 dimensions then remove the S dimension\n # because it is not supported by OME-TIFF\n if len(imgarray.shape) > 5:\n try:\n imgarray = np.squeeze(imgarray, axis=dims_dict['S'])\n except Exception:\n print('Could not remover S Dimension from string.)')\n\n # remove the S character from the dimension string\n new_dimorder = metadata['Axes_aics'].replace('S', '')\n\n if reader == 'czifile':\n\n new_dimorder = metadata['Axes']\n dims_dict, dimindex_list, numvalid_dims = get_dimorder(metadata['Axes'])\n \"\"\"\n '0': 'Sample', # e.g. RGBA\n 'X': 'Width',\n 'Y': 'Height',\n 'C': 'Channel',\n 'Z': 'Slice', # depth\n 'T': 'Time',\n 'R': 'Rotation',\n 'S': 'Scene', # contiguous regions of interest in a mosaic image\n 'I': 'Illumination', # direction\n 'B': 'Block', # acquisition\n 'M': 'Mosaic', # index of tile for compositing a scene\n 'H': 'Phase', # e.g. Airy detector fibers\n 'V': 'View', # e.g. for SPIM\n \"\"\"\n\n to_remove = []\n\n # list of unspupported dims for writing an OME-TIFF\n dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', '0']\n\n for dim in dims:\n if dims_dict[dim] >= 0:\n # remove the CZI DIMENSION character from the dimension string\n new_dimorder = new_dimorder.replace(dim, '')\n # add dimension index to the list of axis to be removed\n to_remove.append(dims_dict[dim])\n print('Remove Dimension:', dim)\n\n # create tuple with dimensions to be removed\n dims2remove = tuple(to_remove)\n # remove dimensions from array\n imgarray = np.squeeze(imgarray, axis=dims2remove)\n\n # write the array as an OME-TIFF incl. 
the metadata\n try:\n with ome_tiff_writer.OmeTiffWriter(savepath, overwrite_file=overwrite) as writer:\n writer.save(imgarray,\n channel_names=channel_names,\n ome_xml=None,\n image_name=os.path.basename((savepath)),\n pixels_physical_size=pixels_physical_size,\n channel_colors=None,\n dimension_order=new_dimorder)\n writer.close()\n except Exception as error:\n print(error.__class__.__name__ + \": \" + error.msg)\n print('Could not write OME-TIFF')\n savepath = None\n\n return savepath\n\n\ndef correct_omeheader(omefile,\n old=(\"2012-03\", \"2013-06\", r\"ome/2016-06\"),\n new=(\"2016-06\", \"2016-06\", r\"OME/2016-06\")\n ):\n \"\"\"This function is actually a workaround for AICSImageIO<=3.1.4 that\n correct some incorrect namespaces inside the OME-XML header\n\n :param omefile: OME-TIFF image file\n :type omefile: string\n :param old: strings that should be corrected, defaults to (\"2012-03\", \"2013-06\", r\"ome/2016-06\")\n :type old: tuple, optional\n :param new: replacement for the strings to be corrected, defaults to (\"2016-06\", \"2016-06\", r\"OME/2016-06\")\n :type new: tuple, optional\n \"\"\"\n\n # create the tif object from the filename\n tif = tifffile.TiffFile(omefile)\n\n # get the pixel array and the OME-XML string\n array = tif.asarray()\n omexml_string = tif.ome_metadata\n\n # search for the strings to be replaced and do it\n for ostr, nstr in zip(old, new):\n print('Replace: ', ostr, 'with', nstr)\n omexml_string = omexml_string.replace(ostr, nstr)\n\n # save the file with the new, correct strings\n tifffile.imsave(omefile, array,\n photometric='minisblack',\n description=omexml_string)\n\n # close tif object\n tif.close()\n\n print('Updated OME Header.')\n\n\ndef get_fname_woext(filepath):\n \"\"\"Get the complete path of a file without the extension\n It alos will works for extensions like c:\\myfile.abc.xyz\n The output will be: c:\\myfile\n\n :param filepath: complete fiepath\n :type filepath: str\n :return: complete filepath without extension\n :rtype: str\n \"\"\"\n # create empty string\n real_extension = ''\n\n # get all part of the file extension\n sufs = Path(filepath).suffixes\n for s in sufs:\n real_extension = real_extension + s\n\n # remover real extension from filepath\n filepath_woext = filepath.replace(real_extension, '')\n\n return filepath_woext\n\n\ndef convert_to_ometiff(imagefilepath,\n bftoolsdir='/Users/bftools',\n czi_include_attachments=False,\n czi_autostitch=True,\n verbose=True):\n \"\"\"Convert image file using bfconvert tool into a OME-TIFF from with a python script.\n\n :param imagefilepath: path to imagefile\n :type imagefilepath: str\n :param bftoolsdir: bftools directory containing the bfconvert, defaults to '/Users/bftools'\n :type bftoolsdir: str, optional\n :param czi_include_attachments: option convert a CZI attachment (if CZI), defaults to False\n :type czi_include_attachments: bool, optional\n :param czi_autostitch: option stich a CZI, defaults to True\n :type czi_autostitch: bool, optional\n :param verbose: show additional output, defaults to True\n :type verbose: bool, optional\n :return: fileparh of created OME-TIFF file\n :rtype: str\n \"\"\"\n # check if path exits\n if not os.path.exists(bftoolsdir):\n print('No bftools dirctory found. 
Nothing will be converted')\n file_ometiff = None\n\n if os.path.exists(bftoolsdir):\n\n # set working dir\n os.chdir(bftoolsdir)\n\n # get the imagefile path without extension\n imagefilepath_woext = get_fname_woext(imagefilepath)\n\n # create imagefile path for OME-TIFF\n file_ometiff = imagefilepath_woext + '.ome.tiff'\n\n # create cmdstring for CZI files- mind the spaces !!!\n if imagefilepath.lower().endswith('.czi'):\n\n # configure the CZI options\n if czi_include_attachments:\n czi_att = 'true'\n if not czi_include_attachments:\n czi_att = 'false'\n\n if czi_autostitch:\n czi_stitch = 'true'\n if not czi_autostitch:\n czi_stitch = 'false'\n\n # create cmdstring - mind the spaces !!!\n cmdstring = 'bfconvert -no-upgrade -option zeissczi.attachments ' + czi_att + ' -option zeissczi.autostitch ' + \\\n czi_stitch + ' \"' + imagefilepath + '\" \"' + file_ometiff + '\"'\n\n else:\n # create cmdstring for non-CZIs- mind the spaces !!!\n cmdstring = 'bfconvert -no-upgrade' + ' \"' + imagefilepath + '\" \"' + file_ometiff + '\"'\n\n if verbose:\n print('Original ImageFile : ', imagefilepath_woext)\n print('ImageFile OME.TIFF : ', file_ometiff)\n print('Use CMD : ', cmdstring)\n\n # run the bfconvert tool with the specified parameters\n os.system(cmdstring)\n print('Done.')\n\n return file_ometiff\n\n\ndef get_dimpositions(dimstring, tocheck=['B', 'S', 'T', 'Z', 'C']):\n \"\"\"Simple function to get the indices of the dimension identifiers in a string\n\n :param dimstring: dimension string\n :type dimstring: str\n :param tocheck: list of entries to check, defaults to ['B', 'S', 'T', 'Z', 'C']\n :type tocheck: list, optional\n :return: dictionary with positions of dimensions inside string\n :rtype: dict\n \"\"\"\n dimpos = {}\n for p in tocheck:\n dimpos[p] = dimstring.find(p)\n\n return dimpos\n\n\ndef norm_columns(df, colname='Time [s]', mode='min'):\n \"\"\"Normalize a specif column inside a Pandas dataframe\n\n :param df: DataFrame\n :type df: pf.DataFrame\n :param colname: Name of the coumn to be normalized, defaults to 'Time [s]'\n :type colname: str, optional\n :param mode: Mode of Normalization, defaults to 'min'\n :type mode: str, optional\n :return: Dataframe with normalized column\n :rtype: pd.DataFrame\n \"\"\"\n # normalize columns according to min or max value\n if mode == 'min':\n min_value = df[colname].min()\n df[colname] = df[colname] - min_value\n\n if mode == 'max':\n max_value = df[colname].max()\n df[colname] = df[colname] - max_value\n\n return df\n\n\ndef update5dstack(image5d, image2d,\n dimstring5d='TCZYX',\n t=0,\n z=0,\n c=0):\n\n # remove XY\n dimstring5d = dimstring5d.replace('X', '').replace('Y', '')\n\n if dimstring5d == 'TZC':\n image5d[t, z, c, :, :] = image2d\n if dimstring5d == 'TCZ':\n image5d[t, c, z, :, :] = image2d\n if dimstring5d == 'ZTC':\n image5d[z, t, c, :, :] = image2d\n if dimstring5d == 'ZCT':\n image5d[z, c, t, :, :] = image2d\n if dimstring5d == 'CTZ':\n image5d[c, t, z, :, :] = image2d\n if dimstring5d == 'CZT':\n image5d[c, z, t, :, :] = image2d\n\n return image5d\n\n\ndef getdims_pylibczi(czi):\n\n # Get the shape of the data, the coordinate pairs are (start index, size)\n # [{'X': (0, 1900), 'Y': (0, 1300), 'Z': (0, 60), 'C': (0, 4), 'S': (0, 40), 'B': (0, 1)}]\n # dimensions = czi.dims_shape()\n\n dimsizes = {}\n for d in range(len(czi.dims)):\n # print(d)\n dimsizes['Size' + czi.dims[d]] = czi.size[d]\n\n return dimsizes\n\n\ndef calc_normvar(img2d):\n \"\"\"Determine normalized focus value for a 2D image\n - based on algorithm F - 11 
\"Normalized Variance\"\n - Taken from: Sun et al., 2004. MICROSCOPY RESEARCH AND TECHNIQUE 65, 139–149.\n - Maximum value is best-focused, decreasing as defocus increases\n\n :param img2d: 2D image\n :type img2d: NumPy.Array\n :return: normalized focus value for the 2D image\n :rtype: float\n \"\"\"\n\n mean = np.mean(img2d)\n height = img2d.shape[0]\n width = img2d.shape[1]\n\n # subtract the mean and sum up the whole array\n fi = (img2d - mean)**2\n b = np.sum(fi)\n\n # calculate the normalized variance value\n normvar = b / (height * width * mean)\n\n return normvar\n\n\nclass TableWidget(QWidget):\n\n def __init__(self):\n super(QWidget, self).__init__()\n self.layout = QHBoxLayout(self)\n self.mdtable = QTableWidget()\n self.layout.addWidget(self.mdtable)\n self.mdtable.setShowGrid(True)\n self.mdtable.setHorizontalHeaderLabels(['Parameter', 'Value'])\n header = self.mdtable.horizontalHeader()\n header.setDefaultAlignment(Qt.AlignLeft)\n\n def update_metadata(self, metadata):\n\n row_count = len(metadata)\n col_count = 2\n self.mdtable.setColumnCount(col_count)\n self.mdtable.setRowCount(row_count)\n\n row = 0\n\n for key, value in metadata.items():\n newkey = QTableWidgetItem(key)\n self.mdtable.setItem(row, 0, newkey)\n newvalue = QTableWidgetItem(str(value))\n self.mdtable.setItem(row, 1, newvalue)\n row += 1\n\n # fit columns to content\n self.mdtable.resizeColumnsToContents()\n\n def update_style(self):\n\n # define font\n fnt = QFont()\n fnt.setPointSize(11)\n fnt.setBold(True)\n fnt.setFamily('Arial')\n\n # update both header items\n item1 = QtWidgets.QTableWidgetItem('Parameter')\n item1.setForeground(QtGui.QColor(25, 25, 25))\n item1.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(0, item1)\n\n item2 = QtWidgets.QTableWidgetItem('Value')\n item2.setForeground(QtGui.QColor(25, 25, 25))\n item2.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(1, item2)\n" ]
[ [ "pandas.concat", "numpy.squeeze", "numpy.double", "pandas.DataFrame", "numpy.round", "numpy.int", "numpy.mean", "numpy.sum", "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
behzadnouri/pandas
[ "506520bd35331aa82db50686c07d96594cac0c10" ]
[ "pandas/io/tests/parser/comment.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nTests that comments are properly handled during parsing\nfor all of the parsers defined in parsers.py\n\"\"\"\n\nimport numpy as np\nimport pandas.util.testing as tm\n\nfrom pandas import DataFrame\nfrom pandas.compat import StringIO\n\n\nclass CommentTests(object):\n\n def test_comment(self):\n data = \"\"\"A,B,C\n1,2.,4.#hello world\n5.,NaN,10.0\n\"\"\"\n expected = np.array([[1., 2., 4.],\n [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_numpy_array_equal(df.values, expected)\n\n df = self.read_table(StringIO(data), sep=',', comment='#',\n na_values=['NaN'])\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_line_comment(self):\n data = \"\"\"# empty\nA,B,C\n1,2.,4.#hello world\n#ignore this line\n5.,NaN,10.0\n\"\"\"\n expected = np.array([[1., 2., 4.],\n [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_numpy_array_equal(df.values, expected)\n\n # check with delim_whitespace=True\n df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',\n delim_whitespace=True)\n tm.assert_almost_equal(df.values, expected)\n\n # custom line terminator is not supported\n # with the Python parser yet\n if self.engine == 'c':\n expected = np.array([[1., 2., 4.],\n [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data.replace('\\n', '*')),\n comment='#', lineterminator='*')\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_comment_skiprows(self):\n data = \"\"\"# empty\nrandom line\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n # this should ignore the first four lines (including comments)\n expected = np.array([[1., 2., 4.], [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data), comment='#', skiprows=4)\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_comment_header(self):\n data = \"\"\"# empty\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n # header should begin at the second non-comment line\n expected = np.array([[1., 2., 4.], [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data), comment='#', header=1)\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_comment_skiprows_header(self):\n data = \"\"\"# empty\n# second empty line\n# third empty line\nX,Y,Z\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n # skiprows should skip the first 4 lines (including comments), while\n # header should start from the second non-commented line starting\n # with line 5\n expected = np.array([[1., 2., 4.], [5., np.nan, 10.]])\n df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_custom_comment_char(self):\n data = \"a,b,c\\n1,2,3#ignore this!\\n4,5,6#ignorethistoo\"\n\n result = self.read_csv(StringIO(data), comment='#')\n expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "pandas.util.testing.assert_numpy_array_equal", "pandas.compat.StringIO", "pandas.util.testing.assert_almost_equal", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ternaus/iglovikov_segmentation
[ "5a9463031e5da7c2cf34c967a4f2657416c11bd2" ]
[ "src/inference.py" ]
[ "\"\"\"Script to create segmented masks.\"\"\"\n\nimport argparse\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport torch\nfrom catalyst.dl import SupervisedRunner\nfrom catalyst.dl import utils\nfrom iglovikov_helper_functions.config_parsing.from_py import py2cfg\nfrom iglovikov_helper_functions.utils.img_tools import unpad\nfrom pytorch_toolbelt.inference.tta import TTAWrapper, fliplr_image2mask, d4_image2mask\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom src.dataset import TestSegmentationDataset\n\n\nclass ApplySoftmaxToLogits(nn.Module):\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def forward(x):\n return x.softmax(dim=1)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg(\"-c\", \"--config_path\", type=Path, required=True, help=\"Path to config\")\n arg(\"-i\", \"--input_path\", type=Path, required=True, help=\"Path to the target image folder.\")\n arg(\"-j\", \"--num_workers\", type=int, help=\"Number of CPU threads.\", default=12)\n arg(\"-o\", \"--output_path\", type=Path, help=\"Path where to save resulting masks.\", required=True)\n arg(\"-w\", \"--checkpoint\", type=Path, help=\"Path to checkpoint.\", required=True)\n arg(\"-b\", \"--batch_size\", type=int, help=\"Batch_size.\", default=1)\n arg(\"-t\", \"--tta\", help=\"Test time augmentation.\", default=None, choices=[None, \"d4\", \"lr\"])\n arg(\"-v\", \"--visualize\", help=\"If add visualized predictions.\", action=\"store_true\")\n return parser.parse_args()\n\n\ndef load_checkpoint(file_path: (Path, str), rename_in_layers: dict = None):\n \"\"\"Loads pytorch checkpoint, optionally renaming layer names.\n\n\n\n Args:\n file_path: path to the torch checkpoint.\n rename_in_layers: {from_name: to_name}\n ex: {\"model.0.\": \"\",\n \"model.\": \"\"}\n\n\n Returns:\n\n \"\"\"\n checkpoint = torch.load(file_path, map_location=lambda storage, loc: storage)\n\n if rename_in_layers is not None:\n model_state_dict = checkpoint[\"model_state_dict\"]\n\n result = {}\n for key, value in model_state_dict.items():\n for key_r, value_r in rename_in_layers.items():\n key = key.replace(key_r, value_r)\n\n result[key] = value\n\n checkpoint[\"model_state_dict\"] = result\n\n return checkpoint\n\n\ndef main():\n args = get_args()\n image_paths = sorted(args.input_path.glob(\"*.jpg\"))\n\n config = py2cfg(args.config_path)\n args.output_path.mkdir(exist_ok=True, parents=True)\n\n if args.visualize:\n vis_output_path = Path(str(args.output_path) + \"_vis\")\n vis_output_path.mkdir(exist_ok=True, parents=True)\n\n test_aug = config.test_augmentations\n\n model = config.model\n\n checkpoint = load_checkpoint(args.checkpoint, {\"model.0.\": \"\", \"model.\": \"\"})\n utils.unpack_checkpoint(checkpoint, model=model)\n\n model = nn.Sequential(model, ApplySoftmaxToLogits())\n\n model, _, _, _, device = utils.process_components(model=model)\n\n if args.tta == \"lr\":\n model = TTAWrapper(model, fliplr_image2mask)\n elif args.tta == \"d4\":\n model = TTAWrapper(model, d4_image2mask)\n\n runner = SupervisedRunner(model=model, device=device)\n\n with torch.no_grad():\n test_loader = DataLoader(\n TestSegmentationDataset(image_paths, test_aug, factor=config.pad_factor, imread_lib=config.imread_library),\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n\n for input_images in tqdm(test_loader):\n raw_predictions = runner.predict_batch({\"features\": 
input_images[\"features\"].cuda()})[\"logits\"]\n\n image_height, image_width = input_images[\"features\"].shape[2:]\n\n pads = input_images[\"pads\"].cpu().numpy()\n\n image_ids = input_images[\"image_id\"]\n\n _, predictions = raw_predictions.max(1)\n\n for i in range(raw_predictions.shape[0]):\n unpadded_mask = predictions[i].cpu().numpy()\n\n if unpadded_mask.shape != (image_height, image_width):\n unpadded_mask = cv2.resize(\n unpadded_mask, (image_width, image_height), interpolation=cv2.INTER_NEAREST\n )\n\n mask = unpad(unpadded_mask, pads[i]).astype(np.uint8)\n\n mask_name = image_ids[i] + \".png\"\n cv2.imwrite(str(args.output_path / mask_name), mask)\n if args.visualize:\n factor = 255 // config.num_classes\n cv2.imwrite(str(vis_output_path / mask_name), mask * factor)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.no_grad", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]