repo_name
stringlengths
6
130
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
anirudh-chakravarthy/SipMask
[ "fc82b12c13abb091e271eb4f1b6734da18234443" ]
[ "SipMask-mmdetection/mmdet/datasets/pipelines/loading.py" ]
[ "import os.path as osp\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\n\nfrom ..registry import PIPELINES\n\n\[email protected]_module\nclass LoadImageFromFile(object):\n\n def __init__(self, to_float32=False, color_type='color'):\n self.to_float32 = to_float32\n self.color_type = color_type\n\n def __call__(self, results):\n if results['img_prefix'] is not None:\n filename = osp.join(results['img_prefix'],\n results['img_info']['filename'])\n else:\n filename = results['img_info']['filename']\n img = mmcv.imread(filename, self.color_type)\n if self.to_float32:\n img = img.astype(np.float32)\n results['filename'] = filename\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n # Set initial values for default meta_keys\n results['pad_shape'] = img.shape\n results['scale_factor'] = 1.0\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results['img_norm_cfg'] = dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False)\n return results\n\n def __repr__(self):\n return '{} (to_float32={}, color_type={})'.format(\n self.__class__.__name__, self.to_float32, self.color_type)\n\n\[email protected]_module\nclass LoadMultiChannelImageFromFiles(object):\n \"\"\" Load multi channel images from a list of separate channel files.\n Expects results['filename'] to be a list of filenames\n \"\"\"\n\n def __init__(self, to_float32=True, color_type='unchanged'):\n self.to_float32 = to_float32\n self.color_type = color_type\n\n def __call__(self, results):\n if results['img_prefix'] is not None:\n filename = [\n osp.join(results['img_prefix'], fname)\n for fname in results['img_info']['filename']\n ]\n else:\n filename = results['img_info']['filename']\n img = np.stack(\n [mmcv.imread(name, self.color_type) for name in filename], axis=-1)\n if self.to_float32:\n img = img.astype(np.float32)\n results['filename'] = filename\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n return results\n\n def __repr__(self):\n return '{} (to_float32={}, color_type={})'.format(\n self.__class__.__name__, self.to_float32, self.color_type)\n\n\[email protected]_module\nclass LoadAnnotations(object):\n\n def __init__(self,\n with_bbox=True,\n with_label=True,\n with_mask=False,\n with_seg=False,\n poly2mask=True):\n self.with_bbox = with_bbox\n self.with_label = with_label\n self.with_mask = with_mask\n self.with_seg = with_seg\n self.poly2mask = poly2mask\n\n def _load_bboxes(self, results):\n ann_info = results['ann_info']\n results['gt_bboxes'] = ann_info['bboxes']\n\n gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)\n if gt_bboxes_ignore is not None:\n results['gt_bboxes_ignore'] = gt_bboxes_ignore\n results['bbox_fields'].append('gt_bboxes_ignore')\n results['bbox_fields'].append('gt_bboxes')\n return results\n\n def _load_labels(self, results):\n results['gt_labels'] = results['ann_info']['labels']\n return results\n\n def _poly2mask(self, mask_ann, img_h, img_w):\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _load_masks(self, results):\n h, w = results['img_info']['height'], results['img_info']['width']\n gt_masks = results['ann_info']['masks']\n if self.poly2mask:\n gt_masks = [self._poly2mask(mask, h, w) for mask in gt_masks]\n results['gt_masks'] = gt_masks\n results['mask_fields'].append('gt_masks')\n return results\n\n def _load_semantic_seg(self, results):\n results['gt_semantic_seg'] = mmcv.imread(\n osp.join(results['seg_prefix'], results['ann_info']['seg_map']),\n flag='unchanged').squeeze()\n results['seg_fields'].append('gt_semantic_seg')\n return results\n\n def __call__(self, results):\n if self.with_bbox:\n results = self._load_bboxes(results)\n if results is None:\n return None\n if self.with_label:\n results = self._load_labels(results)\n if self.with_mask:\n results = self._load_masks(results)\n if self.with_seg:\n results = self._load_semantic_seg(results)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += ('(with_bbox={}, with_label={}, with_mask={},'\n ' with_seg={})').format(self.with_bbox, self.with_label,\n self.with_mask, self.with_seg)\n return repr_str\n\n\[email protected]_module\nclass LoadProposals(object):\n\n def __init__(self, num_max_proposals=None):\n self.num_max_proposals = num_max_proposals\n\n def __call__(self, results):\n proposals = results['proposals']\n if proposals.shape[1] not in (4, 5):\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposals.shape))\n proposals = proposals[:, :4]\n\n if self.num_max_proposals is not None:\n proposals = proposals[:self.num_max_proposals]\n\n if len(proposals) == 0:\n proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)\n results['proposals'] = proposals\n results['bbox_fields'].append('proposals')\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_max_proposals={})'.format(\n self.num_max_proposals)\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.zeros" ] ]
MrLogarithm/cdli-accounting-viz
[ "17d1f0d0be987104ef635e07627fa94f34dc9b7c" ]
[ "code/commodify.py" ]
[ "import json\nimport segment\nimport convert\nimport semantic\nimport data\nimport numpy as np\nimport re\nimport os\nimport oyaml\nimport gzip\n\nfrom entry import *\n\nfrom collections import defaultdict\n\n#import mariadb\nimport MySQLdb as mariadb\n\n##################################################\n# CONFIGURATION\n\nconfig_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \n 'config.yaml'\n )\nwith open(config_path, encoding='utf-8') as inp_file:\n config = oyaml.safe_load(inp_file)\n\n\n# Load dictionary for use in making annotations:\ndictionary = np.load( \"dict/epsd.npz\", allow_pickle=True )[\"dictionary\"].item()\n\n# Setup directory to hold processed tablet data:\ndata_dir = os.path.join( \"data\", \"commodities\" )\nif os.path.exists(data_dir):\n if os.path.isfile(data_dir):\n print(\"{0} exists and is not a directory! Aborting...\".format(data_dir))\n exit(-1)\nelse:\n os.mkdir(data_dir)\n\n##################################################\n# Sign substitutions:\n\n# TODO Manually emending sign readings will\n# increase accuracy for identification of\n# some commodities. Will require a sound list\n# of equivalences: perhaps based on the list\n# from the Nuolenna project?\n# https://github.com/tosaja/Nuolenna/blob/master/sign_list.txt\n# \n# For now, just map some manually identified cases as\n# proof of concept:\nsign_substitutions = {\n #'sign in the dictionary':'sign in the corpus',\n \"zid2\":\"zi3\",\n \"kug\":\"ku3\",\n \"ku3\":\"kug\",\n \"ERIN2\":\"ERIM\",\n \"urud-da\":\"uruda\",\n }\n\ndef substitute( sign ):\n if sign in sign_substitutions:\n yield sign_substitutions[ sign ]\n return\n else:\n for s in sign_substitutions:\n yield re.sub(\"(^|-|\\()\"+s+\"(-|$|\\))\",\"\\\\1\"+sign_substitutions[s]+\"\\\\2\",sign)\n return \n\n# Emend the dictionary:\nwords = list(dictionary.keys())\nfor word in words:\n for sub in substitute( word ):\n dictionary[ sub ] = dictionary[word]\n\n##################################################\n# Determinative rules:\n\n# Determinatives selected based on ePSD definitions,\n# personal experience, and \n# https://personal.sron.nl/~jheise/signlists/determin.html\ncommodity_determinatives = set([\n \"dug\", # pottery\n \"gan\", # pig \n \"gesz\", # wood\n \"gisz\", # wood, alternate spelling\n \"gi\", #reed\n \"gu4\", # cattle\n \"ha\", # tree? Looks like phonetic complement which is \n # only used (in Girsu texts) for {ha}har-ra-na\n \"ku6\", # fish\n \"kusz\", # leather\n \"muszen\", # bird \n \"na4\", # stones\n \"sar\", # plant\n \"sza:gan\", # pig\n \"szagan\", # pig\n \"sza\", # pig \n \"sze3\", # wood, cf {sze3}szer7 = {gesz}szer7\n \"tug2\", # cloth\n \"u2\", # plant\n \"uruda\", # metal\n \"urudu\", # metal\n \"zi3\", # grains, alternate name for zid2\n \"zid2\", # grains\n #\"ninda\", # Usage seems restricted to {ninda}nindax(DU) \n # where it appears to be a length measurement.\n #\"munus\", # TODO woman. likely more indicative of a name? \n # check how precise this rule is\n #\"da\", # TODO only occurs in banda3{da}, looking like \n # a phonetic complement more than a determinative\n #\"sza\", # TODO only with (asz){sza}\n #\"gada\", # TODO meaning?\n #\"ga2\", # TODO meaning?\n #\"an\", # TODO meaning?\n #\"ur3\", # TODO only in dur9{ur3} \"donkey\". Looks again like \n # phonetic complement. \n ])\n\ndef new_entry():\n entry = Entry()\n # Initialize these as empty lists:\n # otherwise python makes a shallow\n # copy and all entries end up sharing\n # the same list of words:\n entry.counts = []\n entry.words = []\n entry.words_full = []\n return entry\n\ndef label_wordlist( words ):\n \"\"\"\n words: a list of words to label as commodities\n returns: the input list, with commodities labeled\n by _COM, modifiers such as adjectives labeled\n by _MOD, and vessels labeled as _VES\n \"\"\"\n if words == []:\n return words \n\n # Don't label dates:\n if words[0] in set([\"mu\", \"u4\", \"iti\"]):\n return words\n\n # If this line is a total, we should ignore it to avoid double counts\n # \"dur\" can also mark a total, or be a counted object\n if words[0] in set([\"szunigin\", \"gu2-an-sze3\", \"szu-nigin2\"]): \n words += [\"TOTAL\"]\n # Donors and explanatory information:\n #if words[0] in set([\"ki\", \"giri3\"]):\n #return words\n # If there is no count we assume (perhaps incorrectly)\n # that nothing is being counted in this line:\n if \"###\" not in words:\n return words\n\n # Now we annotate words with various features\n # We will use these features to manually decide\n # what is a commodity, but moving forward we can\n # add more features and train a real ML model:\n features = [[] for _ in words]\n for i,word in enumerate(words):\n\n FEAT_NONE = 0\n FEAT_COM = 1\n FEAT_PERS = 2\n FEAT_ADJ = 3\n FEAT_VES = 4\n\n # DETERMINATIVES\n if any( \"{%s}\"%(det) in word for det in commodity_determinatives ):\n #or any(defn[1] == \"NN\" for defn in dictionary[word]) \\\n features[i].append( FEAT_COM )\n else:\n features[i].append( FEAT_NONE )\n\n # SYNSETS\n features[i].append( FEAT_NONE )\n if word in dictionary:\n # Check all definions of the word until one of them\n # gives us evidence to make a decision:\n for defn, POS in dictionary[word]:\n defn = defn.replace(\"?\",\"\")\n # Does this definition represent a commodity?\n is_com, evidence = semantic.is_commodity_synset( \n word, \n semantic.get_noun_hypernyms( defn )\n )\n if is_com:\n if any( \"vessel\" in str(synset) for synset in evidence):\n features[i][-1] = FEAT_VES\n else:\n features[i][-1] = FEAT_COM\n break\n elif not is_com and evidence != []:\n if evidence == \"person\":\n features[i][-1] = FEAT_PERS\n elif evidence == \"vessel\":\n features[i][-1] = FEAT_VES\n elif evidence == \"mod\":\n features[i][-1] = FEAT_ADJ\n elif isinstance( evidence, list ) and type( evidence[0] ).__name__ == \"Synset\":\n if any( \"person\" in str(synset) for synset in evidence ):\n features[i][-1] = FEAT_PERS\n else:\n features[i][-1] = -1\n else:\n features[i][-1] = -1\n break\n else:\n # If word is not in the dictionary, don't pass a list of synsets\n is_com, evidence = semantic.is_commodity_synset( word, None )\n if is_com:\n features[i][-1] = FEAT_COM\n\n\n\n # Use the features from the last section to \n # label the words in the input list:\n maybe_ration = False\n # If two commodity-like items occur within this many words\n # of one another, the second will be labeled as a modifier.\n # This helps with cases like siki udu, where udu and siki\n # can both be commodities but udu here acts as a modifier.\n mod_proximity_threshold = 3\n for i in range(len(words)):\n if words[0] in set([\"ki-la2-bi\"]) and words[i] == \"na4\":\n continue\n context = words[max(0,i-mod_proximity_threshold):i]\n if features[i][0] == FEAT_COM:\n # This check helps avoid labeling modifiers\n # as commodities: e.g. in zi3 sig15, only\n # label zi3, as we would in e.g. ku6 dar-ra\n if i>0 and any( \"_\" in w for w in context ):\n words[i] += \"_MOD\"\n else:\n words[i] += \"_COM\"\n elif features[i][1] == FEAT_VES:\n words[i] += \"_VES\"\n elif features[i][1] == FEAT_COM:\n if i>0 and any( \"_\" in w for w in context):\n words[i] += \"_MOD\"\n else:\n words[i] += \"_COM\"\n elif features[i][1] == FEAT_ADJ:\n # If this entry only contains one word and that word looks\n # like an adjective/modifier, we instead count it as a \n # commodity: helps with e.g. mun \"salt\" which usually modifies\n # a fish (ku6 mun) but can also be counted on its own.\n if len([w for w in words if w != '###']) == 1 or all([features[i][1]==FEAT_ADJ for i in range(len(words)) if words[i]!='###']):\n if i>0 and any( \"_\" in w for w in context):\n words[i] += \"_MOD\"\n else:\n words[i] += \"_COM\"\n else:\n words[i] += \"_MOD\"\n elif features[i][1] == FEAT_PERS:\n # If this is a person, we infer that \n # there may be an implicit ration:\n maybe_ration = True\n # If there is a commodity in the entry, then\n # the person is probably an owner or recipient.\n # If there is no commodity, then there is probably\n # an implicit ration:\n if maybe_ration:\n if not any( \"_COM\" in w for w in words ):\n words.append( \"implied_ration?\" )\n\n return words\n\n############################################\n# Process each text and annotate the words\n# which are likely to represent counted objects:\n#\ndef commodify_cdli_no( cdli_no ):\n \"\"\"\n Given a CDLI number, fetch the text of the corresponding\n artifact from the database and pass it to commodify_text\n \"\"\"\n # Ensure that we have a valid artifact number:\n if re.match(r'P[0-9]{6}', cdli_no) is not None:\n art_no = int(cdli_no[1:])\n elif re.match(r'[0-9]{6}', cdli_no) is not None:\n art_no = int(cdli_no)\n else:\n raise Exception(\"%s: not a well-formed artifact id\"%(cdli_no))\n\n # For the moment, only accept texts in Sumerian:\n LANG_ID_SUMERIAN = 5\n\n # Connect to DB:\n conn = mariadb.connect(\n user=config['db']['user'],\n password=config['db']['password'],\n host=config['db']['host'],\n port=config['db']['port'],\n database=config['db']['database']\n )\n cur = conn.cursor()\n\n # DB query to get text content and language:\n cur.execute(\"SELECT transliteration, language_id FROM inscriptions INNER JOIN artifacts_languages ON inscriptions.artifact_id = artifacts_languages.artifact_id WHERE inscriptions.artifact_id=%s\", (art_no,))\n text = None\n for atf, lang_id in cur:\n if lang_id == LANG_ID_SUMERIAN:\n text = [line.strip().split(\" \") for line in atf.split(\"\\n\")]\n break\n cur.close()\n conn.close()\n\n if text is not None:\n return commodify_text( text, cdli_no )\n # If no text found with specified id\n # and correct language, raise exception\n raise Exception(\"%s: artifact not found or language not supported\"%(cdli_no))\n\ndef commodify_text( text, filename ):\n \"\"\"\n Commodify a string of text and \n save the extracted data.\n \"\"\"\n entries = []\n entry = new_entry()\n\n # Record distance from the preceding number: \n # most commodities occur within 3 tokens of the\n # numeral\n dist_from_numeral = 0\n # Have we found a commodity in this entry yet?\n found_com = False\n \n if not isinstance( text, list ):\n text = [ re.sub(\"@[a-zA-Z]*\",\"\",word) for word in text.strip().split(\" \") ]\n text = [ re.sub(\"[#!?<>\\[\\]]\",\"\",word) for word in text ]\n text = segment.segment( text )\n else:\n # Standardize notation: asz@c -> asz, ASZxDISZ@t -> ASZxDISZ, etc\n # These represent curved/flat/rotated/variant sign forms\n # but we care about a more granular level of detail\n text = [ [ re.sub(\"@[a-zA-Z]*\",\"\",word) for word in line ] for line in text ]\n text = [ [ re.sub(\"[#!?<>\\[\\]]\",\"\",word) for word in line ] for line in text ]\n text = sum([ segment.segment( line ) for line in text ],[])\n\n for entry_ in text:\n\n entry = new_entry()\n\n for string, counts in entry_:\n if counts is not None:\n #entry.words = label_wordlist( entry.words )\n #entries.append( entry )\n #entry = new_entry()\n entry.counts.append( {\"string\":string, \"readings\":counts} )\n entry.words.append( \"###\" )\n entry.words_full.append( string )\n else:\n entry.words.append( string )\n entry.words_full.append( string )\n entry.words = label_wordlist( entry.words )\n entries.append( entry )\n\n # Tag and append the final entry:\n #entry.words = label_wordlist( entry.words )\n #entries.append( entry )\n entries = [ entry for entry in entries if not (entry.counts == [] and entry.words == [])]\n\n # *_COM -> num string -> number of occurrences\n counts_by_commodity = defaultdict(lambda:defaultdict(int))\n counts_by_modified_commodity = defaultdict(lambda:defaultdict(int))\n # *_COM -> [values]\n values_by_commodity = defaultdict(list)\n values_by_modified_commodity = defaultdict(list)\n # (*_COM, *_COM) -> number of cooccurrences\n collocation_counts = defaultdict(int)\n\n lines = defaultdict(int)\n lines_src = defaultdict(set)\n lines_extra = defaultdict(lambda:{\"counted\":[],\"value\":None,\"modifiers\":[]})\n\n for entry in entries:\n # TODO if len(entry.counts) > 1: ???\n lines[ ' '.join(entry.words_full) ] += 1\n lines_src[ ' '.join(entry.words_full) ].add(filename)\n lines_extra[ ' '.join(entry.words_full) ][\"counted\"] = [w for w in entry.words if \"_COM\" in w]\n lines_extra[ ' '.join(entry.words_full) ][\"modifiers\"] = [w for w in entry.words if \"_MOD\" in w]\n try:\n lines_extra[ ' '.join(entry.words_full) ][\"value\"] = entry.counts[0][\"readings\"]\n except:\n pass\n\n if entry.counts != []:\n count = entry.counts[0][\"string\"]\n values = entry.counts[0][\"readings\"]\n else:\n count, values = \"\", []\n\n #if any(\"szum2\" in w for w in entry.words):\n #print(entry.words)\n\n for i,word in enumerate(entry.words):\n if word.endswith(\"_COM\"):\n # Don't count broken commodities \n # like ...{ku6}:\n if '...' in word:\n continue\n\n # In cases like udu_COM nita_MOD,\n # retrieve the modifier:\n commodity = word#.replace(\"_COM\",\"\")\n modified = [commodity]\n for w in entry.words[i+1:]:\n if w.endswith(\"_MOD\"):\n modified.append(w)#.replace(\"_MOD\",\"\"))\n if w.endswith(\"_COM\") or w == \"###\":\n break\n\n # TODO How to handle unreadable counts? \n # Probably count every instance of the commodity,\n # so that people can accurately say such-and-such\n # occurs N times in the corpus, but omit \"none\"\n # from the list of values? \n\n # JSON keys can't be tuples:\n key = ' '.join(modified)\n counts_by_commodity[ commodity ][ count ] += 1\n counts_by_modified_commodity[ key ][ count ] += 1\n # TODO How do we want to resolve ambiguous values?\n # As baseline, just pick the first possible value:\n if values != []:\n values_by_commodity[ commodity ].append( values )\n values_by_modified_commodity[ key ].append( values )\n\n for i in range(len(entries)):\n for j in range(i+1,len(entries)):\n for word_i in entries[i].words:\n if not word_i.endswith( \"_COM\" ):\n continue\n for word_j in entries[j].words:\n if not word_j.endswith( \"_COM\" ):\n continue\n # dict can only store tuple values\n # for consistency, sort the keys\n # and provide and accessor that \n # sorts queries likewise\n key = ' '.join(sorted([\n word_i.replace(\"_COM\",\"\"), \n word_j.replace(\"_COM\",\"\")]))\n collocation_counts[ key ] += 1\n\n all_objects = sorted(list([re.sub('_[A-Z]{3}', '', k) for k in counts_by_commodity.keys()]))\n output_json = {\n \"counts_by_commodity\": dict(counts_by_commodity),\n \"counts_by_modified_commodity\": dict(counts_by_modified_commodity),\n \"values_by_commodity\": dict(values_by_commodity),\n \"values_by_modified_commodity\": dict(values_by_modified_commodity),\n \"collocation_counts\": dict(collocation_counts),\n \"all_objects\": all_objects,\n #\"dictionary\":output_dictionary,\n \"line_counts\":lines,\n \"line_sources\":{l:sorted(list(lines_src[l])) for l in lines_src},\n \"line_data\":lines_extra,\n \"cdli_no\":filename,\n }\n with gzip.open( os.path.join(data_dir, \"{0}.json.gz\".format(filename) ), 'wb' ) as fp:\n json_string = bytes(json.dumps( output_json ), 'utf-8' )\n fp.write(json_string)\n\n return output_json\n" ]
[ [ "numpy.load" ] ]
iArunava/CycleGAN
[ "73e53d7b7eb47c5a68502df442778771f19ef8a2" ]
[ "models/BNConvt.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass BNConvt(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride,\n padding, bias=False, eps=1e-5, momentum=0.1, conv_first=True, relu=False):\n\n super(BNConvt, self).__init__()\n \n if conv_first:\n self.main = nn.ModuleList([\n nn.ConvTranspose2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=bias),\n \n nn.BatchNorm2d(out_channels, eps=eps, momentum=momentum)\n ])\n \n if relu:\n self.main.append(nn.ReLU(inplace=True))\n else:\n\n self.main = nn.ModuleList(\n nn.BatchNorm2d(in_channels, eps=eps, momentum=momentum),\n\n nn.ConvTranspose2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=bias)\n )\n\n if relu:\n self.main.insert(0, nn.ReLU(inplace=True))\n\n\n def forward(self, x):\n '''\n Method that defines the forward pass through the BNConv network.\n Arguments:\n - x : The input to the network\n Returns:\n - The output of the network BNConv\n '''\n\n for layer in self.main:\n x = layer(x)\n\n return x\n" ]
[ [ "torch.nn.ReLU", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d" ] ]
jeisch/bokeh
[ "6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1" ]
[ "tests/unit/bokeh/core/property/test_primitive.py" ]
[ "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom _util_property import _TestHasProps, _TestModel\nfrom bokeh._testing.util.api import verify_all\n\n# Module under test\nimport bokeh.core.property.primitive as bcpp\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Bool',\n 'Complex',\n 'Int',\n 'Float',\n 'String',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass Test_Bool(object):\n\n def test_valid(self):\n prop = bcpp.Bool()\n\n assert prop.is_valid(None)\n\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n assert prop.is_valid(np.bool8(False))\n assert prop.is_valid(np.bool8(True))\n\n def test_invalid(self):\n prop = bcpp.Bool()\n\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.int8(0))\n assert not prop.is_valid(np.int8(1))\n assert not prop.is_valid(np.int16(0))\n assert not prop.is_valid(np.int16(1))\n assert not prop.is_valid(np.int32(0))\n assert not prop.is_valid(np.int32(1))\n assert not prop.is_valid(np.int64(0))\n assert not prop.is_valid(np.int64(1))\n assert not prop.is_valid(np.uint8(0))\n assert not prop.is_valid(np.uint8(1))\n assert not prop.is_valid(np.uint16(0))\n assert not prop.is_valid(np.uint16(1))\n assert not prop.is_valid(np.uint32(0))\n assert not prop.is_valid(np.uint32(1))\n assert not prop.is_valid(np.uint64(0))\n assert not prop.is_valid(np.uint64(1))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self):\n prop = bcpp.Bool()\n assert not prop.has_ref\n\n def test_str(self):\n prop = bcpp.Bool()\n assert str(prop) == \"Bool\"\n\nclass Test_Complex(object):\n\n def test_valid(self):\n prop = bcpp.Complex()\n\n assert prop.is_valid(None)\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n assert prop.is_valid(1.0+1.0j)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n assert prop.is_valid(np.complex64(1.0+1.0j))\n assert prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert prop.is_valid(np.complex256(1.0+1.0j))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self):\n prop = bcpp.Complex()\n\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n\n def test_has_ref(self):\n prop = bcpp.Complex()\n assert not prop.has_ref\n\n def test_str(self):\n prop = bcpp.Complex()\n assert str(prop) == \"Complex\"\n\nclass Test_Float(object):\n\n def test_valid(self):\n prop = bcpp.Float()\n\n assert prop.is_valid(None)\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self):\n prop = bcpp.Float()\n\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self):\n prop = bcpp.Float()\n assert not prop.has_ref\n\n def test_str(self):\n prop = bcpp.Float()\n assert str(prop) == \"Float\"\n\nclass Test_Int(object):\n\n def test_valid(self):\n prop = bcpp.Int()\n\n assert prop.is_valid(None)\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self):\n prop = bcpp.Int()\n\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self):\n prop = bcpp.Int()\n assert not prop.has_ref\n\n def test_str(self):\n prop = bcpp.Int()\n assert str(prop) == \"Int\"\n\nclass Test_String(object):\n\n def test_valid(self):\n prop = bcpp.String()\n\n assert prop.is_valid(None)\n\n assert prop.is_valid(\"\")\n assert prop.is_valid(\"6\")\n\n def test_invalid(self):\n prop = bcpp.String()\n\n assert not prop.is_valid(False)\n assert not prop.is_valid(True)\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n def test_has_ref(self):\n prop = bcpp.String()\n assert not prop.has_ref\n\n def test_str(self):\n prop = bcpp.String()\n assert str(prop) == \"String\"\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcpp, ALL)\n" ]
[ [ "numpy.int8", "numpy.uint8", "numpy.uint32", "numpy.bool8", "numpy.float16", "numpy.complex256", "numpy.uint16", "numpy.complex128", "numpy.float64", "numpy.complex64", "numpy.float32", "numpy.uint64", "numpy.int64", "numpy.int32", "numpy.int16" ] ]
NeonicPlasma/TheBrainOfTWOWCentral
[ "05a3748df0d233bcd85ee8e4edaf7d977159fd07" ]
[ "Config/_functions.py" ]
[ "import numpy as np\nimport random, string, re\nfrom Config._const import ALPHABET, OPTION_DESC, ORIGINAL_DECK\nfrom PIL import Image, ImageFont, ImageDraw, ImageChops\n\ndef alt_font(z): return ImageFont.truetype(\"Fonts/ARIALUNI.TTF\", z)\ndef default(z): return ImageFont.truetype(\"Fonts/RobotoCondensed-Regular.ttf\", z)\ndef font_italic(z): return ImageFont.truetype(\"Fonts/RobotoCondensed-Italic.ttf\", z)\ndef font_bold(z): return ImageFont.truetype(\"Fonts/RobotoCondensed-Bold.ttf\", z)\ndef font_boltalic(z): return ImageFont.truetype(\"Fonts/RobotoCondensed-BoldItalic.ttf\", z)\n\n# grammar_list : Simple function to properly list many strings\ndef grammar_list(listed, c_or=False):\n\tconjunction = f', {\"or\" if c_or else \"and\"} '\n\n\tif len(listed) > 2:\n\t\tfirst_list = \", \".join(listed[:-1])\n\t\tlisted = first_list + conjunction + str(listed[-1])\n\telif len(listed) == 2:\n\t\tlisted = \" and \".join(listed)\n\telif len(listed) == 1:\n\t\tlisted = \"\".join(listed)\n\telse:\n\t\tlisted = \"none\"\n\treturn listed\n\n\n# make_letter_tint : Returns a color for a book depending on the letter\ndef make_letter_tint(char):\n char = char.upper()\n charcode = ord(char)\n if char >= \"0\" and char <= \"9\":\n hue1 = (charcode - ord(\"0\")) / 10\n else:\n hue1 = (charcode - ord(\"A\")) / 26 % 1\n hue360 = hue1 * 255\n return tuple((int(hue360), 153, 255))\n\n\n# make_book : Generates a book image for a given contestant name\ndef make_book(name):\n if len(name) < 2:\n name += name\n\n left = Image.open(\"Images/Book/left.png\").convert(\"RGBA\")\n right = Image.open(\"Images/Book/right.png\").convert(\"RGBA\")\n face = Image.open(\"Images/Book/face.png\").convert(\"RGBA\")\n\n left = ImageChops.multiply(\n left, Image.new(\"HSV\", left.size, make_letter_tint(name[0])).convert(\"RGBA\")\n )\n right = ImageChops.multiply(\n right, Image.new(\"HSV\", right.size, make_letter_tint(name[1])).convert(\"RGBA\")\n )\n\n left.paste(right, (0, 0), right)\n left.paste(face, (0, 0), face)\n\n return left\n\n\n# word_count : Returns a response's word count\ndef word_count(response):\n\twords = 0\n\tfor piece in response.split(\" \"):\n\t\tfor character in piece:\n\t\t\tif character.isalnum():\n\t\t\t\twords += 1\n\t\t\t\tbreak\n\treturn words\n\n\n# elim_prize : Returns how many contestants should prize and be eliminated (based on Dark's formulas)\ndef elim_prize(count, elim_rate=0.2):\n\tnumbers = []\n\n\tif count == 2:\n\t\tnumbers.append(1)\n\telse:\n\t\tnumbers.append(round(count * elim_rate))\n\t\n\tnumbers.append(np.floor(np.sqrt(count) * np.log(count) / 3.75))\n\treturn numbers\n\n\n# formatting_fix : Fixes weirdly formatted lines that might cause formatting problems\ndef formatting_fix(line):\n\tformat_types = [\"||\", \"~~\", \"__\", \"***\", \"**\", \"*\", \"_\"]\n\n\tfor r in format_types:\n\t\tif line.count(r) % 2 == 1:\n\t\t\tline = line.replace(r, \"\")\n\t\n\treturn line\n\n\n# is_whole : Detects integers\ndef is_whole(s):\n\ttry:\n\t\tes = int(s)\n\t\tes2 = float(s)\n\t\tif es2 - es == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\n\n# is_float : Detect numbers that have decimal components\ndef is_float(s):\n\ttry:\n\t\tes = int(s)\n\t\tes2 = float(s)\n\t\tif es2 - es != 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\ttry:\n\t\t\tes2 = float(s)\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False\n\n\n# key_generator : Generates a random alphanumeric string with variable length\ndef key_generator(n):\n\treturn ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(n))\n\n\n# number_key : Generates a random numeric string with variable length\ndef number_key(n):\n\treturn ''.join(random.SystemRandom().choice(string.digits) for _ in range(n))\n\n\n# strip_alpha : Strip a string to only alphabet characters\ndef strip_alpha(string, spaces=False):\n\tif spaces:\n\t\treturn ''.join([x for x in list(string) if x.upper() in ALPHABET[:26] or x == \" \"])\n\treturn ''.join([x for x in list(string) if x.upper() in ALPHABET[:26]])\n\n\n# find_all : Find all instances of a substring in a string, even overlapping ones\ndef find_all(a_str, sub):\n\tstart = 0\n\n\twhile True:\n\t\tstart = a_str.find(sub, start)\n\t\tif start == -1: return\n\t\tyield start\n\t\tstart += 1\n\n\n# find_multi : Find all instances of multiple substrings in a string\ndef find_multi(a_str, sstrlist):\n\tencounters = {}\n\n\tfor sub in sstrlist:\n\t\tencounters[sub] = []\n\t\tstart = 0\n\n\t\twhile True:\n\t\t\tstart = a_str.find(sub, start)\n\t\t\tif start == -1: break\n\t\t\tencounters[sub].append(start)\n\t\t\tstart += 1\n\n\treturn encounters\n\n# overlap_match : Match a regex to a string and count how many times it matches\ndef match_count(pattern, search_string):\n\ttotal = 0\n\tstart = 0\n\tthere = re.compile(pattern)\n\twhile True:\n\t\tmo = there.search(search_string, start)\n\t\tif mo is None: return total\n\t\ttotal += 1\n\t\tstart = 1 + mo.start()\n\n# strip_front : Removes all leading whitespace characters\ndef strip_front(string):\n\treturn re.sub(r\"^\\s+\", \"\", string, flags=re.UNICODE)\n\n# uno_image : Handler for image generation for the UNO command\ndef uno_image(b_type, tag, PREFIX, hand=None, last=None, draw_c=None, name=None, config=None):\n\tbackground = Image.open(\"Images/Uno/Background.png\") # Start with the background image\n\tdraw = ImageDraw.Draw(background)\n\n\tif b_type in [0, 2]: # 0 or 2 displays a hand\n\t\thand_size = len(hand) # Take the hand size and calculate its horizontal pixel length\n\t\thand_range = ((hand_size - 1) * 90 * (0.95 ** hand_size)) if hand_size < 10 else 550\n\n\t\tfor card in range(len(hand)): # Draw each card on the image. First, load the card and resize it properly\n\t\t\tcard_image = Image.open(\"Images/Uno/{}.png\".format(hand[card])).convert('RGBA').resize((119, 190))\n\n\t\t\t# Calculate the x coordinate for this card\n\t\t\tx_coord = (600 - hand_range) + hand_range * 2 * ((card / (hand_size - 1)) if hand_size != 1 else 0)\n\t\t\tsin_mod = np.sin(np.deg2rad(3 * x_coord / 20)) # Depending on the x coordinate it's lower down\n\t\t\ty_coord = 705 - 95 * sin_mod # Calculate the y coordinate\n\n\t\t\tangle = (np.rad2deg(np.arcsin(sin_mod)) - 90) / 3 # The x coordinate also affects angles\n\n\t\t\tcard_image = card_image.rotate(angle if x_coord >= 600 else -angle, expand=1) # Rotate the card\n\n\t\t\tbackground.paste(card_image, # Paste the card in at the proper position\n\t\t\t(int(round(x_coord - card_image.width / 2)), int(round(y_coord - card_image.height / 2))),\n\t\t\tcard_image)\n\n\t\t\tsize_t = draw.textsize(str(card + 1), font_bold(40)) # Draw the card code above the card\n\t\t\tdraw.text((x_coord - size_t[0] / 2, y_coord - 150), str(card + 1), (255, 255, 255), font_bold(40))\n\t\t\n\t\t# Paste the last card played\n\t\tlast_played = Image.open(\"Images/Uno/{}.png\".format(last)).convert('RGBA').resize((210, 337))\n\t\tbackground.paste(last_played, (495, 106), last_played)\n\t\n\t\tif b_type == 0: # Depending on the type, the text varies\n\t\t\ttexty = f\"\"\"It's your turn! Use the [{PREFIX}uno play] command to play a card.\n\t\t\tUse [{PREFIX}uno play draw] to draw cards!\"\"\".replace(\"\\t\", \"\")\n\t\telse:\n\t\t\ttexty = \"This is your hand!\"\n\n\t\tsize_t = draw.textsize(texty, font_bold(40)) # Draw the text, centered\n\t\tdraw.text((600 - size_t[0] / 2, 10),\n\t\ttexty,\n\t\t(255, 255, 255),\n\t\tfont_bold(40))\n\t\n\tif b_type in [1, 3]: # 1 and 3 are images containing just the last card - they're shared publicly\n\t\t# Paste the last card playeds\n\t\tlast_played = Image.open(\"Images/Uno/{}.png\".format(last)).convert('RGBA').resize((306, 490))\n\t\tbackground.paste(last_played, (447, 180), last_played)\n\n\t\tif b_type == 1:\n\t\t\ttexty = f\"It's {name}'s turn to play a card!\"\n\t\telse:\n\t\t\ttexty = f\"{name} WINS THE GAME!\"\n\n\t\tsize_t = draw.textsize(texty, font_bold(50)) # Draw the text, centered\n\t\tdraw.text((600 - size_t[0] / 2, 25),\n\t\ttexty,\n\t\t(255, 255, 255),\n\t\tfont_bold(50))\n\t\n\tif b_type == 4: # 4 is the config menu\n\n\t\tfor option in range(len(config)): # Depending on the index of the option\n\t\t\tx_c = 50 + 580 * (option // 8) # Draw it at different x and y coordinates\n\t\t\ty_c = 130 + 85 * (option % 8)\n\n\t\t\tif type(list(config.values())[option]) is int: # If this setting is an integer, do the int visualization\n\t\t\t\tdraw.ellipse((x_c + 20, y_c + 20, x_c + 50, y_c + 50), fill='white')\n\n\t\t\t\tfor z in range(list(config.values())[option]): # Draw a number of lines corresponding to the int\n\t\t\t\t\tangle = 2 * np.pi / list(config.values())[option] * z\n\t\t\t\t\tdraw.line((x_c + 35, y_c + 35, x_c + 35 * np.cos(angle) + 35, y_c + 35 * np.sin(angle) + 35),\n\t\t\t\t\tfill=(255, 255, 255), width=3)\n\t\t\t\t\n\t\t\t\tn_color = (0, 0, 0)\n\n\t\t\telse: # If it's a boolean, it's either a black or white circle\n\t\t\t\tdraw.ellipse((x_c, y_c, x_c + 70, y_c + 70), fill='white')\n\n\t\t\t\tif not list(config.values())[option]:\n\t\t\t\t\tdraw.ellipse((x_c + 2, y_c + 2, x_c + 66, y_c + 66), fill='black')\n\t\t\t\t\tn_color = (255, 255, 255)\n\n\t\t\t\telse:\n\t\t\t\t\tn_color = (0, 0, 0)\n\n\t\t\tdraw.text((x_c + 90, y_c + 15),\n\t\t\tOPTION_DESC[list(config.keys())[option]].replace(\"$\", str(list(config.values())[option])),\n\t\t\t(255, 255, 255), font_bold(30))\n\t\t\tx_size = draw.textsize(str(option + 1), font_bold(30))[0]\n\t\t\tdraw.text((x_c - x_size / 2 + 35, y_c + 15), str(option + 1), n_color, font_bold(30))\n\n\t\tinstruc = \"\"\"The round host can change any of these options with [{PREFIX}uno config x y], \n\t\tx being the option number, y being any complement necessary.\"\"\".replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n\t\tx_size = draw.textsize(instruc, font_bold(30))[0]\n\t\tdraw.text((600 - x_size / 2, 15), instruc, (255, 255, 255), font_bold(30))\n\n\t# Save the image with the tag provided\n\tbackground.save(f\"Images/current_card_image_{tag}.png\")\n\treturn\n\n# uno_skip : Sets the uno_info dict to normal\ndef uno_skip():\n\tuno_info = {\n\t\t\"running\": False,\n\t\t\"status\": 0,\n\t\t\"players\": [],\n\t\t\"order\": [],\n\t\t\"hands\": [],\n\t\t\"host\": 0,\n\t\t\"current\": 0,\n\t\t\"deck\": ORIGINAL_DECK,\n\t\t\"last_card\": \"00\",\n\t\t\"carryover\": 0,\n\t\t\"channel\": 0,\n\t\t\"config\": {\"0-7\": False, \"d-skip\": True, \"start\": 7}\n\t}\n\treturn uno_info\n" ]
[ [ "numpy.sin", "numpy.log", "numpy.arcsin", "numpy.sqrt", "numpy.cos", "numpy.deg2rad" ] ]
KwanYu/Airbnb-Backend
[ "d5d77f3541f329bbb28142d18606b22f115b7df6" ]
[ "venv/Lib/site-packages/astropy/io/ascii/tests/test_read.py" ]
[ "# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport re\nfrom io import BytesIO, open\nfrom collections import OrderedDict\nimport locale\nimport platform\nfrom io import StringIO\n\nimport pathlib\nimport pytest\nimport numpy as np\n\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom astropy import table\nfrom astropy.units import Unit\nfrom astropy.table.table_helpers import simple_table\n\nfrom .common import (raises, assert_equal, assert_almost_equal,\n assert_true)\nfrom astropy.io.ascii import core\nfrom astropy.io.ascii.ui import _probably_html, get_read_trace\nfrom astropy.utils.exceptions import AstropyWarning\n\n# setup/teardown function to have the tests run in the correct directory\nfrom .common import setup_function, teardown_function # noqa\n\ntry:\n import bz2 # noqa\nexcept ImportError:\n HAS_BZ2 = False\nelse:\n HAS_BZ2 = True\n\nasciiIO = lambda x: BytesIO(x.encode('ascii'))\n\n\[email protected]('fast_reader', [True, False, {'use_fast_converter': False},\n {'use_fast_converter': True}, 'force'])\ndef test_convert_overflow(fast_reader):\n \"\"\"\n Test reading an extremely large integer, which falls through to\n string due to an overflow error (#2234). The C parsers used to\n return inf (kind 'f') for this.\n \"\"\"\n expected_kind = 'U'\n with pytest.warns(AstropyWarning, match=\"OverflowError converting to IntType in column a\"):\n dat = ascii.read(['a', '1' * 10000], format='basic',\n fast_reader=fast_reader, guess=False)\n assert dat['a'].dtype.kind == expected_kind\n\n\ndef test_guess_with_names_arg():\n \"\"\"\n Make sure reading a table with guess=True gives the expected result when\n the names arg is specified.\n \"\"\"\n # This is a NoHeader format table and so `names` should replace\n # the default col0, col1 names. It fails as a Basic format\n # table when guessing because the column names would be '1', '2'.\n dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))\n assert len(dat) == 2\n assert dat.colnames == ['a', 'b']\n\n # This is a Basic format table and the first row\n # gives the column names 'c', 'd', which get replaced by 'a', 'b'\n dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))\n assert len(dat) == 1\n assert dat.colnames == ['a', 'b']\n\n # This is also a Basic format table and the first row\n # gives the column names 'c', 'd', which get replaced by 'a', 'b'\n dat = ascii.read(['c d', 'e f'], names=('a', 'b'))\n assert len(dat) == 1\n assert dat.colnames == ['a', 'b']\n\n\ndef test_guess_with_format_arg():\n \"\"\"\n When the format or Reader is explicitly given then disable the\n strict column name checking in guessing.\n \"\"\"\n dat = ascii.read(['1,2', '3,4'], format='basic')\n assert len(dat) == 1\n assert dat.colnames == ['1', '2']\n\n dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')\n assert len(dat) == 1\n assert dat.colnames == ['a', 'b']\n\n dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)\n assert len(dat) == 1\n assert dat.colnames == ['1', '2']\n\n dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)\n assert len(dat) == 1\n assert dat.colnames == ['a', 'b']\n\n # For good measure check the same in the unified I/O interface\n dat = Table.read(['1,2', '3,4'], format='ascii.basic')\n assert len(dat) == 1\n assert dat.colnames == ['1', '2']\n\n dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))\n assert len(dat) == 1\n assert dat.colnames == ['a', 'b']\n\n\ndef test_guess_with_delimiter_arg():\n \"\"\"\n When the delimiter is explicitly given then do not try others in guessing.\n \"\"\"\n fields = ['10.1E+19', '3.14', '2048', '-23']\n values = [1.01e20, 3.14, 2048, -23]\n\n # Default guess should recognise CSV with optional spaces\n t0 = ascii.read(asciiIO(', '.join(fields)), guess=True)\n for n, v in zip(t0.colnames, values):\n assert t0[n][0] == v\n\n # Forcing space as delimiter produces type str columns ('10.1E+19,')\n t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ')\n for n, v in zip(t1.colnames[:-1], fields[:-1]):\n assert t1[n][0] == v + ','\n\n\ndef test_reading_mixed_delimiter_tabs_spaces():\n # Regression test for https://github.com/astropy/astropy/issues/6770\n dat = ascii.read('1 2\\t3\\n1 2\\t3', format='no_header', names=list('abc'))\n assert len(dat) == 2\n\n Table.read(['1 2\\t3', '1 2\\t3'], format='ascii.no_header',\n names=['a', 'b', 'c'])\n assert len(dat) == 2\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_read_with_names_arg(fast_reader):\n \"\"\"\n Test that a bad value of `names` raises an exception.\n \"\"\"\n # CParser only uses columns in `names` and thus reports mismach in num_col\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_read_all_files(fast_reader):\n for testfile in get_testfiles():\n if testfile.get('skip'):\n print('\\n\\n******** SKIPPING {}'.format(testfile['name']))\n continue\n print('\\n\\n******** READING {}'.format(testfile['name']))\n for guess in (True, False):\n test_opts = testfile['opts'].copy()\n if 'guess' not in test_opts:\n test_opts['guess'] = guess\n if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name)\n in core.FAST_CLASSES): # has fast version\n if 'Inputter' not in test_opts: # fast reader doesn't allow this\n test_opts['fast_reader'] = fast_reader\n table = ascii.read(testfile['name'], **test_opts)\n assert_equal(table.dtype.names, testfile['cols'])\n for colname in table.dtype.names:\n assert_equal(len(table[colname]), testfile['nrows'])\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_read_all_files_via_table(fast_reader):\n for testfile in get_testfiles():\n if testfile.get('skip'):\n print('\\n\\n******** SKIPPING {}'.format(testfile['name']))\n continue\n print('\\n\\n******** READING {}'.format(testfile['name']))\n for guess in (True, False):\n test_opts = testfile['opts'].copy()\n if 'guess' not in test_opts:\n test_opts['guess'] = guess\n if 'Reader' in test_opts:\n format = 'ascii.{}'.format(test_opts['Reader']._format_name)\n del test_opts['Reader']\n else:\n format = 'ascii'\n if f'fast_{format}' in core.FAST_CLASSES:\n test_opts['fast_reader'] = fast_reader\n table = Table.read(testfile['name'], format=format, **test_opts)\n assert_equal(table.dtype.names, testfile['cols'])\n for colname in table.dtype.names:\n assert_equal(len(table[colname]), testfile['nrows'])\n\n\ndef test_guess_all_files():\n for testfile in get_testfiles():\n if testfile.get('skip'):\n print('\\n\\n******** SKIPPING {}'.format(testfile['name']))\n continue\n if not testfile['opts'].get('guess', True):\n continue\n print('\\n\\n******** READING {}'.format(testfile['name']))\n for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):\n # Copy read options except for those in filter_read_opts\n guess_opts = dict((k, v) for k, v in testfile['opts'].items()\n if k not in filter_read_opts)\n table = ascii.read(testfile['name'], guess=True, **guess_opts)\n assert_equal(table.dtype.names, testfile['cols'])\n for colname in table.dtype.names:\n assert_equal(len(table[colname]), testfile['nrows'])\n\n\ndef test_daophot_indef():\n \"\"\"Test that INDEF is correctly interpreted as a missing value\"\"\"\n table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)\n for col in table.itercols():\n # Four columns have all INDEF values and are masked, rest are normal Column\n if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'):\n assert np.all(col.mask)\n else:\n assert not hasattr(col, 'mask')\n\n\ndef test_daophot_types():\n \"\"\"\n Test specific data types which are different from what would be\n inferred automatically based only data values. DAOphot reader uses\n the header information to assign types.\n \"\"\"\n table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)\n assert table['LID'].dtype.char in 'fd' # float or double\n assert table['MAG'].dtype.char in 'fd' # even without any data values\n assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)\n assert table['ID'].dtype.char in 'il' # int or long\n\n\ndef test_daophot_header_keywords():\n table = ascii.read('data/daophot.dat', Reader=ascii.Daophot)\n expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),\n ('REJFILE', '\"hello world\"', 'filename', '%-23s'),\n ('SCALE', '1.', 'units/pix', '%-23.7g'),)\n\n keywords = table.meta['keywords'] # Ordered dict of keyword structures\n for name, value, units, format_ in expected_keywords:\n keyword = keywords[name]\n assert_equal(keyword['value'], value)\n assert_equal(keyword['units'], units)\n assert_equal(keyword['format'], format_)\n\n\ndef test_daophot_multiple_aperture():\n table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot)\n assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names\n assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file\n assert table['MERR2'][0] == 1.171\n assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3\n\n\ndef test_daophot_multiple_aperture2():\n table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot)\n assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name\n assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file\n assert table['MERR2'][0] == 0.049\n assert np.all(table['RAPERT5'] == 5.) # assert all the 5th apertures are same 5.0\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_empty_table_no_header(fast_reader):\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader,\n guess=False, fast_reader=fast_reader)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_wrong_quote(fast_reader):\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_extra_data_col(fast_reader):\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read('data/bad.txt', fast_reader=fast_reader)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_extra_data_col2(fast_reader):\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader)\n\n\ndef test_missing_file():\n with pytest.raises(OSError):\n ascii.read('does_not_exist')\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_set_names(fast_reader):\n names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')\n data = ascii.read('data/simple3.txt', names=names, delimiter='|',\n fast_reader=fast_reader)\n assert_equal(data.dtype.names, names)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_set_include_names(fast_reader):\n names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')\n include_names = ('c1', 'c3')\n data = ascii.read('data/simple3.txt', names=names, include_names=include_names,\n delimiter='|', fast_reader=fast_reader)\n assert_equal(data.dtype.names, include_names)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_set_exclude_names(fast_reader):\n exclude_names = ('Y', 'object')\n data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|',\n fast_reader=fast_reader)\n assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))\n\n\ndef test_include_names_daophot():\n include_names = ('ID', 'MAG', 'PIER')\n data = ascii.read('data/daophot.dat', include_names=include_names)\n assert_equal(data.dtype.names, include_names)\n\n\ndef test_exclude_names_daophot():\n exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')\n data = ascii.read('data/daophot.dat', exclude_names=exclude_names)\n assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))\n\n\ndef test_custom_process_lines():\n def process_lines(lines):\n bars_at_ends = re.compile(r'^\\| | \\|$', re.VERBOSE)\n striplines = (x.strip() for x in lines)\n return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]\n reader = ascii.get_reader(delimiter='|')\n reader.inputter.process_lines = process_lines\n data = reader.read('data/bars_at_ends.txt')\n assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))\n assert_equal(len(data), 3)\n\n\ndef test_custom_process_line():\n def process_line(line):\n line_out = re.sub(r'^\\|\\s*', '', line.strip())\n return line_out\n reader = ascii.get_reader(data_start=2, delimiter='|')\n reader.header.splitter.process_line = process_line\n reader.data.splitter.process_line = process_line\n data = reader.read('data/nls1_stackinfo.dbout')\n cols = get_testfiles('data/nls1_stackinfo.dbout')['cols']\n assert_equal(data.dtype.names, cols[1:])\n\n\ndef test_custom_splitters():\n reader = ascii.get_reader()\n reader.header.splitter = ascii.BaseSplitter()\n reader.data.splitter = ascii.BaseSplitter()\n f = 'data/test4.dat'\n data = reader.read(f)\n testfile = get_testfiles(f)\n assert_equal(data.dtype.names, testfile['cols'])\n assert_equal(len(data), testfile['nrows'])\n assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)\n assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)\n assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)\n assert_equal(data.field('statname')[2], 'chi2modvar')\n assert_almost_equal(data.field('statval')[2], 497.56468441)\n\n\ndef test_start_end():\n data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5)\n assert_equal(len(data), 13)\n assert_equal(data.field('statname')[0], 'chi2xspecvar')\n assert_equal(data.field('statname')[-1], 'chi2gehrels')\n\n\ndef test_set_converters():\n converters = {'zabs1.nh': [ascii.convert_numpy('int32'),\n ascii.convert_numpy('float32')],\n 'p1.gamma': [ascii.convert_numpy('str')]\n }\n data = ascii.read('data/test4.dat', converters=converters)\n assert_equal(str(data['zabs1.nh'].dtype), 'float32')\n assert_equal(data['p1.gamma'][0], '1.26764500000')\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_from_string(fast_reader):\n f = 'data/simple.txt'\n with open(f) as fd:\n table = fd.read()\n testfile = get_testfiles(f)\n data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])\n assert_equal(data.dtype.names, testfile['cols'])\n assert_equal(len(data), testfile['nrows'])\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_from_filelike(fast_reader):\n f = 'data/simple.txt'\n testfile = get_testfiles(f)\n with open(f, 'rb') as fd:\n data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])\n assert_equal(data.dtype.names, testfile['cols'])\n assert_equal(len(data), testfile['nrows'])\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_from_lines(fast_reader):\n f = 'data/simple.txt'\n with open(f) as fd:\n table = fd.readlines()\n testfile = get_testfiles(f)\n data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])\n assert_equal(data.dtype.names, testfile['cols'])\n assert_equal(len(data), testfile['nrows'])\n\n\ndef test_comment_lines():\n table = ascii.get_reader(Reader=ascii.Rdb)\n data = table.read('data/apostrophe.rdb')\n assert_equal(table.comment_lines, ['# first comment', ' # second comment'])\n assert_equal(data.meta['comments'], ['first comment', 'second comment'])\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_fill_values(fast_reader):\n f = 'data/fill_values.txt'\n testfile = get_testfiles(f)\n data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,\n **testfile['opts'])\n assert_true((data['a'].mask == [False, True]).all())\n assert_true((data['a'] == [1, 1]).all())\n assert_true((data['b'].mask == [False, True]).all())\n assert_true((data['b'] == [2, 1]).all())\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_fill_values_col(fast_reader):\n f = 'data/fill_values.txt'\n testfile = get_testfiles(f)\n data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,\n **testfile['opts'])\n check_fill_values(data)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_fill_values_include_names(fast_reader):\n f = 'data/fill_values.txt'\n testfile = get_testfiles(f)\n data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,\n fill_include_names=['b'], **testfile['opts'])\n check_fill_values(data)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_fill_values_exclude_names(fast_reader):\n f = 'data/fill_values.txt'\n testfile = get_testfiles(f)\n data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,\n fill_exclude_names=['a'], **testfile['opts'])\n check_fill_values(data)\n\n\ndef check_fill_values(data):\n \"\"\"compare array column by column with expectation \"\"\"\n assert not hasattr(data['a'], 'mask')\n assert_true((data['a'] == ['1', 'a']).all())\n assert_true((data['b'].mask == [False, True]).all())\n # Check that masked value is \"do not care\" in comparison\n assert_true((data['b'] == [2, -999]).all())\n data['b'].mask = False # explicitly unmask for comparison\n assert_true((data['b'] == [2, 1]).all())\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_fill_values_list(fast_reader):\n f = 'data/fill_values.txt'\n testfile = get_testfiles(f)\n data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],\n fast_reader=fast_reader, **testfile['opts'])\n data['a'].mask = False # explicitly unmask for comparison\n assert_true((data['a'] == [42, 42]).all())\n\n\ndef test_masking_Cds():\n f = 'data/cds.dat'\n testfile = get_testfiles(f)\n data = ascii.read(f,\n **testfile['opts'])\n assert_true(data['AK'].mask[0])\n assert not hasattr(data['Fit'], 'mask')\n\n\ndef test_null_Ipac():\n f = 'data/ipac.dat'\n testfile = get_testfiles(f)\n data = ascii.read(f, **testfile['opts'])\n mask = np.array([(True, False, True, False, True),\n (False, False, False, False, False)],\n dtype=[('ra', '|b1'),\n ('dec', '|b1'),\n ('sai', '|b1'),\n ('v2', '|b1'),\n ('sptype', '|b1')])\n assert np.all(data.mask == mask)\n\n\ndef test_Ipac_meta():\n keywords = OrderedDict((('intval', 1),\n ('floatval', 2.3e3),\n ('date', \"Wed Sp 20 09:48:36 1995\"),\n ('key_continue', 'IPAC keywords can continue across lines')))\n comments = ['This is an example of a valid comment']\n f = 'data/ipac.dat'\n testfile = get_testfiles(f)\n data = ascii.read(f, **testfile['opts'])\n assert data.meta['keywords'].keys() == keywords.keys()\n for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):\n assert data_kv['value'] == kv\n assert data.meta['comments'] == comments\n\n\ndef test_set_guess_kwarg():\n \"\"\"Read a file using guess with one of the typical guess_kwargs explicitly set.\"\"\"\n data = ascii.read('data/space_delim_no_header.dat',\n delimiter=',', guess=True)\n assert(data.dtype.names == ('1 3.4 hello',))\n assert(len(data) == 1)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_read_rdb_wrong_type(fast_reader):\n \"\"\"Read RDB data with inconstent data type (except failure)\"\"\"\n table = \"\"\"col1\\tcol2\nN\\tN\n1\\tHello\"\"\"\n with pytest.raises(ValueError):\n ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_default_missing(fast_reader):\n \"\"\"Read a table with empty values and ensure that corresponding entries are masked\"\"\"\n table = '\\n'.join(['a,b,c,d',\n '1,3,,',\n '2, , 4.0 , ss '])\n dat = ascii.read(table, fast_reader=fast_reader)\n assert dat.masked is False\n assert dat.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 1 3 -- --',\n ' 2 -- 4.0 ss']\n\n # Single row table with a single missing element\n table = \"\"\" a \\n \"\" \"\"\"\n dat = ascii.read(table, fast_reader=fast_reader)\n assert dat.pformat() == [' a ',\n '---',\n ' --']\n assert dat['a'].dtype.kind == 'i'\n\n # Same test with a fixed width reader\n table = '\\n'.join([' a b c d ',\n '--- --- --- ---',\n ' 1 3 ',\n ' 2 4.0 ss'])\n dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)\n assert dat.masked is False\n assert dat.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 1 3 -- --',\n ' 2 -- 4.0 ss']\n\n dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)\n assert dat.masked is False\n assert dat.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 1 3 ',\n ' 2 4.0 ss']\n\n dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])\n assert dat.masked is False\n assert dat.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 1 3 ',\n ' 2 4.0 ss']\n\n\ndef get_testfiles(name=None):\n \"\"\"Set up information about the columns, number of rows, and reader params to\n read a bunch of test files and verify columns and number of rows.\"\"\"\n\n testfiles = [\n {'cols': ('agasc_id', 'n_noids', 'n_obs'),\n 'name': 'data/apostrophe.rdb',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Rdb}},\n {'cols': ('agasc_id', 'n_noids', 'n_obs'),\n 'name': 'data/apostrophe.tab',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Tab}},\n {'cols': ('Index',\n 'RAh',\n 'RAm',\n 'RAs',\n 'DE-',\n 'DEd',\n 'DEm',\n 'DEs',\n 'Match',\n 'Class',\n 'AK',\n 'Fit'),\n 'name': 'data/cds.dat',\n 'nrows': 1,\n 'opts': {'Reader': ascii.Cds}},\n # Test malformed CDS file (issues #2241 #467)\n {'cols': ('Index',\n 'RAh',\n 'RAm',\n 'RAs',\n 'DE-',\n 'DEd',\n 'DEm',\n 'DEs',\n 'Match',\n 'Class',\n 'AK',\n 'Fit'),\n 'name': 'data/cds_malformed.dat',\n 'nrows': 1,\n 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},\n {'cols': ('a', 'b', 'c'),\n 'name': 'data/commented_header.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.CommentedHeader}},\n {'cols': ('a', 'b', 'c'),\n 'name': 'data/commented_header2.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},\n {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),\n 'name': 'data/continuation.dat',\n 'nrows': 2,\n 'opts': {'Inputter': ascii.ContinuationLinesInputter,\n 'Reader': ascii.NoHeader}},\n {'cols': ('ID',\n 'XCENTER',\n 'YCENTER',\n 'MAG',\n 'MERR',\n 'MSKY',\n 'NITER',\n 'SHARPNESS',\n 'CHI',\n 'PIER',\n 'PERROR'),\n 'name': 'data/daophot.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Daophot}},\n {'cols': ('NUMBER',\n 'FLUX_ISO',\n 'FLUXERR_ISO',\n 'VALU-ES',\n 'VALU-ES_1',\n 'FLAG'),\n 'name': 'data/sextractor.dat',\n 'nrows': 3,\n 'opts': {'Reader': ascii.SExtractor}},\n {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),\n 'name': 'data/ipac.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Ipac}},\n {'cols': ('col0',\n 'objID',\n 'osrcid',\n 'xsrcid',\n 'SpecObjID',\n 'ra',\n 'dec',\n 'obsid',\n 'ccdid',\n 'z',\n 'modelMag_i',\n 'modelMagErr_i',\n 'modelMag_r',\n 'modelMagErr_r',\n 'expo',\n 'theta',\n 'rad_ecf_39',\n 'detlim90',\n 'fBlim90'),\n 'name': 'data/nls1_stackinfo.dbout',\n 'nrows': 58,\n 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},\n {'cols': ('Index',\n 'RAh',\n 'RAm',\n 'RAs',\n 'DE-',\n 'DEd',\n 'DEm',\n 'DEs',\n 'Match',\n 'Class',\n 'AK',\n 'Fit'),\n 'name': 'data/no_data_cds.dat',\n 'nrows': 0,\n 'opts': {'Reader': ascii.Cds}},\n {'cols': ('ID',\n 'XCENTER',\n 'YCENTER',\n 'MAG',\n 'MERR',\n 'MSKY',\n 'NITER',\n 'SHARPNESS',\n 'CHI',\n 'PIER',\n 'PERROR'),\n 'name': 'data/no_data_daophot.dat',\n 'nrows': 0,\n 'opts': {'Reader': ascii.Daophot}},\n {'cols': ('NUMBER',\n 'FLUX_ISO',\n 'FLUXERR_ISO',\n 'VALUES',\n 'VALUES_1',\n 'FLAG'),\n 'name': 'data/no_data_sextractor.dat',\n 'nrows': 0,\n 'opts': {'Reader': ascii.SExtractor}},\n {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),\n 'name': 'data/no_data_ipac.dat',\n 'nrows': 0,\n 'opts': {'Reader': ascii.Ipac}},\n {'cols': ('ra', 'v2'),\n 'name': 'data/ipac.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},\n {'cols': ('a', 'b', 'c'),\n 'name': 'data/no_data_with_header.dat',\n 'nrows': 0,\n 'opts': {}},\n {'cols': ('agasc_id', 'n_noids', 'n_obs'),\n 'name': 'data/short.rdb',\n 'nrows': 7,\n 'opts': {'Reader': ascii.Rdb}},\n {'cols': ('agasc_id', 'n_noids', 'n_obs'),\n 'name': 'data/short.tab',\n 'nrows': 7,\n 'opts': {'Reader': ascii.Tab}},\n {'cols': ('test 1a', 'test2', 'test3', 'test4'),\n 'name': 'data/simple.txt',\n 'nrows': 2,\n 'opts': {'quotechar': \"'\"}},\n {'cols': ('top1', 'top2', 'top3', 'top4'),\n 'name': 'data/simple.txt',\n 'nrows': 1,\n 'opts': {'quotechar': \"'\", 'header_start': 1, 'data_start': 2}},\n {'cols': ('top1', 'top2', 'top3', 'top4'),\n 'name': 'data/simple.txt',\n 'nrows': 1,\n 'opts': {'quotechar': \"'\", 'header_start': 1}},\n {'cols': ('top1', 'top2', 'top3', 'top4'),\n 'name': 'data/simple.txt',\n 'nrows': 2,\n 'opts': {'quotechar': \"'\", 'header_start': 1, 'data_start': 1}},\n {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),\n 'name': 'data/simple2.txt',\n 'nrows': 3,\n 'opts': {'delimiter': '|'}},\n {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),\n 'name': 'data/simple3.txt',\n 'nrows': 2,\n 'opts': {'delimiter': '|'}},\n {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),\n 'name': 'data/simple4.txt',\n 'nrows': 3,\n 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},\n {'cols': ('col1', 'col2', 'col3'),\n 'name': 'data/space_delim_no_header.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.NoHeader}},\n {'cols': ('col1', 'col2', 'col3'),\n 'name': 'data/space_delim_no_header.dat',\n 'nrows': 2,\n 'opts': {'Reader': ascii.NoHeader, 'header_start': None}},\n {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),\n 'name': 'data/space_delim_blank_lines.txt',\n 'nrows': 3,\n 'opts': {}},\n {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),\n 'name': 'data/test4.dat',\n 'nrows': 9,\n 'opts': {}},\n {'cols': ('a', 'b', 'c'),\n 'name': 'data/fill_values.txt',\n 'nrows': 2,\n 'opts': {'delimiter': ','}},\n {'name': 'data/whitespace.dat',\n 'cols': ('quoted colname with tab\\tinside', 'col2', 'col3'),\n 'nrows': 2,\n 'opts': {'delimiter': r'\\s'}},\n {'name': 'data/simple_csv.csv',\n 'cols': ('a', 'b', 'c'),\n 'nrows': 2,\n 'opts': {'Reader': ascii.Csv}},\n {'name': 'data/simple_csv_missing.csv',\n 'cols': ('a', 'b', 'c'),\n 'nrows': 2,\n 'skip': True,\n 'opts': {'Reader': ascii.Csv}},\n {'cols': ('cola', 'colb', 'colc'),\n 'name': 'data/latex1.tex',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Latex}},\n {'cols': ('Facility', 'Id', 'exposure', 'date'),\n 'name': 'data/latex2.tex',\n 'nrows': 3,\n 'opts': {'Reader': ascii.AASTex}},\n {'cols': ('cola', 'colb', 'colc'),\n 'name': 'data/latex3.tex',\n 'nrows': 2,\n 'opts': {'Reader': ascii.Latex}},\n {'cols': ('Col1', 'Col2', 'Col3', 'Col4'),\n 'name': 'data/fixed_width_2_line.txt',\n 'nrows': 2,\n 'opts': {'Reader': ascii.FixedWidthTwoLine}},\n ]\n\n try:\n import bs4 # noqa\n testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),\n 'name': 'data/html.html',\n 'nrows': 3,\n 'opts': {'Reader': ascii.HTML}})\n except ImportError:\n pass\n\n if name is not None:\n return [x for x in testfiles if x['name'] == name][0]\n else:\n return testfiles\n\n\ndef test_header_start_exception():\n '''Check certain Readers throw an exception if ``header_start`` is set\n\n For certain Readers it does not make sense to set the ``header_start``, they\n throw an exception if you try.\n This was implemented in response to issue #885.\n '''\n for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,\n ascii.BaseReader, ascii.FixedWidthNoHeader,\n ascii.Cds, ascii.Daophot]:\n with pytest.raises(ValueError):\n ascii.core._get_reader(readerclass, header_start=5)\n\n\ndef test_csv_table_read():\n \"\"\"\n Check for a regression introduced by #1935. Pseudo-CSV file with\n commented header line.\n \"\"\"\n lines = ['# a, b',\n '1, 2',\n '3, 4']\n t = ascii.read(lines)\n assert t.colnames == ['a', 'b']\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_overlapping_names(fast_reader):\n \"\"\"\n Check that the names argument list can overlap with the existing column names.\n This tests the issue in #1991.\n \"\"\"\n t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)\n assert t.colnames == ['b', 'a']\n\n\ndef test_sextractor_units():\n \"\"\"\n Make sure that the SExtractor reader correctly inputs descriptions and units.\n \"\"\"\n table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False)\n expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),\n Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),\n Unit('mag * arcsec**(-2)')]\n expected_descrs = ['Running object number',\n 'Windowed position estimate along x',\n 'Windowed position estimate along y',\n 'Kron-like elliptical aperture magnitude',\n 'RMS error for AUTO magnitude',\n 'Extraction flags',\n None,\n 'Barycenter position along MAMA x axis',\n 'Peak surface brightness above background']\n for i, colname in enumerate(table.colnames):\n assert table[colname].unit == expected_units[i]\n assert table[colname].description == expected_descrs[i]\n\n\ndef test_sextractor_last_column_array():\n \"\"\"\n Make sure that the SExtractor reader handles the last column correctly when it is array-like.\n \"\"\"\n table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False)\n expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',\n 'MAG_AUTO', 'MAGERR_AUTO',\n 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3',\n 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',\n 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3',\n 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']\n expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),\n Unit('mag'), Unit('mag'),\n Unit('mag'), Unit('mag'), Unit('mag'),\n Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),\n Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),\n Unit('mag'), Unit('mag')]\n expected_descrs = ['Object position along x', None,\n 'Right ascension of barycenter (J2000)',\n 'Declination of barycenter (J2000)',\n 'Kron-like elliptical aperture magnitude',\n 'RMS error for AUTO magnitude', ] + [\n 'Fixed aperture magnitude vector'] * 7 + [\n 'RMS error vector for fixed aperture mag.'] * 7\n for i, colname in enumerate(table.colnames):\n assert table[colname].name == expected_columns[i]\n assert table[colname].unit == expected_units[i]\n assert table[colname].description == expected_descrs[i]\n\n\ndef test_list_with_newlines():\n \"\"\"\n Check that lists of strings where some strings consist of just a newline\n (\"\\n\") are parsed correctly.\n \"\"\"\n t = ascii.read([\"abc\", \"123\\n\", \"456\\n\", \"\\n\", \"\\n\"])\n assert t.colnames == ['abc']\n assert len(t) == 2\n assert t[0][0] == 123\n assert t[1][0] == 456\n\n\ndef test_commented_csv():\n \"\"\"\n Check that Csv reader does not have ignore lines with the # comment\n character which is defined for most Basic readers.\n \"\"\"\n t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')\n assert t.colnames == ['#a', 'b']\n assert len(t) == 2\n assert t['#a'][1] == '#3'\n\n\ndef test_meta_comments():\n \"\"\"\n Make sure that line comments are included in the ``meta`` attribute\n of the output Table.\n \"\"\"\n t = ascii.read(['#comment1', '# comment2 \\t', 'a,b,c', '1,2,3'])\n assert t.colnames == ['a', 'b', 'c']\n assert t.meta['comments'] == ['comment1', 'comment2']\n\n\ndef test_guess_fail():\n \"\"\"\n Check the error message when guess fails\n \"\"\"\n with pytest.raises(ascii.InconsistentTableError) as err:\n ascii.read('asfdasdf\\n1 2 3', format='basic')\n assert \"** To figure out why the table did not read, use guess=False and\" in str(err.value)\n\n # Test the case with guessing enabled but for a format that has no free params\n with pytest.raises(ValueError) as err:\n ascii.read('asfdasdf\\n1 2 3', format='ipac')\n assert 'At least one header line beginning and ending with delimiter required' in str(err.value)\n\n # Test the case with guessing enabled but with all params specified\n with pytest.raises(ValueError) as err:\n ascii.read('asfdasdf\\n1 2 3', format='basic',\n quotechar='\"', delimiter=' ', fast_reader=False)\n assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)\n\n\[email protected]('not HAS_BZ2')\ndef test_guessing_file_object():\n \"\"\"\n Test guessing a file object. Fixes #3013 and similar issue noted in #3019.\n \"\"\"\n t = ascii.read(open('data/ipac.dat.bz2', 'rb'))\n assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype']\n\n\ndef test_pformat_roundtrip():\n \"\"\"Check that the screen output of ``print tab`` can be read. See #3025.\"\"\"\n \"\"\"Read a table with empty values and ensure that corresponding entries are masked\"\"\"\n table = '\\n'.join(['a,b,c,d',\n '1,3,1.11,1',\n '2, 2, 4.0 , ss '])\n dat = ascii.read(table)\n out = ascii.read(dat.pformat())\n assert len(dat) == len(out)\n assert dat.colnames == out.colnames\n for c in dat.colnames:\n assert np.all(dat[c] == out[c])\n\n\ndef test_ipac_abbrev():\n lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',\n '| r | rE | rea | real | D | do | dou | f | i | l | da| c |',\n ' 1 2 3 4 5 6 7 8 9 10 11 12 ']\n dat = ascii.read(lines, format='ipac')\n for name in dat.columns[0:8]:\n assert dat[name].dtype.kind == 'f'\n for name in dat.columns[8:10]:\n assert dat[name].dtype.kind == 'i'\n for name in dat.columns[10:12]:\n assert dat[name].dtype.kind in ('U', 'S')\n\n\ndef test_almost_but_not_quite_daophot():\n '''Regression test for #3319.\n This tables looks so close to a daophot table, that the daophot reader gets\n quite far before it fails with an AttributeError.\n\n Note that this table will actually be read as Commented Header table with\n the columns ['some', 'header', 'info'].\n '''\n lines = [\"# some header info\",\n \"#F header info beginning with 'F'\",\n \"1 2 3\",\n \"4 5 6\",\n \"7 8 9\"]\n dat = ascii.read(lines)\n assert len(dat) == 3\n\n\[email protected]('fast', [False, 'force'])\ndef test_commented_header_comments(fast):\n \"\"\"\n Test that comments in commented_header are as expected with header_start\n at different positions, and that the table round-trips.\n \"\"\"\n comments = ['comment 1', 'comment 2', 'comment 3']\n lines = ['# a b',\n '# comment 1',\n '# comment 2',\n '# comment 3',\n '1 2',\n '3 4']\n dat = ascii.read(lines, format='commented_header', fast_reader=fast)\n assert dat.meta['comments'] == comments\n assert dat.colnames == ['a', 'b']\n\n out = StringIO()\n ascii.write(dat, out, format='commented_header', fast_writer=fast)\n assert out.getvalue().splitlines() == lines\n\n lines.insert(1, lines.pop(0))\n dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast)\n assert dat.meta['comments'] == comments\n assert dat.colnames == ['a', 'b']\n\n lines.insert(2, lines.pop(1))\n dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast)\n assert dat.meta['comments'] == comments\n assert dat.colnames == ['a', 'b']\n dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast)\n assert dat.meta['comments'] == comments\n assert dat.colnames == ['a', 'b']\n\n lines.insert(3, lines.pop(2))\n dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast)\n assert dat.meta['comments'] == comments\n assert dat.colnames == ['a', 'b']\n\n lines = ['# a b',\n '1 2',\n '3 4']\n dat = ascii.read(lines, format='commented_header', fast_reader=fast)\n assert 'comments' not in dat.meta\n assert dat.colnames == ['a', 'b']\n\n\ndef test_probably_html():\n \"\"\"\n Test the routine for guessing if a table input to ascii.read is probably HTML\n \"\"\"\n for tabl0 in ('data/html.html',\n 'http://blah.com/table.html',\n 'https://blah.com/table.html',\n 'file://blah/table.htm',\n 'ftp://blah.com/table.html',\n 'file://blah.com/table.htm',\n ' <! doctype html > hello world',\n 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',\n ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],\n (' <! doctype html > ', ' hello world'),\n ):\n assert _probably_html(tabl0) is True\n\n for tabl0 in ('data/html.htms',\n 'Xhttp://blah.com/table.html',\n ' https://blah.com/table.htm',\n 'fole://blah/table.htm',\n ' < doctype html > hello world',\n 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',\n ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],\n (' <! doctype htm > ', ' hello world'),\n [[1, 2, 3]],\n ):\n assert _probably_html(tabl0) is False\n\n\[email protected]('fast_reader', [True, False, 'force'])\ndef test_data_header_start(fast_reader):\n tests = [(['# comment',\n '',\n ' ',\n 'skip this line', # line 0\n 'a b', # line 1\n '1 2'], # line 2\n [{'header_start': 1},\n {'header_start': 1, 'data_start': 2}\n ]\n ),\n\n (['# comment',\n '',\n ' \\t',\n 'skip this line', # line 0\n 'a b', # line 1\n '',\n ' \\t',\n 'skip this line', # line 2\n '1 2'], # line 3\n [{'header_start': 1, 'data_start': 3}]),\n\n (['# comment',\n '',\n ' ',\n 'a b', # line 0\n '',\n ' ',\n 'skip this line', # line 1\n '1 2'], # line 2\n [{'header_start': 0, 'data_start': 2},\n {'data_start': 2}])]\n\n for lines, kwargs_list in tests:\n for kwargs in kwargs_list:\n\n t = ascii.read(lines, format='basic', fast_reader=fast_reader,\n guess=True, **kwargs)\n assert t.colnames == ['a', 'b']\n assert len(t) == 1\n assert np.all(t['a'] == [1])\n # Sanity check that the expected Reader is being used\n assert get_read_trace()[-1]['kwargs']['Reader'] is (\n ascii.Basic if (fast_reader is False) else ascii.FastBasic)\n\n\ndef test_table_with_no_newline():\n \"\"\"\n Test that an input file which is completely empty fails in the expected way.\n Test that an input file with one line but no newline succeeds.\n \"\"\"\n # With guessing\n table = BytesIO()\n with pytest.raises(ascii.InconsistentTableError):\n ascii.read(table)\n\n # Without guessing\n table = BytesIO()\n with pytest.raises(ValueError) as err:\n ascii.read(table, guess=False, fast_reader=False, format='basic')\n assert 'No header line found' in str(err.value)\n\n table = BytesIO()\n t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic')\n assert not t and t.as_array().size == 0\n\n # Put a single line of column names but with no newline\n for kwargs in [dict(),\n dict(guess=False, fast_reader=False, format='basic'),\n dict(guess=False, fast_reader=True, format='fast_basic')]:\n table = BytesIO()\n table.write(b'a b')\n t = ascii.read(table, **kwargs)\n assert t.colnames == ['a', 'b']\n assert len(t) == 0\n\n\ndef test_path_object():\n fpath = pathlib.Path('data/simple.txt')\n data = ascii.read(fpath)\n\n assert len(data) == 2\n assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']\n assert data['test2'][1] == 'hat2'\n\n\ndef test_column_conversion_error():\n \"\"\"\n Test that context information (upstream exception message) from column\n conversion error is provided.\n \"\"\"\n ipac = \"\"\"\\\n| col0 |\n| double |\n 1 2\n\"\"\"\n with pytest.raises(ValueError) as err:\n ascii.read(ipac, guess=False, format='ipac')\n assert 'Column col0 failed to convert:' in str(err.value)\n\n with pytest.raises(ValueError) as err:\n ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})\n assert 'no converters' in str(err.value)\n\n\ndef test_non_C_locale_with_fast_reader():\n \"\"\"Test code that forces \"C\" locale while calling fast reader (#4364)\"\"\"\n current = locale.setlocale(locale.LC_ALL)\n\n try:\n if platform.system() == 'Darwin':\n locale.setlocale(locale.LC_ALL, 'de_DE')\n else:\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n\n for fast_reader in (True,\n False,\n {'use_fast_converter': False},\n {'use_fast_converter': True}):\n t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,\n fast_reader=fast_reader)\n assert t['a'].dtype.kind == 'f'\n except locale.Error as e:\n pytest.skip(f'Locale error: {e}')\n finally:\n locale.setlocale(locale.LC_ALL, current)\n\n\ndef test_no_units_for_char_columns():\n '''Test that a char column of a Table is assigned no unit and not\n a dimensionless unit.'''\n t1 = Table([[\"A\"]], names=\"B\")\n out = StringIO()\n ascii.write(t1, out, format=\"ipac\")\n t2 = ascii.read(out.getvalue(), format=\"ipac\", guess=False)\n assert t2[\"B\"].unit is None\n\n\ndef test_initial_column_fill_values():\n \"\"\"Regression test for #5336, #5338.\"\"\"\n\n class TestHeader(ascii.BasicHeader):\n def _set_cols_from_names(self):\n self.cols = [ascii.Column(name=x) for x in self.names]\n # Set some initial fill values\n for col in self.cols:\n col.fill_values = {'--': '0'}\n\n class Tester(ascii.Basic):\n header_class = TestHeader\n\n reader = ascii.get_reader(Reader=Tester)\n\n assert reader.read(\"\"\"# Column definition is the first uncommented line\n# Default delimiter is the space character.\na b c\n# Data starts after the header column definition, blank lines ignored\n-- 2 3\n4 5 6 \"\"\")['a'][0] is np.ma.masked\n\n\ndef test_latex_no_trailing_backslash():\n \"\"\"\n Test that latex/aastex file with no trailing backslash can be read.\n \"\"\"\n lines = r\"\"\"\n\\begin{table}\n\\begin{tabular}{ccc}\na & b & c \\\\\n1 & 1.0 & c \\\\ % comment\n3\\% & 3.0 & e % comment\n\\end{tabular}\n\\end{table}\n\"\"\"\n dat = ascii.read(lines, format='latex')\n assert dat.colnames == ['a', 'b', 'c']\n assert np.all(dat['a'] == ['1', r'3\\%'])\n assert np.all(dat['c'] == ['c', 'e'])\n\n\ndef text_aastex_no_trailing_backslash():\n lines = r\"\"\"\n\\begin{deluxetable}{ccc}\n\\tablehead{\\colhead{a} & \\colhead{b} & \\colhead{c}}\n\\startdata\n1 & 1.0 & c \\\\\n2 & 2.0 & d \\\\ % comment\n3\\% & 3.0 & e % comment\n\\enddata\n\\end{deluxetable}\n\"\"\"\n dat = ascii.read(lines, format='aastex')\n assert dat.colnames == ['a', 'b', 'c']\n assert np.all(dat['a'] == ['1', r'3\\%'])\n assert np.all(dat['c'] == ['c', 'e'])\n\n\[email protected]('encoding', ['utf8', 'latin1', 'cp1252'])\ndef test_read_with_encoding(tmpdir, encoding):\n data = {\n 'commented_header': '# à b è \\n 1 2 héllo',\n 'csv': 'à,b,è\\n1,2,héllo'\n }\n\n testfile = str(tmpdir.join('test.txt'))\n for fmt, content in data.items():\n with open(testfile, 'w', encoding=encoding) as f:\n f.write(content)\n\n table = ascii.read(testfile, encoding=encoding)\n assert table.pformat() == [' à b è ',\n '--- --- -----',\n ' 1 2 héllo']\n\n for guess in (True, False):\n table = ascii.read(testfile, format=fmt, fast_reader=False,\n encoding=encoding, guess=guess)\n assert table['è'].dtype.kind == 'U'\n assert table.pformat() == [' à b è ',\n '--- --- -----',\n ' 1 2 héllo']\n\n\ndef test_unsupported_read_with_encoding(tmpdir):\n # Fast reader is not supported, make sure it raises an exception\n with pytest.raises(ascii.ParameterError):\n ascii.read('data/simple3.txt', guess=False, fast_reader='force',\n encoding='latin1', format='fast_csv')\n\n\ndef test_read_chunks_input_types():\n \"\"\"\n Test chunked reading for different input types: file path, file object,\n and string input.\n \"\"\"\n fpath = 'data/test5.dat'\n t1 = ascii.read(fpath, header_start=1, data_start=3, )\n\n for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()):\n t_gen = ascii.read(fp, header_start=1, data_start=3,\n guess=False, format='fast_basic',\n fast_reader={'chunk_size': 400, 'chunk_generator': True})\n ts = list(t_gen)\n for t in ts:\n for col, col1 in zip(t.columns.values(), t1.columns.values()):\n assert col.name == col1.name\n assert col.dtype.kind == col1.dtype.kind\n\n assert len(ts) == 4\n t2 = table.vstack(ts)\n assert np.all(t1 == t2)\n\n for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()):\n # Now read the full table in chunks\n t3 = ascii.read(fp, header_start=1, data_start=3,\n fast_reader={'chunk_size': 300})\n assert np.all(t1 == t3)\n\n\[email protected]('masked', [True, False])\ndef test_read_chunks_formats(masked):\n \"\"\"\n Test different supported formats for chunked reading.\n \"\"\"\n t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)\n for i, name in enumerate(t1.colnames):\n t1.rename_column(name, 'col{}'.format(i + 1))\n\n # TO DO commented_header does not currently work due to the special-cased\n # implementation of header parsing.\n\n for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':\n out = StringIO()\n ascii.write(t1, out, format=format)\n t_gen = ascii.read(out.getvalue(), format=format,\n fast_reader={'chunk_size': 400, 'chunk_generator': True})\n ts = list(t_gen)\n for t in ts:\n for col, col1 in zip(t.columns.values(), t1.columns.values()):\n assert col.name == col1.name\n assert col.dtype.kind == col1.dtype.kind\n\n assert len(ts) > 4\n t2 = table.vstack(ts)\n assert np.all(t1 == t2)\n\n # Now read the full table in chunks\n t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400})\n assert np.all(t1 == t3)\n\n\ndef test_read_chunks_chunk_size_too_small():\n fpath = 'data/test5.dat'\n with pytest.raises(ValueError) as err:\n ascii.read(fpath, header_start=1, data_start=3,\n fast_reader={'chunk_size': 10})\n assert 'no newline found in chunk (chunk_size too small?)' in str(err.value)\n\n\ndef test_read_chunks_table_changes():\n \"\"\"Column changes type or size between chunks. This also tests the case with\n no final newline.\n \"\"\"\n col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50\n table = '\\n'.join(col)\n t1 = ascii.read(table, guess=False)\n t2 = ascii.read(table, fast_reader={'chunk_size': 100})\n\n # This also confirms that the dtypes are exactly the same, i.e.\n # the string itemsizes are the same.\n assert np.all(t1 == t2)\n\n\ndef test_read_non_ascii():\n \"\"\"Test that pure-Python reader is used in case the file contains non-ASCII characters\n in it.\n \"\"\"\n table = Table.read(['col1, col2', '\\u2119, \\u01b4', '1, 2'], format='csv')\n assert np.all(table['col1'] == ['\\u2119', '1'])\n assert np.all(table['col2'] == ['\\u01b4', '2'])\n\n\[email protected]('enable', [True, False, 'force'])\ndef test_kwargs_dict_guess(enable):\n \"\"\"Test that fast_reader dictionary is preserved through guessing sequence.\n \"\"\"\n # Fails for enable=(True, 'force') - #5578\n ascii.read('a\\tb\\n 1\\t2\\n3\\t 4.0', fast_reader=dict(enable=enable))\n assert get_read_trace()[-1]['kwargs']['Reader'] is (\n ascii.Tab if (enable is False) else ascii.FastTab)\n for k in get_read_trace():\n if not k.get('status', 'Disabled').startswith('Disabled'):\n assert k.get('kwargs').get('fast_reader').get('enable') is enable\n" ]
[ [ "numpy.all", "numpy.array" ] ]
Richard-Tarbell/polsalt
[ "e953985ffbc786fd071d0b48ebca5bd1dac9a960" ]
[ "scripts/correct_files.py" ]
[ "import os\nimport sys\nimport copy\nimport numpy as np\nfrom astropy.io import fits\n\npolsaltdir = '/'.join(os.path.realpath(__file__).split('/')[:-2])\ndatadir = polsaltdir+'/polsalt/data/'\nsys.path.extend((polsaltdir+'/polsalt/',))\n\nfrom specpolwollaston import correct_wollaston, read_wollaston\n\ndef correct_files(hdu,tilt=0):\n \"\"\"For a given input file, apply corrections for wavelength, \n distortion, and bad pixels\n\n Parameters\n ----------\n input_file: astropy.io.fits.HDUList\n\n tilt: (float)\n change in row from col = 0 to cols\n \"\"\"\n \n cbin, rbin = [int(x) for x in hdu[0].header['CCDSUM'].split(\" \")]\n beams, rows, cols = hdu[1].data.shape\n \n #temporary cludge\n thdu = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(hdu[1].data[0])])\n thdu[0].header = hdu[0].header\n thdu[1].name = 'SCI'\n rpix_oc = read_wollaston(thdu, wollaston_file=datadir+\"wollaston.txt\")\n drow_oc = (rpix_oc-rpix_oc[:,cols/2][:,None])/rbin\n drow_oc += -tilt*(np.arange(cols) - cols/2)/cols\n \n for i in range(1, len(hdu)):\n for o in range(beams):\n\n if hdu[i].name == 'BPM' :\n tdata = hdu[i].data[o].astype('float') \n else: \n tdata = hdu[i].data[o]\n tdata = correct_wollaston(tdata, -drow_oc[o])\n if hdu[i].name == 'BPM' : \n hdu[i].data[o] = (tdata > 0.1).astype('uint')\n else: \n hdu[i].data[o] = tdata \n \n return hdu\n\nif __name__=='__main__':\n\n import glob\n if '*' in sys.argv[-1]:\n images = glob.glob(sys.argv[-1])\n else:\n images = sys.argv[1:]\n for img in images:\n hdu=correct_files(fits.open(img))\n hdu.writeto('c' + img, clobber=True)\n \n" ]
[ [ "numpy.arange" ] ]
SammyEK/kensu-py
[ "8a210c8fe53ec28ef759aa1b43faa5f4402bea3f" ]
[ "kensu/utils/kensu.py" ]
[ "import datetime\nimport getpass\nimport json\nimport logging\nimport os\nimport time\n\nfrom kensu.client import *\nfrom kensu.utils.dsl.extractors.external_lineage_dtos import KensuDatasourceAndSchema\nfrom kensu.utils.dsl import mapping_strategies\nfrom kensu.utils.dsl.extractors import Extractors\nfrom kensu.utils.dsl.lineage_builder import LineageBuilder\nfrom kensu.utils.helpers import to_hash_key\nfrom kensu.utils.injection import Injection\nfrom kensu.pandas import DataFrame,Series\n\n\nclass Kensu(object):\n UNKNOWN_PHYSICAL_LOCATION = PhysicalLocation(name=\"Unknown\", lat=0.12341234, lon=0.12341234,\n pk=PhysicalLocationPK(city=\"Unknown\", country=\"Unknown\"))\n\n @staticmethod\n def discover_user_name():\n return getpass.getuser()\n\n @staticmethod\n def get_git_repo():\n cur_dir = os.path.realpath(os.path.curdir)\n try:\n import git\n try:\n git_repo = git.Repo(cur_dir, search_parent_directories=True)\n return git_repo\n except git.GitError as e:\n logging.warn(\"kensu-py was unable to identify a git repo. The working dir is not a git repo?\")\n pass\n except ImportError as e:\n logging.warn(\"Install GitPython for a maximum context about the GIT code repo if any\")\n pass\n\n @staticmethod\n def discover_code_location():\n cur_dir = os.path.realpath(os.path.curdir)\n code_location = cur_dir\n git_repo = Kensu.get_git_repo()\n if git_repo is not None:\n remote = git_repo.remote()\n code_location = next(remote.urls, code_location)\n return code_location\n\n @staticmethod\n def discover_code_version():\n\n code_version = datetime.datetime.now().isoformat()\n\n git_repo = Kensu.get_git_repo()\n if git_repo is not None:\n code_version = git_repo.head.commit.hexsha\n if git_repo.is_dirty():\n code_version = code_version + \" (dirty)\"\n return code_version\n\n def get_conf_path(self, default = \"conf.ini\"):\n return os.environ[\"CONF_FILE\"] if \"CONF_FILE\" in os.environ else default\n\n def __init__(self, api_url=None, auth_token=None, process_name=None,\n user_name=None, code_location=None, init_context=True, \n do_report=None, report_to_file=None, offline_file_name=None, reporter=None, **kwargs):\n \"\"\"\n \"\"\"\n from configparser import ConfigParser, ExtendedInterpolation\n\n config = ConfigParser(interpolation=ExtendedInterpolation())\n # TODO... path to conf there are so many args in the function here, so adding it will require a good migration plan (it doesn't land in kwargs...)\n config.read(self.get_conf_path(\"conf.ini\"))\n\n kensu_conf = config['kensu'] if config.has_section('kensu') else config['DEFAULT']\n self.conf = kensu_conf\n\n kensu_host = self.get_kensu_host(api_url)\n if kensu_host is None:\n kensu_host = kensu_conf.get(\"api_url\")\n if auth_token is None:\n kensu_auth_token = kensu_conf.get(\"api_token\")\n else:\n kensu_auth_token = auth_token\n\n def kwargs_or_conf_or_default(key, default, kw=kwargs, conf=kensu_conf):\n if key in kw and kw[key] is not None:\n return kw[key]\n elif key in conf and conf.get(key) is not None:\n r = conf.get(key)\n if isinstance(default, list):\n r = r.replace(\" \",\"\").split(\",\")\n elif isinstance(default, bool):\n r = conf.getboolean(key)\n return r\n else:\n return default\n self.extractors = Extractors()\n pandas_support = kwargs_or_conf_or_default(\"pandas_support\", True)\n sklearn_support = kwargs_or_conf_or_default(\"sklearn_support\", True)\n bigquery_support = kwargs_or_conf_or_default(\"bigquery_support\", False)\n tensorflow_support = kwargs_or_conf_or_default(\"tensorflow_support\", False)\n self.extractors.add_default_supports(pandas_support=pandas_support, sklearn_support=sklearn_support,bigquery_support=bigquery_support,tensorflow_support=tensorflow_support)\n\n project_names = kwargs_or_conf_or_default(\"project_names\", [])\n environment = kwargs_or_conf_or_default(\"environment\", None)\n timestamp = kwargs_or_conf_or_default(\"timestamp\", None)\n logical_naming = kwargs_or_conf_or_default(\"logical_naming\", None)\n mapping = kwargs_or_conf_or_default(\"mapping\", None)\n report_in_mem = kwargs_or_conf_or_default(\"report_in_mem\", False)\n\n if \"get_code_version\" in kwargs and kwargs[\"get_code_version\"] is not None:\n get_code_version = kwargs[\"get_code_version\"]\n else:\n get_code_version = Kensu.discover_code_version\n\n def default_if_arg_none(arg, default):\n if arg is None:\n return default\n else:\n return arg\n process_name = default_if_arg_none(process_name, kensu_conf.get(\"process_name\"))\n user_name = default_if_arg_none(user_name, kensu_conf.get(\"user_name\"))\n code_location = default_if_arg_none(code_location, kensu_conf.get(\"code_location\"))\n\n do_report = default_if_arg_none(do_report, kensu_conf.getboolean(\"do_report\", True))\n report_to_file = default_if_arg_none(report_to_file, kensu_conf.getboolean(\"report_to_file\", False))\n offline_file_name = default_if_arg_none(offline_file_name, kensu_conf.get(\"offline_file_name\", None))\n\n self.kensu_api = KensuEntitiesApi()\n self.kensu_api.api_client.host = kensu_host\n self.kensu_api.api_client.default_headers[\"X-Auth-Token\"] = kensu_auth_token\n\n # add function to Kensu entities\n injection = Injection()\n injection.set_reporter(reporter)\n injection.set_do_report(do_report, offline_file_name=offline_file_name, report_to_file=report_to_file)\n injection.set_kensu_api(self.kensu_api)\n\n self.logical_naming = logical_naming\n self.mapping = mapping\n self.report_in_mem = report_in_mem\n\n self.set_default_physical_location(Kensu.UNKNOWN_PHYSICAL_LOCATION)\n # can be updated using set_default_physical_location\n self.init_context(process_name=process_name, user_name=user_name, code_location=code_location,\n get_code_version=get_code_version, project_names=project_names, environment=environment, timestamp=timestamp)\n\n # sets the api url using host if passed, otherwise gets KENSU_API_URL\n def get_kensu_host(self, host=None):\n if host is None:\n if \"KENSU_API_URL\" in os.environ:\n kensu_host = os.environ[\"KENSU_API_URL\"]\n else:\n kensu_host = None\n else:\n kensu_host = host\n\n return kensu_host\n\n def register_schema_name(self, ds, schema):\n name = ds.name\n if \"in-mem\" in name and ds.format is not None:\n name = name + \" of format=\" + str(ds.format or '?')\n self.schema_name_by_guid[schema.to_guid()] = name\n return schema\n\n def to_schema_name(self, s_guid):\n return self.schema_name_by_guid.get(s_guid) or s_guid\n\n def to_schema_names(self, s_guids):\n return [self.to_schema_name(s_guid) for s_guid in s_guids]\n\n\n def init_context(self, process_name=None, user_name=None, code_location=None, get_code_version=None, project_names=None,environment=None,timestamp=None):\n # list of triples i, o, mapping strategy\n # i and o are either one or a list of triples (object, DS, SC)\n self.dependencies = []\n self.dependencies_mapping = []\n self.dependencies_per_columns = {}\n self.real_schema_df = {}\n self.schema_name_by_guid = {}\n self.sent_runs = []\n self.data_collectors = {}\n self.model={}\n self.set_timestamp(timestamp)\n self.inputs_ds = []\n self.write_reinit = False\n\n if user_name is None:\n user_name = Kensu.discover_user_name()\n if code_location is None:\n code_location = Kensu.discover_code_location()\n self.user = User(pk=UserPK(user_name))._report()\n self.code_base = CodeBase(pk=CodeBasePK(code_location))._report()\n if get_code_version is None:\n if timestamp is not None: # this is weird though...\n version = datetime.datetime.fromtimestamp(timestamp/1000).isoformat()\n else:\n version = Kensu.discover_code_version()\n else:\n version = get_code_version()\n self.code_version = CodeVersion(maintainers_refs=[self.user.to_ref()],\n pk=CodeVersionPK(version=version,\n codebase_ref=self.code_base.to_ref()))._report()\n if process_name is None:\n if \"__file__\" in globals():\n process_name = os.path.basename(os.path.realpath(__file__))\n else:\n raise Exception(\"Can't determine `process_name`, maybe is this running from a Notebook?\")\n self.process = Process(pk=ProcessPK(qualified_name=process_name))._report()\n if project_names is None:\n self.project_refs = []\n else:\n self.project_refs = [Project(pk=ProjectPK(name=n))._report().to_ref() for n in project_names]\n process_run_name = process_name + \"@\" + datetime.datetime.now().isoformat()\n self.process_run = ProcessRun(\n pk=ProcessRunPK(process_ref=self.process.to_ref(), qualified_name=process_run_name)\n , launched_by_user_ref=self.user.to_ref()\n , executed_code_version_ref=self.code_version.to_ref()\n , projects_refs=self.project_refs\n , environment = environment\n )._report()\n\n def set_reinit(self, bool = True):\n self.write_reinit = bool\n\n def add_input_ref(self, entities):\n if self.write_reinit == True:\n self.inputs_ds = []\n self.write_reinit = False\n self.inputs_ds.append(entities)\n\n def set_timestamp(self, timestamp):\n if timestamp is not None:\n self.kensu_api.api_client.default_headers[\"X-Entity-Creation-Time\"] = timestamp\n else:\n timestamp = datetime.datetime.now().timestamp()*1000\n self.timestamp = timestamp\n\n def set_default_physical_location(self, pl):\n self.default_physical_location = pl\n pl._report()\n self.default_physical_location_ref = pl.to_ref()\n\n def get_dependencies_mapping(self):\n return self.dependencies_mapping\n\n def add_dependencies_mapping(self, guid, col, from_guid, from_col, type):\n dep = {'GUID': guid,\n 'COLUMNS': col,\n 'FROM_ID': from_guid,\n 'FROM_COLUMNS': from_col,\n 'TYPE': type}\n self.dependencies_mapping.append(dep)\n\n def in_mem(self, var_name):\n return \"in-memory-data://\" + self.process.pk.qualified_name + \"/\" + var_name\n\n def report_with_mapping(self):\n self.set_reinit()\n import pandas as pd\n deps = self.dependencies_mapping\n ddf = pd.DataFrame(deps)\n df = ddf.set_index(['GUID', 'COLUMNS', 'FROM_ID']).groupby(['GUID', 'COLUMNS', 'FROM_ID']).agg(list)\n df = df[~df.index.duplicated(keep='first')].reset_index()\n\n unique_ids = list(df['GUID'].unique())\n\n if self.report_in_mem:\n for element in unique_ids:\n dataflow = []\n dependencies = df[df['GUID'] == element]\n data = {}\n for row in dependencies.iterrows():\n info = row[1]\n from_columns = [str(x) for x in info['FROM_COLUMNS']]\n data[str(info['COLUMNS'])] = from_columns\n schema_dep = SchemaLineageDependencyDef(from_schema_ref=SchemaRef(by_guid=info['FROM_ID']),\n to_schema_ref=SchemaRef(by_guid=info['GUID']),\n column_data_dependencies=data)\n dataflow.append(schema_dep)\n\n lineage = ProcessLineage(name=self.get_lineage_name(dataflow),\n operation_logic='APPEND',\n pk=ProcessLineagePK(process_ref=ProcessRef(by_guid=self.process.to_guid()),\n data_flow=dataflow))._report()\n\n if lineage.to_guid() not in self.sent_runs:\n lineage_run = LineageRun(pk=LineageRunPK(lineage_ref=ProcessLineageRef(by_guid=lineage.to_guid()),\n process_run_ref=ProcessRunRef(\n by_guid=self.process_run.to_guid()),\n timestamp=round(self.timestamp)))._report()\n self.sent_runs.append(lineage.to_guid())\n else:\n dependencies_per_columns = {}\n for element in unique_ids:\n if element in self.real_schema_df:\n sub_df = df[df['GUID'] == element]\n for row in sub_df.iterrows():\n info = row[1]\n destination_guid = info['GUID']\n guid = info['GUID']\n origin_column = info['COLUMNS']\n column = info['COLUMNS']\n all_deps = df\n self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,\n dependencies_per_columns)\n\n dataflows = {}\n for destination_guid in dependencies_per_columns:\n if dependencies_per_columns[destination_guid] != {}:\n dataflows[destination_guid] = {}\n for column in dependencies_per_columns[destination_guid]:\n for origin_guid in dependencies_per_columns[destination_guid][column]:\n if origin_guid not in dataflows[destination_guid] and origin_guid!=destination_guid:\n dataflows[destination_guid][origin_guid] = {}\n dataflows[destination_guid][origin_guid][column]=list(set(dependencies_per_columns[destination_guid][column][origin_guid]))\n elif origin_guid!=destination_guid:\n dataflows[destination_guid][origin_guid][column] = \\\n list(set(dependencies_per_columns[destination_guid][column][origin_guid]))\n\n for to_guid in dataflows:\n schemas_pk = set()\n from_pks = set()\n dataflow = []\n is_ml_model = False\n for from_guid in dataflows[to_guid]:\n\n schema_dep = SchemaLineageDependencyDef(from_schema_ref=SchemaRef(by_guid=from_guid),\n to_schema_ref=SchemaRef(by_guid=to_guid),\n column_data_dependencies=dataflows[to_guid][from_guid])\n if to_guid in self.model:\n is_ml_model = True\n dataflow.append(schema_dep)\n schemas_pk.add(from_guid)\n from_pks.add(from_guid)\n schemas_pk.add(to_guid)\n\n lineage = ProcessLineage(name=self.get_lineage_name(dataflow),\n operation_logic='APPEND',\n pk=ProcessLineagePK(\n process_ref=ProcessRef(by_guid=self.process.to_guid()),\n data_flow=dataflow))._report()\n\n\n\n if lineage.to_guid() not in self.sent_runs:\n lineage_run = LineageRun(\n pk=LineageRunPK(lineage_ref=ProcessLineageRef(by_guid=lineage.to_guid()),\n process_run_ref=ProcessRunRef(by_guid=self.process_run.to_guid()),\n timestamp=round(self.timestamp)))._report()\n self.sent_runs.append(lineage.to_guid())\n\n for schema in schemas_pk:\n stats_df = self.real_schema_df[schema]\n try:\n stats = self.extractors.extract_stats(stats_df)\n except:\n from kensu.requests.models import ksu_str\n # FIXME weird... should be fine to delete (and try,except too)\n if isinstance(stats_df, pd.DataFrame) or isinstance(stats_df, DataFrame) or isinstance(stats_df,Series) or isinstance(stats_df,pd.Series) :\n stats = self.extractors.extract_stats(stats_df)\n elif isinstance(stats_df, ksu_str):\n stats = None\n elif isinstance(stats_df, dict):\n stats = stats_df\n else:\n #TODO Support ndarray\n stats = None\n if stats is not None:\n DataStats(pk=DataStatsPK(schema_ref=SchemaRef(by_guid=schema),\n lineage_run_ref=LineageRunRef(by_guid=lineage_run.to_guid())),\n stats=stats,\n extra_as_json=None)._report()\n elif isinstance(stats_df, KensuDatasourceAndSchema):\n stats_df.f_publish_stats(lineage_run.to_guid())\n #FIXME should be using extractors instead\n if is_ml_model:\n model_name = self.model[to_guid][1]\n metrics = self.model[to_guid][2]\n import json\n hyperparams = json.dumps(self.model[to_guid][3])\n\n model = Model(ModelPK(name=model_name))._report()\n train = ModelTraining(pk=ModelTrainingPK(model_ref=ModelRef(by_guid=model.to_guid()),\n process_lineage_ref=ProcessLineageRef(\n by_guid=lineage.to_guid())))._report()\n r=ModelMetrics(pk=ModelMetricsPK(model_training_ref=ModelTrainingRef(by_guid=train.to_guid()),\n lineage_run_ref=LineageRunRef(by_guid=lineage_run.to_guid()),\n stored_in_schema_ref=SchemaRef(by_guid=to_guid)),\n metrics=metrics, hyper_params_as_json=hyperparams)._report()\n\n def create_dependencies(self,destination_guid, guid, origin_column, column, all_deps,\n dependencies_per_columns_rt):\n visited = list()\n visited.append((guid,column))\n self.dependencies_per_columns = dependencies_per_columns_rt\n filtered_dependencies = all_deps[all_deps['GUID'] == guid]\n\n filtered_dependencies = filtered_dependencies[filtered_dependencies['COLUMNS'] == str(column)]\n if destination_guid in self.dependencies_per_columns:\n for row in filtered_dependencies.iterrows():\n row = row[1]\n\n if row['FROM_ID'] in self.real_schema_df:\n if origin_column in self.dependencies_per_columns[destination_guid]:\n if row['FROM_ID'] in self.dependencies_per_columns[destination_guid][origin_column]:\n self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = \\\n self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] + \\\n row['FROM_COLUMNS']\n else:\n self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = row[\n 'FROM_COLUMNS']\n else:\n self.dependencies_per_columns[destination_guid][origin_column] = {}\n self.dependencies_per_columns[destination_guid][origin_column][row['FROM_ID']] = row[\n 'FROM_COLUMNS']\n # dependencies_per_columns[guid][row['FROM_ID']] = row['FROM_COLUMNS']\n else:\n guid = row['FROM_ID']\n columns = row['FROM_COLUMNS']\n for column in columns:\n if (guid,column) not in visited:\n self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,\n self.dependencies_per_columns)\n else:\n self.dependencies_per_columns[destination_guid] = {}\n self.create_dependencies(destination_guid, guid, origin_column, column, all_deps,\n self.dependencies_per_columns)\n\n def get_dependencies(self):\n return self.dependencies\n def add_dependency(self, i, o, mapping_strategy=mapping_strategies.FULL):\n if not isinstance(i, tuple):\n (ids, isc) = self.extractors.extract_data_source_and_schema(i, self.default_physical_location_ref)\n i = (i, ids, isc)\n\n if not isinstance(o, tuple):\n (ods, osc) = self.extractors.extract_data_source_and_schema(o, self.default_physical_location_ref)\n o = (o, ods, osc)\n\n self.dependencies.append((i, o, mapping_strategy))\n def add_dependencies(self, ins, outs, mapping_strategy=mapping_strategies.FULL):\n new_ins = []\n for i in ins:\n if not isinstance(i, tuple):\n (ids, isc) = self.extractors.extract_data_source_and_schema(i, self.default_physical_location_ref)\n i = (i, ids, isc)\n new_ins.append(i)\n\n new_outs = []\n for o in outs:\n if not isinstance(o, tuple):\n (ods, osc) = self.extractors.extract_data_source_and_schema(o, self.default_physical_location_ref)\n o = (o, ods, osc)\n new_outs.append(o)\n\n self.dependencies.append((new_ins, new_outs, mapping_strategy))\n\n def get_lineage_name(self,\n data_flow # type: list[SchemaLineageDependencyDef]\n ):\n inputs = \",\".join(sorted(self.to_schema_names([d.from_schema_ref.by_guid for d in data_flow])))\n outputs = \",\".join(sorted(self.to_schema_names([d.to_schema_ref.by_guid for d in data_flow])))\n return \"Lineage to {} from {}\".format(outputs, inputs)\n\n\n @property\n def s(self):\n return self.start_lineage(True)\n\n\n def start_lineage(self, report_stats=True):\n lineage_builder = LineageBuilder(self, report_stats)\n return lineage_builder\n\n\n def new_lineage(self, process_lineage_dependencies, report_stats=True, **kwargs):\n # if the new_lineage has a model training in it (output),\n # then kwargs will be pass to the function to compute metrics\n # ex: kwargs[\"y_test\"] can refer to the test set to compute CV metrics\n data_flow = [d.toSchemaLineageDependencyDef() for d in process_lineage_dependencies]\n lineage = ProcessLineage(name=self.get_lineage_name(data_flow),\n operation_logic=\"APPEND\",\n # FIXME? => add control and the function level like report_stats\n pk=ProcessLineagePK(\n process_ref=self.process.to_ref(),\n data_flow=data_flow\n )\n )._report()\n\n if self.timestamp is None:\n self.timestamp=int(time.time()) * 1000\n\n lineage_run = LineageRun(pk=LineageRunPK(\n lineage_ref=lineage.to_ref(),\n process_run_ref=self.process_run.to_ref(),\n timestamp=self.timestamp\n )\n )._report()\n\n data_flow_inputs = list(\n {to_hash_key(d.input_schema): (d.input_schema, d.input) for d in process_lineage_dependencies}.values())\n data_flow_outputs = list(\n {to_hash_key(d.output_schema): (d.output_schema, d.output) for d in process_lineage_dependencies}.values())\n\n for (schema, df) in (data_flow_inputs + data_flow_outputs):\n stats = self.extractors.extract_stats(df)\n if report_stats and stats is not None:\n DataStats(pk=DataStatsPK(schema_ref=schema.to_ref(),\n lineage_run_ref=lineage_run.to_ref()),\n stats=stats,\n extra_as_json=None)._report()\n\n # TODO Machine Learning part for OUTPUTS ONLY (right ?)\n for (schema, df) in data_flow_outputs:\n ml = self.extractors.extract_machine_learning_info(df)\n if ml is not None:\n model = Model(ModelPK(ml[\"name\"]))._report()\n model_training = ModelTraining(ModelTrainingPK(model_ref=model.to_ref(),\n process_lineage_ref=lineage.to_ref())\n )._report()\n\n metrics = self.extractors.extract_machine_learning_metrics(df, **kwargs)\n if len(metrics) > 0:\n hp = self.extractors.extract_machine_learning_hyper_parameters(df)\n ModelMetrics(pk=ModelMetricsPK(\n model_training_ref=model_training.to_ref(),\n lineage_run_ref=lineage_run.to_ref(),\n stored_in_schema_ref=schema.to_ref()\n ),\n metrics=metrics,\n hyper_params_as_json=json.dumps(hp)\n )._report()\n" ]
[ [ "pandas.DataFrame" ] ]
prabhatrmishra/IDCardInfoExtr
[ "c59270f61a3251a6aff55bc7d81f2057c4663a37" ]
[ "Recognition/prepare_data.py" ]
[ "import os\nimport numpy as np \nimport cv2\nimport lmdb\nimport argparse\n\n\ncnt = 0\ndef filter_text(lang,text):\n #print(lang,text)\n unicode_range = {'odia':'[^\\u0020-\\u0040-\\u0B00-\\u0B7F]','kanada':'[^\\u0020-\\u0040-\\u0C80-\\u0CFF]',\n 'tamil':'[^\\u0020-\\u0040-\\u0B80-\\u0BFF]','malyalam':'[^\\u0020-\\u0040-\\u0D00-\\u0D7F]',\n 'urdu':'[^\\u0020-\\u0040-\\u0600-\\u06FF]','telgu':'[^\\u0020-\\u0040-\\u0C00-\\u0C7F]',\n 'marathi':'[^\\u0020-\\u0040-\\u0900-\\u097F]','sanskrit':'[^\\u0020-\\u0040-\\u0900-\\u097F]',\n 'hindi':'[^\\u0020-\\u0040-\\u0900-\\u097F]','ban':'[^\\u0020-\\u0040-\\u0980-\\u09FF]'}\n import re\n t = re.sub(unicode_range[lang],'',text)\n if len(text) == len(t):\n return False\n else:\n return True\n\n\ndef crop(imgpath,bbox):\n\timg = cv2.imread(imgpath)\n\tbbox = bbox.reshape(4,2)\n\ttopleft_x = np.min(bbox[:,0])\n\ttopleft_y = np.min(bbox[:,1])\n\tbot_right_x = np.max(bbox[:,0])\n\tbot_right_y = np.max(bbox[:,1])\n\tcropped_img = img[topleft_y:bot_right_y, topleft_x:bot_right_x]\n\treturn cropped_img\n\t\n\ndef create_lists(args):\n\tglobal cnt\n\timgPathList = []\n\tlabelList = []\n\tif not os.path.exists(args.word_image_dir):\n\t\tos.mkdir(args.word_image_dir)\n\n\tgt_filelist = os.listdir(args.image_gt_dir)\n\tgt_filelist.sort()\n\t#gt_filelist = gt_filelist[0:9801]\n\n\tfor gt_file in gt_filelist:\n\t\tgt_filename = gt_file.split('.')[0]\n\t\tf = open(args.image_gt_dir+gt_file,'r')\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\telements = line.split(',')\n\t\t\telements[-1] = elements[-1].strip()\n\t\t\telements[-2] = elements[-2].lower()\n\t\t\t#print(elements[-1])\n\t\t\t#if not filter_text(args.lang,elements[-1]):\n\t\t\tif not (elements[-2]=='Symbol' or elements[-1]==\"###\" or elements[-1]=='') and args.lang in elements[-2]:\n\t\t\t\tbbox = [int(ele) for ele in elements[0:8]]\n\t\t\t\tbbox = np.array(bbox)\n\t\t\t\tlabel = elements[-1].strip()\n\t\t\t\tif cnt<10:\n\t\t\t\t\tprint(label)\n\t\t\t\timgpath = args.image_dir+gt_filename+'.jpg'\n\t\t\t\ttry:\n\t\t\t\t\tcropped_img = crop(imgpath,bbox)\n\n\t\t\t\texcept:\n\t\t\t\t\tprint(\".png image ignore\")\n\t\t\t\t\tcontinue\n\t\t\t\t#print(np.shape(cropped_img))\n\t\t\t\tif not (0 in np.shape(cropped_img)):\n\t\t\t\t\tword_image_path = args.word_image_dir+\"img\"+str(cnt)+'.jpg'\n\t\t\t\t\tprint(word_image_path)\n\t\t\t\t\tcv2.imwrite(word_image_path,cropped_img)\n\t\t\t\t\timgPathList.append(word_image_path)\n\t\t\t\t\tlabelList.append(label)\n\t\t\t\t\tcnt = cnt+1\n\t\t\t\t\tprint('processed number:',cnt)\n\treturn imgPathList, labelList\n\ndef generate_gt(outputPath,imgPathList,labelList,lang):\n\tpath = outputPath+lang+'lmdb_data_gt.txt'\n\twith open(path,'w') as f:\n\t\tfor image,label in zip(imgPathList,labelList):\n\t\t\tline = image+'\\t'+label+'\\n'\n\t\t\tf.write(line)\t\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--image_dir',help='path to Scene images')\n\tparser.add_argument('--image_gt_dir',help='path to Scene gt')\n\tparser.add_argument('--word_image_dir',help='path to store cropped word images')\n\tparser.add_argument('--output_path',help='path to save gt.txt')\n\tparser.add_argument('--lang',help='language to generate gt for')\n\targs = parser.parse_args()\n\t\n\timgPathList, labelList = create_lists(args)\n\t#print(len(imgPathList),len(labelList))\n\n\timgPathList_filtered, labelList_filtered = [], []\n\n\tfor img, label in zip(imgPathList,labelList):\n\t\tif not filter_text(args.lang,label):\n\t\t\timgPathList_filtered.append(img)\n\t\t\tlabelList_filtered.append(label)\n\n\n\tprint(len(imgPathList_filtered),len(labelList_filtered))\n\tgenerate_gt(args.output_path,imgPathList_filtered,labelList_filtered,args.lang)\n \n\n\n\n\n\n" ]
[ [ "numpy.max", "numpy.array", "numpy.min", "numpy.shape" ] ]
Orbitery/Herzenssache_Finale_Abgabe
[ "18e726f3e8f53b92b9e87eea6758df81f515d872" ]
[ "decide.py" ]
[ "import numpy as np\ndef decider(predicted, ecg_names,data_samples,data_names, is_binary_classifier):\n \"\"\"[Prediction results are analyzed. Each ECG signal is assigned a class by majority vote. This classification is then returned.\n e.g. in an ECG signal 50 \"A heartbeats\" and 2 \"O heartbeats\" were detected, then the corresponding ECG signal is classified as \"A\". ]\n\n Args:\n predicted (list): [List with the probabilites of each class]\n ecg_names (list): [List with the unique name of every ecg-signal]\n data_samples (list): [List with Values of ecg-signal]\n data_names (list): [List with the unique name of the ecg signal, which belongs to the heartbeats]\n is_binary_classifier (bool): [shows if binary (True) or multilabel (False) classification is active]\n\n Returns:\n predictions [list of tuples]: [Values of ECG-Predictions]\n \"\"\"\n\n data_samples = np.array(data_samples)\n data_samples = data_samples.reshape((*data_samples.shape, 1))\n\n predictions = list()\n label_predicted = []\n label_predicted_democatric = []\n x = 0\n\n if (is_binary_classifier==True):\n for row in predicted: #Determine the most probable class\n if predicted[x,0]>predicted[x,1]:\n label_predicted.append(\"N\")\n elif predicted[x,0]<predicted[x,1]:\n label_predicted.append(\"A\")\n else:\n print(\"Error\")\n x = x + 1\n n_sum = 0\n a_sum = 0\n t = 0\n\n for ecg_row in ecg_names: #Democratic approach to classify ECG signals based on heartbeat predictions.\n for idx, y in enumerate(data_names):\n if (ecg_row==y):\n if (label_predicted[idx]=='N'):\n n_sum = n_sum + 1\n elif (label_predicted[idx]=='A'):\n a_sum = a_sum +1\n else:\n pass\n if (n_sum>=a_sum):\n label_predicted_democatric.append(\"N\")\n elif (n_sum<a_sum):\n label_predicted_democatric.append(\"A\")\n print(\"In {}: Number of A-Heartbeats: {}, Number of N-Heartbeats: {}\".format(ecg_row,a_sum, n_sum))\n n_sum = 0\n a_sum = 0 \n \n\n\n for idx, name_row in enumerate(ecg_names): #Create the final predictions\n predictions.append((ecg_names[idx], label_predicted_democatric[idx]))\n\n elif (is_binary_classifier == False):\n for row in predicted: #Determine the most probable class\n if (((predicted[x,0]>predicted[x,1]) and (predicted[x,0]> predicted[x,2]) and (predicted[x,0]>predicted[x,3]))):\n label_predicted.append(\"N\")\n elif (((predicted[x,1]>predicted[x,0]) and (predicted[x,1]> predicted[x,2]) and (predicted[x,1]>predicted[x,3]))):\n label_predicted.append(\"A\")\n elif (((predicted[x,2]>predicted[x,0]) and (predicted[x,2]> predicted[x,1]) and (predicted[x,2]>predicted[x,3]))):\n label_predicted.append(\"O\")\n elif (((predicted[x,3]>predicted[x,0]) and (predicted[x,3]> predicted[x,1]) and (predicted[x,3]>predicted[x,2]))):\n label_predicted.append(\"~\")\n else:\n print(\"Error\")\n x = x + 1\n n_sum = 0\n a_sum = 0\n o_sum = 0\n t_sum = 0\n\n t = 0\n for ecg_row in ecg_names: #Democratic approach to classify ECG signals based on heartbeat predictions.\n for idx, y in enumerate(data_names):\n if (ecg_row==y):\n if (label_predicted[idx]=='N'):\n n_sum = n_sum + 1\n elif (label_predicted[idx]=='A'):\n a_sum = a_sum +1\n elif (label_predicted[idx]=='O'):\n o_sum = o_sum +1 \n elif (label_predicted[idx]=='~'):\n o_sum = o_sum +1 \n else:\n pass\n if ((n_sum>=a_sum)and(n_sum>=o_sum)and(n_sum>=t_sum)):\n label_predicted_democatric.append(\"N\")\n elif ((a_sum>=n_sum)and(a_sum>=o_sum)and(a_sum>=t_sum)):\n label_predicted_democatric.append(\"A\")\n elif ((o_sum>=n_sum)and(o_sum>=a_sum)and(o_sum>=t_sum)):\n label_predicted_democatric.append(\"O\")\n elif ((t_sum>=n_sum)and(t_sum>=o_sum)and(t_sum>=a_sum)):\n label_predicted_democatric.append(\"~\")\n print(\"In {}: Number of A-Heartbeats: {}, Number of N-Heartbeats: {}, Number of O-Heartbeats: {}, Number of ~-Heartbeats: {}\".format(ecg_row,a_sum, n_sum,o_sum,t_sum))\n n_sum = 0\n a_sum = 0\n o_sum = 0\n t_sum = 0\n\n \n for idx, name_row in enumerate(ecg_names): #Create the final predictions\n predictions.append((ecg_names[idx], label_predicted_democatric[idx]))\n print(\"Predictions are created\")\n return (predictions) \n " ]
[ [ "numpy.array" ] ]
zihangdai/reexamine-srnn
[ "8f467a8267ec52e2610fc34d8f9b6536007d1243" ]
[ "speech/create_permuted_data.py" ]
[ "import os, sys\nfrom shutil import copyfile\nimport glob\nimport argparse\n\nimport torch\n\nSEED = 123456\n\ndef permute_data(data, perm):\n if isinstance(data, list):\n return list([permute_data(d, perm) for d in data])\n else:\n if data.dim() == 2:\n data = data.mean(1)\n\n n_step = data.size(0) // perm.size(0)\n data = data[:n_step * perm.size(0)].view(n_step, perm.size(0))\n data = data[:, perm].view(-1).contiguous()\n\n return data\n\ndef main(args):\n # create new dir\n parent, child = args.data_dir.rstrip('/').rsplit('/', 1)\n save_dir = os.path.join(parent, '{}-permuted-{}'.format(child, args.d_data))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # create and save permutation to new dir\n torch.manual_seed(SEED)\n perm = torch.randperm(args.d_data)\n torch.save(perm, os.path.join(save_dir, 'perm-{}.t7'.format(args.d_data)))\n with open(os.path.join(save_dir, 'perm-{}.txt'.format(args.d_data)), 'w') \\\n as f:\n f.write(' '.join([str(i) for i in perm.tolist()]))\n\n # permutate data and save to new dir\n for fn in os.listdir(args.data_dir):\n print(fn)\n src = os.path.join(args.data_dir, fn)\n dst = os.path.join(save_dir, fn)\n if fn.endswith('.txt'):\n copyfile(src, dst)\n elif fn.endswith('.t7'):\n src_data = torch.load(src)\n dst_data = permute_data(src_data, perm)\n torch.save(dst_data, dst)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True)\n parser.add_argument('--d_data', type=int, default=200)\n\n args = parser.parse_args()\n\n main(args)\n" ]
[ [ "torch.manual_seed", "torch.save", "torch.randperm", "torch.load" ] ]
roy-wang-py/behavioralCloning
[ "d3a28679cee78cd49ecefa3d37bbd6cdb393a16f" ]
[ "model.py" ]
[ "import os\nimport csv\nfrom keras.models import Sequential, Model\nfrom keras.layers import Cropping2D,Flatten,Lambda,Dense,Activation,Dropout,MaxPooling2D\nimport cv2\nimport numpy as np\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\nfrom keras.layers.convolutional import Conv2D\nfrom keras.optimizers import Adam\nfrom scipy import ndimage\n\n\nsamples = []\n\n#file dir: file dir where data was stored.\nfile_dir = \"/opt/carnd_p3/data\"\n#file_dir = \"/home/workspace/data\"\nwith open(file_dir+'/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\nsamples = samples[1::]\n\n#split data\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n correction = 0.1 # this is a parameter to tune\n images = []\n angles = []\n for batch_sample in batch_samples:\n for i in range(3):\n #As i run simulator in windows, file dir is split by \"\\\" instead of \"/\" in dataset\n if batch_sample[i].find('\\\\') != -1 :\n name = file_dir+'/IMG/'+batch_sample[i].split('\\\\')[-1]\n else:\n name = file_dir+'/IMG/'+batch_sample[i].split('/')[-1] \n #center_image = cv2.imread(name)\n #simulator used RGB, while cv2.imread is BGR\n center_image = ndimage.imread(name)\n \n center_angle = float(batch_sample[3])\n \n if i == 0:\n #flip center img\n images.append(center_image)\n angles.append(center_angle)\n images.append(cv2.flip(center_image,1))\n angles.append(center_angle*-1.0)\n elif i ==1:\n #flip left img\n center_angle = center_angle+correction\n images.append(center_image)\n angles.append(center_angle)\n images.append(cv2.flip(center_image,1))\n angles.append(center_angle*-1.0)\n else:\n #flip right img\n center_angle = center_angle-correction\n images.append(center_image)\n angles.append(center_angle)\n images.append(cv2.flip(center_image,1))\n angles.append(center_angle*-1.0)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\n#define activate function\nactivate_func = 'elu'\n\n#create model\nmodel = Sequential()\n\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: ((x / 255.0) - 0.5), input_shape=(160,320,3)))\n#crop images\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\nmodel.add(Conv2D(24,(5,5),strides=(2,2),activation=activate_func)) \n#model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Conv2D(36,(3,3),strides=(2,2),activation=activate_func))\n#model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Conv2D(48,(3,3),strides=(2,2),activation=activate_func))\n#model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Conv2D(64,(3,3),activation=activate_func))\n#model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Conv2D(64,(3,3),activation=activate_func))\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(100))\nmodel.add(Activation(activate_func))\n#model.add(Dropout(0.5))\nmodel.add(Dense(50))\nmodel.add(Activation(activate_func))\n#model.add(Dropout(0.5))\n\nmodel.add(Dense(10))\nmodel.add(Activation(activate_func))\n#model.add(Dropout(0.5))\n\nmodel.add(Dense(1))\nmodel.summary()\n\n#set default learning rate\nmodel.compile(optimizer=Adam(0.0001), loss=\"mse\")\n#model.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3)\n\nmodel.save('model.h5') # creates a HDF5 file 'model.h5'\n\n\n\n" ]
[ [ "sklearn.model_selection.train_test_split", "scipy.ndimage.imread", "numpy.array", "sklearn.utils.shuffle" ] ]
TianHongZXY/Fengshenbang-LM
[ "ea13855627264a1c3d7e19d7e9d0e0ca7ab6cac4" ]
[ "fengshen/models/bart/modeling_bart.py" ]
[ "import warnings\nfrom pytorch_lightning import LightningModule\nfrom fengshen.models import transformer_utils\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nfrom transformers.file_utils import add_end_docstrings, replace_return_docstrings\nfrom transformers.modeling_outputs import ModelOutput, Seq2SeqLMOutput\nfrom transformers.models.bart import BartPretrainedModel, BartConfig, BartModel\nfrom transformers.models.bart.modeling_bart import BartClassificationHead\n\n\n_CONFIG_FOR_DOC = \"BartConfig\"\n\n\n# ------------------------ ZZ: CBart addition ------------------------\n\n\ndef _reorder_buffer(attn_cache, new_order):\n for k, input_buffer_k in attn_cache.items():\n if input_buffer_k is not None:\n attn_cache[k] = input_buffer_k.index_select(0, new_order)\n return attn_cache\n\n\ndef _make_linear_from_emb(emb):\n vocab_size, emb_size = emb.weight.shape\n lin_layer = nn.Linear(vocab_size, emb_size, bias=False)\n lin_layer.weight.data = emb.weight.data\n return lin_layer\n\n\nBART_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')\n\n >>> ARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs.\"\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)\n >>> print([tokenizer.decode(g, skip_special_tokens=True,\n clean_up_tokenization_spaces=False) for g in summary_ids])\n\n Mask filling example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n\"\"\"\n\n\n@dataclass\nclass CBartLMOutput(ModelOutput):\n \"\"\"\n Base class for CBart specific language models outputs.\n\n Args:\n ....\n \"\"\"\n loss: Optional[torch.FloatTensor] = None\n encoder_loss: Optional[torch.FloatTensor] = None\n decoder_loss: Optional[torch.FloatTensor] = None\n encoder_logits: torch.FloatTensor = None\n logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass BartForTextInfill(BartPretrainedModel):\n \"\"\"\n this class is designed for text infilling.\n During training, the encoder is used to predict replace, insert,\n and the decoder is used to generate original input.\n Compared with BartForConditionalGeneration class,\n we add a module over the encoder and add a new loss for the encoder.\n \"\"\"\n base_model_prefix = \"model\"\n authorized_missing_keys = [r\"final_logits_bias\",\n r\"encoder\\.version\", r\"decoder\\.version\"]\n\n def __init__(self, config: BartConfig):\n super().__init__(config)\n base_model = BartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros(\n (1, self.model.shared.num_embeddings)))\n # print( config.encoder_loss_type, config.num_labels)\n\n # add a new attribute into BartConfig class (revise BartConfig)\n self.encoder_loss_type = config.encoder_loss_type\n self.num_labels = config.num_labels\n if self.encoder_loss_type == 0: # 0 is classification loss, 1 is regression loss\n # add a classification module for the encoder\n self.classification_head = BartClassificationHead(\n config.d_model, config.d_model, config.num_labels, config.classif_dropout,\n )\n else:\n # add a regression module for the encoder\n self.classification_head = BartClassificationHead(\n config.d_model, config.d_model, 1, config.classif_dropout,\n )\n\n self.model._init_weights(self.classification_head.dense)\n self.model._init_weights(self.classification_head.out_proj)\n self.loss_weight = config.loss_weight\n self.register_buffer(\"label_weights\", torch.zeros((self.num_labels)))\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n old_num_tokens = self.model.shared.num_embeddings\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self.model.shared = new_embeddings\n self._resize_final_logits_bias(new_num_tokens, old_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens),\n device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n past_key_values=None,\n encoder_labels=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=True,\n **unused,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`,\n `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should either be in ``[0, ..., config.vocab_size]``\n or -100 (see ``input_ids`` docstring).\n Tokens with indices set to ``-100`` are ignored (masked),\n the loss is only computed for the tokens\n with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n Conditional generation example::\n\n # Mask filling only works for bart-large\n from transformers import BartTokenizer, BartForConditionalGeneration\n tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n logits = model(input_ids).logits\n\n masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n probs = logits[0, masked_index].softmax(dim=0)\n values, predictions = probs.topk(5)\n\n tokenizer.decode(predictions).split()\n # ['good', 'great', 'all', 'really', 'very']\n \"\"\"\n if \"lm_labels\" in unused:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = unused.pop(\"lm_labels\")\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, \"\n + \"use `decoder_past_key_values` instead.\",\n FutureWarning,\n )\n unused.pop(\"decoder_cached_states\")\n return_dict = return_dict if return_dict is not None else False\n\n if labels is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # logits and loss for the encoder\n # last hidden state\n encoder_last_hidden_state = outputs['encoder_last_hidden_state']\n # eos_mask = input_ids.eq(self.config.eos_token_id)\n # if len(torch.unique(eos_mask.sum(1))) > 1:\n # raise ValueError(\"All examples must have the same number of <eos> tokens.\")\n # sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]\n encoder_logits = self.classification_head(encoder_last_hidden_state)\n encoder_loss = None\n if encoder_labels is not None:\n # classification loss\n if self.encoder_loss_type == 0:\n # ZZ: seems like MSE loss does not support weighting, so only CEL has weighting applied for now\n loss_fct = nn.CrossEntropyLoss(weight=self.label_weights)\n encoder_loss = loss_fct(\n encoder_logits.view(-1, self.config.num_labels), encoder_labels.view(-1))\n # regression loss\n else:\n encoder_logits = encoder_logits.view(\n encoder_logits.size(0), -1)\n encoder_logits = torch.sigmoid(\n encoder_logits) * self.num_labels - 0.5\n loss_fct = nn.MSELoss(reduction='none')\n _loss = loss_fct(encoder_logits, encoder_labels)\n encoder_loss = torch.mean(_loss[encoder_labels >= 0])\n # encoder_loss =_loss[encoder_labels>=0]\n\n # logits and loss for the decoder\n lm_logits = F.linear(\n outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n masked_lm_loss = None\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n # TODO(SS): do we need to ignore pad tokens in labels?\n masked_lm_loss = loss_fct(\n lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n loss = None\n if masked_lm_loss is not None and encoder_loss is not None:\n loss = encoder_loss * self.loss_weight + masked_lm_loss\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CBartLMOutput(\n loss=loss,\n encoder_loss=encoder_loss,\n decoder_loss=masked_lm_loss,\n encoder_logits=encoder_logits,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs):\n assert past is not None, \"past has to be defined for encoder_outputs\"\n\n encoder_outputs, past_key_values = past\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past_key_values,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n # change this to avoid caching (presumably for debugging)\n \"use_cache\": use_cache,\n }\n\n def adjust_logits_during_generation(self, logits, cur_len, max_length):\n if cur_len == 1:\n self._force_token_ids_generation(logits, self.config.bos_token_id)\n if cur_len == max_length - 1 and self.config.eos_token_id is not None:\n self._force_token_ids_generation(logits, self.config.eos_token_id)\n return logits\n\n def _force_token_ids_generation(self, scores, token_ids) -> None:\n \"\"\"force one of token_ids to be generated by setting prob of all other tokens to 0\"\"\"\n if isinstance(token_ids, int):\n token_ids = [token_ids]\n all_but_token_ids_mask = torch.tensor(\n [x for x in range(self.config.vocab_size) if x not in token_ids],\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n assert len(\n scores.shape) == 2, \"scores should be of rank 2 with shape: [batch_size, vocab_size]\"\n scores[:, all_but_token_ids_mask] = -float(\"inf\")\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n ((enc_out, enc_mask), past_key_values) = past\n reordered_past = []\n for layer_past in past_key_values:\n # get the correct batch idx from decoder layer's batch dim for cross and self-attn\n layer_past_new = {\n attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()\n }\n reordered_past.append(layer_past_new)\n\n new_enc_out = enc_out if enc_out is None else enc_out.index_select(\n 0, beam_idx)\n new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(\n 0, beam_idx)\n\n past = ((new_enc_out, new_enc_mask), reordered_past)\n return past\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_output_embeddings(self):\n return _make_linear_from_emb(self.model.shared) # make it on the fly\n\n def get_encoder_logits(self, input_ids, attention_mask=None):\n # print(input_ids, attention_mask)\n # encoder_outputs = self.model.get_encoder_outputs(\n # self,\n # input_ids,\n # attention_mask=attention_mask,\n # output_attentions=None,\n # output_hidden_states=None,\n # return_dict=None,\n # )\n\n encoder_outputs = self.model.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n return_dict=True\n )\n # logits and loss for the encoder\n # last hidden state\n encoder_last_hidden_state = encoder_outputs['last_hidden_state']\n encoder_logits = self.classification_head(encoder_last_hidden_state)\n\n # classification\n if self.encoder_loss_type == 0:\n # probs = torch.softmax(encoder_logits,dim=-1)\n pass\n # regression\n else:\n encoder_logits = encoder_logits.view(encoder_logits.size(0), -1)\n encoder_logits = torch.sigmoid(\n encoder_logits) * self.num_labels - 0.5\n return encoder_outputs, encoder_logits\n\n\nclass CBartLightning(LightningModule):\n @staticmethod\n def add_module_specific_args(parent_args):\n parser = parent_args.add_argument_group(\"CBart specific parameters\")\n parser.add_argument('--num_labels', type=int, default=3)\n parser.add_argument('--encoder_loss_type', type=int, default=0)\n parser.add_argument('--loss_weight', type=float, default=1.0)\n parser.add_argument('--label_weights', type=float, nargs='+', default=[1.0, 1.0, 1.0])\n parser.add_argument('--masked_lm', type=float, default=0)\n return parent_args\n\n def __init__(\n self,\n args,\n **kwargs,\n ):\n super().__init__()\n self.save_hyperparameters(args)\n self.model = BartForTextInfill.from_pretrained(args.model_path, num_labels=self.hparams.num_labels,\n encoder_loss_type=self.hparams.encoder_loss_type,\n loss_weight=self.hparams.loss_weight,)\n self.model.label_weights = torch.tensor(\n self.hparams.label_weights, dtype=torch.half)\n\n def forward(self, **inputs):\n return self.model(**inputs)\n\n def training_step(self, batch, batch_idx):\n outputs = self(**batch)\n return outputs\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n outputs = self(**batch)\n val_loss = outputs[\"loss\"]\n\n return {\"loss\": val_loss}\n\n def setup(self, stage=None) -> None:\n if stage != \"fit\":\n return\n # Get dataloader by calling it - train_dataloader() is called after setup() by default\n train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()\n\n # Calculate total steps\n tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)\n ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)\n self.total_steps = (len(train_loader.dataset) // tb_size) // ab_size\n\n def configure_optimizers(self):\n transformer_utils.configure_optimizers(self)\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cat", "torch.sigmoid", "torch.nn.MSELoss", "torch.nn.functional.linear", "torch.tensor", "torch.mean", "torch.nn.CrossEntropyLoss" ] ]
packtpartner/pymc3
[ "48cc820494ff22a3010ac2440fc1c5d28e09d87e" ]
[ "benchmarks/benchmarks/benchmarks.py" ]
[ "import time\nimport timeit\n\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport theano\nimport theano.tensor as tt\n\n\ndef glm_hierarchical_model(random_seed=123):\n \"\"\"Sample glm hierarchical model to use in benchmarks\"\"\"\n np.random.seed(random_seed)\n data = pd.read_csv(pm.get_data('radon.csv'))\n data['log_radon'] = data['log_radon'].astype(theano.config.floatX)\n county_idx = data.county_code.values\n\n n_counties = len(data.county.unique())\n with pm.Model() as model:\n mu_a = pm.Normal('mu_a', mu=0., sd=100**2)\n sigma_a = pm.HalfCauchy('sigma_a', 5)\n mu_b = pm.Normal('mu_b', mu=0., sd=100**2)\n sigma_b = pm.HalfCauchy('sigma_b', 5)\n a = pm.Normal('a', mu=0, sd=1, shape=n_counties)\n b = pm.Normal('b', mu=0, sd=1, shape=n_counties)\n a = mu_a + sigma_a * a\n b = mu_b + sigma_b * b\n eps = pm.HalfCauchy('eps', 5)\n radon_est = a[county_idx] + b[county_idx] * data.floor.values\n pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)\n return model\n\n\ndef mixture_model(random_seed=1234):\n \"\"\"Sample mixture model to use in benchmarks\"\"\"\n np.random.seed(1234)\n size = 1000\n w_true = np.array([0.35, 0.4, 0.25])\n mu_true = np.array([0., 2., 5.])\n sigma = np.array([0.5, 0.5, 1.])\n component = np.random.choice(mu_true.size, size=size, p=w_true)\n x = np.random.normal(mu_true[component], sigma[component], size=size)\n\n with pm.Model() as model:\n w = pm.Dirichlet('w', a=np.ones_like(w_true))\n mu = pm.Normal('mu', mu=0., sd=10., shape=w_true.shape)\n enforce_order = pm.Potential('enforce_order', tt.switch(mu[0] - mu[1] <= 0, 0., -np.inf) +\n tt.switch(mu[1] - mu[2] <= 0, 0., -np.inf))\n tau = pm.Gamma('tau', alpha=1., beta=1., shape=w_true.shape)\n pm.NormalMixture('x_obs', w=w, mu=mu, tau=tau, observed=x)\n\n # Initialization can be poorly specified, this is a hack to make it work\n start = {\n 'mu': mu_true.copy(),\n 'tau_log__': np.log(1. / sigma**2),\n 'w_stickbreaking__': np.array([-0.03, 0.44])\n }\n return model, start\n\n\nclass OverheadSuite:\n \"\"\"\n Just tests how long sampling from a normal distribution takes for various\n samplers\n \"\"\"\n params = [pm.NUTS, pm.HamiltonianMC, pm.Metropolis, pm.Slice]\n timer = timeit.default_timer\n\n def setup(self, step):\n self.n_steps = 10000\n with pm.Model() as self.model:\n pm.Normal('x', mu=0, sd=1)\n\n def time_overhead_sample(self, step):\n with self.model:\n pm.sample(self.n_steps, step=step(), random_seed=1,\n progressbar=False, compute_convergence_checks=False)\n\n\nclass ExampleSuite:\n \"\"\"Implements examples to keep up with benchmarking them.\"\"\"\n timeout = 360.0 # give it a few minutes\n timer = timeit.default_timer\n\n def time_drug_evaluation(self):\n drug = np.array([101, 100, 102, 104, 102, 97, 105, 105, 98, 101,\n 100, 123, 105, 103, 100, 95, 102, 106, 109, 102, 82,\n 102, 100, 102, 102, 101, 102, 102, 103, 103, 97, 97,\n 103, 101, 97, 104, 96, 103, 124, 101, 101, 100, 101,\n 101, 104, 100, 101])\n placebo = np.array([99, 101, 100, 101, 102, 100, 97, 101, 104, 101,\n 102, 102, 100, 105, 88, 101, 100, 104, 100, 100,\n 100, 101, 102, 103, 97, 101, 101, 100, 101, 99,\n 101, 100, 100, 101, 100, 99, 101, 100, 102, 99,\n 100, 99])\n\n y = pd.DataFrame({\n 'value': np.r_[drug, placebo],\n 'group': np.r_[['drug']*len(drug), ['placebo']*len(placebo)]\n })\n y_mean = y.value.mean()\n y_std = y.value.std() * 2\n\n sigma_low = 1\n sigma_high = 10\n with pm.Model():\n group1_mean = pm.Normal('group1_mean', y_mean, sd=y_std)\n group2_mean = pm.Normal('group2_mean', y_mean, sd=y_std)\n group1_std = pm.Uniform('group1_std', lower=sigma_low, upper=sigma_high)\n group2_std = pm.Uniform('group2_std', lower=sigma_low, upper=sigma_high)\n lambda_1 = group1_std**-2\n lambda_2 = group2_std**-2\n\n nu = pm.Exponential('ν_minus_one', 1/29.) + 1\n\n pm.StudentT('drug', nu=nu, mu=group1_mean, lam=lambda_1, observed=drug)\n pm.StudentT('placebo', nu=nu, mu=group2_mean, lam=lambda_2, observed=placebo)\n diff_of_means = pm.Deterministic('difference of means', group1_mean - group2_mean)\n pm.Deterministic('difference of stds', group1_std - group2_std)\n pm.Deterministic(\n 'effect size', diff_of_means / np.sqrt((group1_std**2 + group2_std**2) / 2))\n pm.sample(draws=20000, cores=4, chains=4,\n progressbar=False, compute_convergence_checks=False)\n\n def time_glm_hierarchical(self):\n with glm_hierarchical_model():\n pm.sample(draws=20000, cores=4, chains=4,\n progressbar=False, compute_convergence_checks=False)\n\n\nclass NUTSInitSuite:\n \"\"\"Tests initializations for NUTS sampler on models\n \"\"\"\n timeout = 360.0\n params = ('adapt_diag', 'jitter+adapt_diag', 'advi+adapt_diag_grad')\n number = 1\n repeat = 1\n draws = 10000\n chains = 4\n\n def time_glm_hierarchical_init(self, init):\n \"\"\"How long does it take to run the initialization.\"\"\"\n with glm_hierarchical_model():\n pm.init_nuts(init=init, chains=self.chains, progressbar=False)\n\n def track_glm_hierarchical_ess(self, init):\n with glm_hierarchical_model():\n start, step = pm.init_nuts(init=init, chains=self.chains, progressbar=False, random_seed=123)\n t0 = time.time()\n trace = pm.sample(draws=self.draws, step=step, cores=4, chains=self.chains,\n start=start, random_seed=100, progressbar=False,\n compute_convergence_checks=False)\n tot = time.time() - t0\n ess = float(pm.ess(trace, var_names=['mu_a'])['mu_a'].values)\n return ess / tot\n\n def track_marginal_mixture_model_ess(self, init):\n model, start = mixture_model()\n with model:\n _, step = pm.init_nuts(init=init, chains=self.chains,\n progressbar=False, random_seed=123)\n start = [{k: v for k, v in start.items()} for _ in range(self.chains)]\n t0 = time.time()\n trace = pm.sample(draws=self.draws, step=step, cores=4, chains=self.chains,\n start=start, random_seed=100, progressbar=False,\n compute_convergence_checks=False)\n tot = time.time() - t0\n ess = pm.ess(trace, var_names=['mu'])['mu'].values.min() # worst case\n return ess / tot\n\n\nNUTSInitSuite.track_glm_hierarchical_ess.unit = 'Effective samples per second'\nNUTSInitSuite.track_marginal_mixture_model_ess.unit = 'Effective samples per second'\n\n\nclass CompareMetropolisNUTSSuite:\n timeout = 360.0\n # None will be the \"sensible default\", and include initialization, but should be fastest\n params = (None, pm.NUTS, pm.Metropolis)\n number = 1\n repeat = 1\n draws = 20000\n\n def track_glm_hierarchical_ess(self, step):\n with glm_hierarchical_model():\n if step is not None:\n step = step()\n t0 = time.time()\n trace = pm.sample(draws=self.draws, step=step, cores=4, chains=4,\n random_seed=100, progressbar=False,\n compute_convergence_checks=False)\n tot = time.time() - t0\n ess = float(pm.ess(trace, var_names=['mu_a'])['mu_a'].values)\n return ess / tot\n\n\nCompareMetropolisNUTSSuite.track_glm_hierarchical_ess.unit = 'Effective samples per second'\n\n\nclass DifferentialEquationSuite:\n \"\"\"Implements ode examples to keep up with benchmarking them.\"\"\"\n\n timeout = 600\n timer = timeit.default_timer\n\n def track_1var_2par_ode_ess(self):\n def freefall(y, t, p):\n return 2.0 * p[1] - p[0] * y[0]\n\n # Times for observation\n times = np.arange(0, 10, 0.5)\n y = np.array([\n -2.01,\n 9.49,\n 15.58,\n 16.57,\n 27.58,\n 32.26,\n 35.13,\n 38.07,\n 37.36,\n 38.83,\n 44.86,\n 43.58,\n 44.59,\n 42.75,\n 46.9,\n 49.32,\n 44.06,\n 49.86,\n 46.48,\n 48.18\n ]).reshape(-1, 1)\n \n ode_model = pm.ode.DifferentialEquation(func=freefall, times=times, n_states=1, n_theta=2, t0=0)\n with pm.Model() as model:\n # Specify prior distributions for some of our model parameters\n sigma = pm.HalfCauchy(\"sigma\", 1)\n gamma = pm.Lognormal(\"gamma\", 0, 1)\n # If we know one of the parameter values, we can simply pass the value.\n ode_solution = ode_model(y0=[0], theta=[gamma, 9.8])\n # The ode_solution has a shape of (n_times, n_states)\n Y = pm.Normal(\"Y\", mu=ode_solution, sd=sigma, observed=y)\n \n t0 = time.time()\n trace = pm.sample(500, tune=1000, chains=2, cores=2, random_seed=0)\n tot = time.time() - t0\n ess = pm.ess(trace)\n return np.mean([ess.sigma, ess.gamma]) / tot\n\n \nDifferentialEquationSuite.track_1var_2par_ode_ess.unit = 'Effective samples per second'\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.ones_like", "numpy.random.choice", "numpy.log", "numpy.random.seed", "numpy.mean", "numpy.arange", "numpy.sqrt" ] ]
DavidHuizingh/pixelsort
[ "1f300a61d5d8ccfc0efcc44e86aa354974048684" ]
[ "Prototyping Code/array_test/arrays_and_images.py" ]
[ "\nfrom pathlib import Path\nfrom PIL import Image\nfrom time import sleep\nimport numpy as np\n\nimport skimage\n\n\n\n# F:\\GitHub\\pixelsort\\array_test\nfolder = Path(__file__).parent\ntest_image = folder / \"pixels.jpg\"\n\n\nimage = Image.open(test_image)\nimage.show()\n\nimage_np = np.array(image)\nimage_from_np = Image.fromarray(image_np)\nimage_from_np.show()\n\n\n\nshape = np.shape(image)\nx_max = shape[0]\ny_max = shape[1]\n\ny_array = []\nfor y in range(y_max):\n x_array = []\n for x in range(x_max):\n \n pixel_data = image_np[x, y]\n \n x_array.append(pixel_data)\n\n x_bar = str(x_array)\n y_array.append(x_bar)\n\n\n\nfor i, bar in enumerate(y_array, 0):\n print(f\"row {i} | {bar}\")\n\ncoord = image_np[0,0]\n\nprint(coord)\n\n#skimage.transform.setup\n\n\nimage_data = image.load()\nimage_data_np = np.array(image_data)\n\nsleep(1)\n\n" ]
[ [ "numpy.array", "numpy.shape" ] ]
nicolaseberle/flyTracker
[ "387b64a7c92d7ecf503dfccde29d7db9efb07cfb" ]
[ "dev/pytorch_dataset_loader/removing_open_cv.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nfrom flytracker.utils import FourArenasQRCodeMask\nfrom torch.utils.data import DataLoader\nfrom itertools import takewhile\nimport matplotlib.pyplot as plt\nfrom torchvision.transforms.functional import rgb_to_grayscale, to_tensor\nimport cv2 as cv\n\n\n# In[22]:\n\n\nclass VideoDataset(torch.utils.data.IterableDataset):\n def __init__(self, path, device=\"cuda\"):\n super().__init__()\n self.capture = cv.VideoCapture(path)\n self.device = device\n\n def __iter__(self):\n return self\n\n def __next__(self) -> torch.Tensor:\n # Loading image\n succes, image = self.capture.read()\n if succes is False:\n raise StopIteration\n\n image = torch.tensor(image)\n image = torch.movedim(torch.tensor(image), -1, 0)\n image = rgb_to_grayscale(image).squeeze()\n return image\n\n\nmask = FourArenasQRCodeMask().mask\npath = \"/home/gert-jan/Documents/flyTracker/data/movies/4arenas_QR.h264\"\n\ndataset = VideoDataset(path, device=\"cuda\")\nloader = DataLoader(dataset, batch_size=1, pin_memory=True, num_workers=4)\n\n\nfor batch_idx, batch in enumerate(loader):\n image = batch.cuda(non_blocking=True)\n if batch_idx % 100 == 0:\n print(f\"Loaded {batch_idx}, {image.device}\")\n if batch_idx == 1000:\n break\n\n" ]
[ [ "torch.tensor", "torch.utils.data.DataLoader" ] ]
uncbiag/MedicalNet
[ "02507c71bd3e5ae431850f4fffa97da4900837f5" ]
[ "datasets/cbct_onthefly.py" ]
[ "'''\nDataset for training\nWritten by Whalechen\n'''\n\nimport math\nimport os\nimport random\n\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport nibabel\nimport SimpleITK as sitk\nfrom scipy import ndimage\nimport blosc\nblosc.set_nthreads(1)\n\n\nclass CbctOnTheFlyDataset(Dataset):\n\n def __init__(self, img_list, sets):\n with open(img_list, 'r') as f:\n self.img_list = [line.strip() for line in f]\n print(\"Processing {} datas\".format(len(self.img_list)))\n self.input_D = sets.input_D\n self.input_H = sets.input_H\n self.input_W = sets.input_W\n self.phase = sets.phase\n\n\n if self.phase == \"train\":\n # read image and labels\n self.img_dict = {}\n self.label_dict = {}\n for idx in range(len(self.img_list)):\n ith_info = self.img_list[idx].split(\" \")\n img_name = ith_info[0]\n label_name = ith_info[1]\n assert os.path.isfile(img_name)\n assert os.path.isfile(label_name)\n print(\"Loading {} \".format(img_name))\n # img = nibabel.load(img_name)\n img = sitk.ReadImage(img_name)\n assert img is not None\n #mask = nibabel.load(label_name)\n mask = sitk.ReadImage(label_name)\n assert mask is not None\n # data processing\n # img = img.get_data()\n # mask = mask.get_data()\n img = sitk.GetArrayFromImage(img)\n mask = sitk.GetArrayFromImage(mask)\n\n img_array, mask_array = self.__training_data_process__(img, mask)\n\n img_array = self.__nii2tensorarray__(img_array)\n mask_array = self.__nii2tensorarray__(mask_array)\n\n self.img_dict[img_name] = blosc.pack_array(img_array)\n self.label_dict[label_name] = blosc.pack_array(mask_array)\n\n elif self.phase == \"test\":\n # read image\n self.img_dict = {}\n # self.label_dict = {}\n for idx in range(len(self.img_list)):\n ith_info = self.img_list[idx].split(\" \")\n img_name = ith_info[0]\n # label_name = ith_info[1]\n assert os.path.isfile(img_name)\n # assert os.path.isfile(label_name)\n # img = nibabel.load(img_name)\n img = sitk.ReadImage(img_name)\n assert img is not None\n # mask = nibabel.load(label_name)\n # assert mask is not None\n\n img = sitk.GetArrayFromImage(img)\n # img = img.get_data()\n # mask = mask.get_data()\n\n img_array = self.__testing_data_process__(img)\n img_array = self.__nii2tensorarray__(img_array)\n\n self.img_dict[img_name] = blosc.pack_array(img_array)\n # self.label_dict[label_name] = blosc.pack_array(mask)\n\n def __nii2tensorarray__(self, data):\n [z, y, x] = data.shape\n new_data = np.reshape(data, [1, z, y, x])\n new_data = new_data.astype(\"float32\")\n \n return new_data\n \n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n \n if self.phase == \"train\":\n # read image and labels\n ith_info = self.img_list[idx].split(\" \")\n img_name = ith_info[0]\n label_name = ith_info[1]\n\n img_array = blosc.unpack_array(self.img_dict[img_name])\n mask_array = blosc.unpack_array(self.label_dict[label_name])\n\n # data processing\n # img_array, mask_array = self.__training_data_process__(img, mask)\n\n # 2 tensor array\n # img_array = self.__nii2tensorarray__(img_array)\n # mask_array = self.__nii2tensorarray__(mask_array)\n\n assert img_array.shape == mask_array.shape, \"img shape:{} is not equal to mask shape:{}\".format(img_array.shape, mask_array.shape)\n return img_array, mask_array\n \n elif self.phase == \"test\":\n # read image\n ith_info = self.img_list[idx].split(\" \")\n img_name = ith_info[0]\n # label_name = ith_info[1]\n\n img_array = blosc.unpack_array(self.img_dict[img_name])\n # mask = blosc.unpack_array(self.label_dict[label_name])\n # data processing\n # img_array = self.__testing_data_process__(img)\n\n # 2 tensor array\n # img_array = self.__nii2tensorarray__(img_array)\n\n return img_array\n \n\n def __drop_invalid_range__(self, volume, label=None):\n \"\"\"\n Cut off the invalid area\n \"\"\"\n zero_value = volume[0, 0, 0]\n non_zeros_idx = np.where(volume != zero_value)\n \n [max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)\n [min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)\n \n if label is not None:\n return volume[min_z:max_z, min_h:max_h, min_w:max_w], label[min_z:max_z, min_h:max_h, min_w:max_w]\n else:\n return volume[min_z:max_z, min_h:max_h, min_w:max_w]\n\n\n def __random_center_crop__(self, data, label):\n from random import random\n \"\"\"\n Random crop\n \"\"\"\n target_indexs = np.where(label>0)\n [img_d, img_h, img_w] = data.shape\n [max_D, max_H, max_W] = np.max(np.array(target_indexs), axis=1)\n [min_D, min_H, min_W] = np.min(np.array(target_indexs), axis=1)\n [target_depth, target_height, target_width] = np.array([max_D, max_H, max_W]) - np.array([min_D, min_H, min_W])\n Z_min = int((min_D - target_depth*1.0/2) * random())\n Y_min = int((min_H - target_height*1.0/2) * random())\n X_min = int((min_W - target_width*1.0/2) * random())\n \n Z_max = int(img_d - ((img_d - (max_D + target_depth*1.0/2)) * random()))\n Y_max = int(img_h - ((img_h - (max_H + target_height*1.0/2)) * random()))\n X_max = int(img_w - ((img_w - (max_W + target_width*1.0/2)) * random()))\n \n Z_min = np.max([0, Z_min])\n Y_min = np.max([0, Y_min])\n X_min = np.max([0, X_min])\n\n Z_max = np.min([img_d, Z_max])\n Y_max = np.min([img_h, Y_max])\n X_max = np.min([img_w, X_max])\n \n Z_min = int(Z_min)\n Y_min = int(Y_min)\n X_min = int(X_min)\n \n Z_max = int(Z_max)\n Y_max = int(Y_max)\n X_max = int(X_max)\n\n return data[Z_min: Z_max, Y_min: Y_max, X_min: X_max], label[Z_min: Z_max, Y_min: Y_max, X_min: X_max]\n\n\n\n def __itensity_normalize_one_volume__(self, volume):\n \"\"\"\n normalize the itensity of an nd volume based on the mean and std of nonzeor region\n inputs:\n volume: the input nd volume\n outputs:\n out: the normalized nd volume\n \"\"\"\n\n bg = -1\n pixels = volume[volume > bg]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # out_random = np.random.normal(0, 1, size = volume.shape)\n # out[volume == bg] = out_random[volume == bg]\n return out\n\n def __resize_data__(self, data):\n \"\"\"\n Resize the data to the input size\n \"\"\" \n [depth, height, width] = data.shape\n scale = [self.input_D*1.0/depth, self.input_H*1.0/height, self.input_W*1.0/width]\n data = ndimage.interpolation.zoom(data, scale, order=0)\n\n return data\n\n\n def __crop_data__(self, data, label):\n \"\"\"\n Random crop with different methods:\n \"\"\" \n # random center crop\n data, label = self.__random_center_crop__ (data, label)\n \n return data, label\n\n def __training_data_process__(self, data, label): \n # drop out the invalid range\n # data, label = self.__drop_invalid_range__(data, label)\n \n # crop data\n # data, label = self.__crop_data__(data, label)\n\n # resize data\n data = self.__resize_data__(data)\n label = self.__resize_data__(label)\n\n # normalization datas\n data = self.__itensity_normalize_one_volume__(data)\n\n return data, label\n\n\n def __testing_data_process__(self, data):\n # resize data\n data = self.__resize_data__(data)\n\n # normalization datas\n data = self.__itensity_normalize_one_volume__(data)\n\n return data\n" ]
[ [ "numpy.max", "numpy.array", "numpy.reshape", "numpy.min", "numpy.where", "scipy.ndimage.interpolation.zoom" ] ]
alexshtf/inc_prox_pt
[ "a826c7179a528757399e661c5619a68dad254711" ]
[ "examples/minibatch_cvxlin_logistic.py" ]
[ "import math\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom minibatch_cvxlin import MiniBatchConvLinOptimizer, Logistic\n\n# generate a two-dimensional least-squares problem\nN = 10000\nx_star = torch.tensor([2., -5.]) # the label-generating separating hyperplane\nfeatures = torch.rand((N, 2)) # A randomly generated data matrix\n\n# create binary labels in {-1, 1}\nlabels = torch.mv(features, x_star) + torch.distributions.Normal(0, 0.02).sample((N,)) # the labels, contaminated by noise\nlabels = 2 * torch.heaviside(labels, torch.tensor([1.])) - 1\n\n# attempt to recover x_star using a L2 (Tikhonov) regularized least squares problem.\nx = torch.zeros(x_star.shape)\noptimizer = MiniBatchConvLinOptimizer(x, Logistic())\ndataset = TensorDataset(features, labels)\nfor epoch in range(10):\n epoch_loss = 0.\n for t, (f, y) in enumerate(DataLoader(dataset, batch_size=32), start=1):\n eta = 1. / math.sqrt(t)\n a = -y.unsqueeze(1) * f # in logistic regression, the losses are ln(1+exp(-y*f))\n b = torch.zeros_like(y)\n epoch_loss += optimizer.step(eta, a, b).mean().item()\n\n print('Epoch loss = ', (epoch_loss / N))\n\nprint('Model parameters = ', x)\n" ]
[ [ "torch.zeros", "torch.rand", "torch.mv", "torch.distributions.Normal", "torch.tensor", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.utils.data.TensorDataset" ] ]
mayurand/deepRL-p2-Continuous-Control
[ "02b9eef254dde34c3c13f1736b4e3ef88d642f7d" ]
[ "p2_continuous-control/tests/policy_agent_reinforce.py" ]
[ "import numpy as np\nimport random\nfrom collections import namedtuple, deque\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass Policy(nn.Module):\n \"\"\"\n This class implements a Neural Network as a function approximator for a policy.\n \"\"\"\n \n def __init__(self, state_size=33, action_size=4, seed=0):\n \n # We need to initialize the nn.Module class within Policy(). \n super(Policy, self).__init__() # The super initializes the nn.Module parentclass \n h1_size=32\n h2_size=64\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n \n self.fc1 = nn.Linear(state_size, h1_size) # First layer\n self.fc2 = nn.Linear(h1_size, h2_size) # Second layer\n self.fc3 = nn.Linear(h2_size, action_size) # Output layer\n \n def forward(self, state):\n h1_state = F.relu(self.fc1(state))\n h2_state = F.relu(self.fc2(h1_state))\n action_probs = F.tanh(self.fc3(h2_state)) \n return action_probs\n \n def act(self, state):\n # Convert the state (as a numpy array) into a torch tensor \n state_in_torch = torch.from_numpy(state).float().unsqueeze(0)\n \n # Pass the input state from the network and get action probs\n action_probs = self.forward(state_in_torch).cpu()\n return action_probs\n\nclass Policy_REINFORCE(nn.Module):\n \"\"\"\n This class implements a Neural Network as a function approximator for a policy.\n \"\"\"\n \n def __init__(self, state_size=33, action_size=4, seed=0):\n \n # We need to initialize the nn.Module class within Policy(). \n super(Policy_REINFORCE, self).__init__() # The super initializes the nn.Module parentclass \n h1_size=32\n h2_size=64\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n \n self.fc1 = nn.Linear(state_size, h1_size) # First layer\n self.fc2 = nn.Linear(h1_size, h2_size) # Second layer\n \n # The output layer gives the mean and variance values. Thus action_size*2 are the output values.\n self.fc3 = nn.Linear(h2_size, action_size*2) # Output layer \n \n def forward(self, state):\n \n h1_state = F.relu(self.fc1(state))\n h2_state = F.relu(self.fc2(h1_state))\n action_probs = F.softmax(self.fc3(h2_state), dim=1)\n return action_probs\n \n def act(self, state):\n # Convert the state (as a numpy array) into a torch tensor \n state_in_torch = torch.from_numpy(state).float().unsqueeze(0).to(device)\n \n # Pass the input state from the network and get action probs\n action_probs = self.forward(state_in_torch).cpu()\n \n return action_probs\n \n \n \ndef collect_random_trajectory(env, policy, tmax=500):\n pass\n\n\ndef collect_trajectory_REINFORCE(env, policy, tmax=500):\n #initialize returning lists and start the game!\n state_list=[]\n reward_list=[]\n action_vals_list=[]\n actions = []\n \n \n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n \n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n \n # For the number of time steps\n for t in range(tmax):\n \n action_vals = policy.act(state).detach().numpy() # select an action\n \n action = []\n # Draw the actions from normal distribution\n for i in range(0,len(action_vals[0]),2):\n mean = action_vals[0][i]\n variance = action_vals[0][i+1]\n a = np.random.normal(mean,variance) \n action.append(a)\n \n action = np.clip(action, -1, 1) # all actions between -1 and 1\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n \n # store the result\n state_list.append(state)\n reward_list.append(reward)\n action_vals_list.append(action_vals)\n actions.append(action)\n \n state = next_state # roll over the state to next time step\n \n # stop if any of the trajectories is done\n # we want all the lists to be retangular\n if done:\n break\n\n action_vals_list = torch.Tensor(action_vals_list).to(device)\n \n # return action vals, states, rewards\n return action_vals_list, actions , state_list, reward_list\n\n\n\ndef collect_trajectory(env, policy, tmax=500):\n #initialize returning lists and start the game!\n state_list=[]\n reward_list=[]\n action_vals_list=[]\n \n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n \n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n \n # For the number of time steps\n for t in range(tmax):\n \n action_val = policy.act(state) # select an action\n #actions = np.clip(action_val.detach().numpy(), -1, 1) # all actions between -1 and 1\n env_info = env.step(action_val.detach().numpy())[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n \n # store the result\n state_list.append(state)\n reward_list.append(reward)\n action_vals_list.append(action_val)\n \n state = next_state # roll over the state to next time step\n \n # stop if any of the trajectories is done\n # we want all the lists to be retangular\n if done:\n break\n\n # return action vals, states, rewards\n return action_vals_list, state_list, reward_list\n\n\n\n" ]
[ [ "torch.nn.Linear", "numpy.random.normal", "torch.from_numpy", "torch.cuda.is_available", "numpy.clip", "torch.Tensor" ] ]
dennyzz/Pathfinder-v2
[ "e7853c1b86d0c76ebbf7c1b68aa76fadcf37f7fc" ]
[ "python/misc.py" ]
[ "#import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\nimport numpy as np\nimport motorshield\nimport VL53L0X\n\n# # initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.resolution = (320, 240)\ncamera.framerate = 30\nrawCapture = PiRGBArray(camera, size=(320, 240))\n\n# Create a VL53L0X object\ntof = VL53L0X.VL53L0X()\n\n# Start ranging\ntof.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)\n \n# # allow the camera to warmup\ntime.sleep(0.1)\n \n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n distance = tof.get_distance()\n if (distance < 150 and distance != -1):\n # stop the motors from running\n motorcmd(1,0,N,1)\n motorcmd(2,0,S,1)\n print(\"Am breaking\")\n break\n else:\n image = frame.array\n left = frame[0:ysize, 0:int(xsize/2)]\n right = frame[0:ysize, int(xsize/2):xsize]\n\n grayright = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)\n grayleft = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)\n\n edgesl = cv2.Canny(grayleft,75,100,apertureSize = 3)\n edgesr = cv2.Canny(grayright,75,100,apertureSize = 3)\n minLineLength = 100\n maxLineGap = 10\n linesl = cv2.HoughLines(edgesl,35,np.pi/180,100,minLineLength,maxLineGap)\n linesr = cv2.HoughLines(edgesr,35,np.pi/180,100,minLineLength,maxLineGap)\n\n if linesr != None:\n for rho,theta in linesr[0]:\n\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv2.line(right,(x1,y1),(x2,y2),(0,0,255),2)\n\n sloper = (y1 - y2) / ((x2 - x1) + 0.0000001)\n\n if linesl != None:\n # print(linesl)\n for rho,theta in linesl[0]:\n\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv2.line(left,(x1,y1),(x2,y2),(0,0,255),2)\n\n slopel = (y1 - y2) / ((x2 - x1) + 0.0000001)\n\n\n if sloper > 75 and slopel > 75:\n motorcmd(1,2048,N,0)\n motorcmd(2,2048,S,0)\n\n elif sloper < -75 and slopel < -75:\n motorcmd(1,2048,N,0)\n motorcmd(2,2048,S,0)\n\n elif sloper < 75 and sloper > 50 and slopel < 75 and slopel > 50:\n motorcmd(1,2458,N,0)\n motorcmd(2,1638,S,0)\n\n elif sloper > -75 and sloper < -50 and slopel > -75 and slopel < -50:\n motorcmd(1,1638,N,0)\n motorcmd(2,2458,S,0)\n\n elif sloper < 50 and sloper > 25 and slopel < 50 and slopel > 25:\n motorcmd(1,3277,N,0)\n motorcmd(1,819,S,0)\n\n elif sloper > -50 and sloper < -25 and slopel > -50 and slopel < -25:\n motorcmd(1,819,N,0)\n motorcmd(2,3277,S,0)\n\n \n cv2.imshow('cannyl',edgesl)\n cv2.imshow('cannyr',edgesr)\n cv2.imshow('left', left)\n cv2.imshow('right', right)\n\n print(\" %s \" %(time.time() - start_time))\n\n # show the frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n\n #if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\ncap.release()\n" ]
[ [ "numpy.sin", "numpy.cos" ] ]
NeoBert/czipline
[ "e7a5e097c419bed7816d3cd6c370b5171db37b33" ]
[ "zipline/finance/risk/cumulative.py" ]
[ "#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport logbook\nimport math\nimport numpy as np\n\nfrom zipline.finance import trading\nimport zipline.utils.math_utils as zp_math\n\nimport pandas as pd\nfrom pandas.tseries.tools import normalize_date\n\nfrom six import iteritems\n\nfrom . risk import (\n alpha,\n check_entry,\n choose_treasury,\n downside_risk,\n sharpe_ratio,\n sortino_ratio,\n)\n\nlog = logbook.Logger('Risk Cumulative')\n\n\nchoose_treasury = functools.partial(choose_treasury, lambda *args: '10year',\n compound=False)\n\n\ndef information_ratio(algo_volatility, algorithm_return, benchmark_return):\n \"\"\"\n http://en.wikipedia.org/wiki/Information_ratio\n\n Args:\n algorithm_returns (np.array-like):\n All returns during algorithm lifetime.\n benchmark_returns (np.array-like):\n All benchmark returns during algo lifetime.\n\n Returns:\n float. Information ratio.\n \"\"\"\n if zp_math.tolerant_equals(algo_volatility, 0):\n return np.nan\n\n return (\n (algorithm_return - benchmark_return)\n # The square of the annualization factor is in the volatility,\n # because the volatility is also annualized,\n # i.e. the sqrt(annual factor) is in the volatility's numerator.\n # So to have the the correct annualization factor for the\n # Sharpe value's numerator, which should be the sqrt(annual factor).\n # The square of the sqrt of the annual factor, i.e. the annual factor\n # itself, is needed in the numerator to factor out the division by\n # its square root.\n / algo_volatility)\n\n\nclass RiskMetricsCumulative(object):\n \"\"\"\n :Usage:\n Instantiate RiskMetricsCumulative once.\n Call update() method on each dt to update the metrics.\n \"\"\"\n\n METRIC_NAMES = (\n 'alpha',\n 'beta',\n 'sharpe',\n 'algorithm_volatility',\n 'benchmark_volatility',\n 'downside_risk',\n 'sortino',\n 'information',\n )\n\n def __init__(self, sim_params,\n returns_frequency=None,\n create_first_day_stats=False):\n \"\"\"\n - @returns_frequency allows for configuration of the whether\n the benchmark and algorithm returns are in units of minutes or days,\n if `None` defaults to the `emission_rate` in `sim_params`.\n \"\"\"\n\n self.treasury_curves = trading.environment.treasury_curves\n self.start_date = sim_params.period_start.replace(\n hour=0, minute=0, second=0, microsecond=0\n )\n self.end_date = sim_params.period_end.replace(\n hour=0, minute=0, second=0, microsecond=0\n )\n\n self.trading_days = trading.environment.days_in_range(\n self.start_date,\n self.end_date)\n\n # Hold on to the trading day before the start,\n # used for index of the zero return value when forcing returns\n # on the first day.\n self.day_before_start = self.start_date - \\\n trading.environment.trading_days.freq\n\n last_day = normalize_date(sim_params.period_end)\n if last_day not in self.trading_days:\n last_day = pd.tseries.index.DatetimeIndex(\n [last_day]\n )\n self.trading_days = self.trading_days.append(last_day)\n\n self.sim_params = sim_params\n\n self.create_first_day_stats = create_first_day_stats\n\n if returns_frequency is None:\n returns_frequency = self.sim_params.emission_rate\n\n self.returns_frequency = returns_frequency\n\n if returns_frequency == 'daily':\n cont_index = self.get_daily_index()\n elif returns_frequency == 'minute':\n cont_index = self.get_minute_index(sim_params)\n\n self.cont_index = cont_index\n\n self.algorithm_returns_cont = pd.Series(index=cont_index)\n self.benchmark_returns_cont = pd.Series(index=cont_index)\n self.mean_returns_cont = pd.Series(index=cont_index)\n self.annualized_mean_returns_cont = pd.Series(index=cont_index)\n self.mean_benchmark_returns_cont = pd.Series(index=cont_index)\n self.annualized_mean_benchmark_returns_cont = pd.Series(\n index=cont_index)\n\n # The returns at a given time are read and reset from the respective\n # returns container.\n self.algorithm_returns = None\n self.benchmark_returns = None\n self.mean_returns = None\n self.annualized_mean_returns = None\n self.mean_benchmark_returns = None\n self.annualized_mean_benchmark_returns = None\n\n self.algorithm_cumulative_returns = pd.Series(index=cont_index)\n self.benchmark_cumulative_returns = pd.Series(index=cont_index)\n self.excess_returns = pd.Series(index=cont_index)\n\n self.latest_dt = cont_index[0]\n\n self.metrics = pd.DataFrame(index=cont_index,\n columns=self.METRIC_NAMES)\n\n self.drawdowns = pd.Series(index=cont_index)\n self.max_drawdowns = pd.Series(index=cont_index)\n self.max_drawdown = 0\n self.current_max = -np.inf\n self.daily_treasury = pd.Series(index=self.trading_days)\n self.treasury_period_return = np.nan\n\n self.num_trading_days = 0\n\n def get_minute_index(self, sim_params):\n \"\"\"\n Stitches together multiple days worth of business minutes into\n one continous index.\n \"\"\"\n trading_minutes = None\n for day in self.trading_days:\n minutes_for_day = trading.environment.market_minutes_for_day(day)\n if trading_minutes is None:\n # Create container for all minutes on first iteration\n trading_minutes = minutes_for_day\n else:\n trading_minutes = trading_minutes + minutes_for_day\n return trading_minutes\n\n def get_daily_index(self):\n return self.trading_days\n\n def update(self, dt, algorithm_returns, benchmark_returns):\n # Keep track of latest dt for use in to_dict and other methods\n # that report current state.\n self.latest_dt = dt\n\n self.algorithm_returns_cont[dt] = algorithm_returns\n self.algorithm_returns = self.algorithm_returns_cont[:dt]\n\n self.num_trading_days = len(self.algorithm_returns)\n\n if self.create_first_day_stats:\n if len(self.algorithm_returns) == 1:\n self.algorithm_returns = pd.Series(\n {self.day_before_start: 0.0}).append(\n self.algorithm_returns)\n\n self.algorithm_cumulative_returns[dt] = \\\n self.calculate_cumulative_returns(self.algorithm_returns)\n\n algo_cumulative_returns_to_date = \\\n self.algorithm_cumulative_returns[:dt]\n\n self.mean_returns_cont[dt] = \\\n algo_cumulative_returns_to_date[dt] / self.num_trading_days\n\n self.mean_returns = self.mean_returns_cont[:dt]\n\n self.annualized_mean_returns_cont[dt] = \\\n self.mean_returns_cont[dt] * 252\n\n self.annualized_mean_returns = self.annualized_mean_returns_cont[:dt]\n\n if self.create_first_day_stats:\n if len(self.mean_returns) == 1:\n self.mean_returns = pd.Series(\n {self.day_before_start: 0.0}).append(self.mean_returns)\n self.annualized_mean_returns = pd.Series(\n {self.day_before_start: 0.0}).append(\n self.annualized_mean_returns)\n\n self.benchmark_returns_cont[dt] = benchmark_returns\n self.benchmark_returns = self.benchmark_returns_cont[:dt]\n\n if self.create_first_day_stats:\n if len(self.benchmark_returns) == 1:\n self.benchmark_returns = pd.Series(\n {self.day_before_start: 0.0}).append(\n self.benchmark_returns)\n\n self.benchmark_cumulative_returns[dt] = \\\n self.calculate_cumulative_returns(self.benchmark_returns)\n\n benchmark_cumulative_returns_to_date = \\\n self.benchmark_cumulative_returns[:dt]\n\n self.mean_benchmark_returns_cont[dt] = \\\n benchmark_cumulative_returns_to_date[dt] / self.num_trading_days\n\n self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt]\n\n self.annualized_mean_benchmark_returns_cont[dt] = \\\n self.mean_benchmark_returns_cont[dt] * 252\n\n self.annualized_mean_benchmark_returns = \\\n self.annualized_mean_benchmark_returns_cont[:dt]\n\n if not self.algorithm_returns.index.equals(\n self.benchmark_returns.index\n ):\n message = \"Mismatch between benchmark_returns ({bm_count}) and \\\nalgorithm_returns ({algo_count}) in range {start} : {end} on {dt}\"\n message = message.format(\n bm_count=len(self.benchmark_returns),\n algo_count=len(self.algorithm_returns),\n start=self.start_date,\n end=self.end_date,\n dt=dt\n )\n raise Exception(message)\n\n self.update_current_max()\n self.metrics.benchmark_volatility[dt] = \\\n self.calculate_volatility(self.benchmark_returns)\n self.metrics.algorithm_volatility[dt] = \\\n self.calculate_volatility(self.algorithm_returns)\n\n # caching the treasury rates for the minutely case is a\n # big speedup, because it avoids searching the treasury\n # curves on every minute.\n # In both minutely and daily, the daily curve is always used.\n treasury_end = dt.replace(hour=0, minute=0)\n if np.isnan(self.daily_treasury[treasury_end]):\n treasury_period_return = choose_treasury(\n self.treasury_curves,\n self.start_date,\n treasury_end\n )\n self.daily_treasury[treasury_end] = treasury_period_return\n self.treasury_period_return = self.daily_treasury[treasury_end]\n self.excess_returns[self.latest_dt] = (\n self.algorithm_cumulative_returns[self.latest_dt]\n -\n self.treasury_period_return)\n self.metrics.beta[dt] = self.calculate_beta()\n self.metrics.alpha[dt] = self.calculate_alpha()\n self.metrics.sharpe[dt] = self.calculate_sharpe()\n self.metrics.downside_risk[dt] = self.calculate_downside_risk()\n self.metrics.sortino[dt] = self.calculate_sortino()\n self.metrics.information[dt] = self.calculate_information()\n self.max_drawdown = self.calculate_max_drawdown()\n self.max_drawdowns[dt] = self.max_drawdown\n\n def to_dict(self):\n \"\"\"\n Creates a dictionary representing the state of the risk report.\n Returns a dict object of the form:\n \"\"\"\n dt = self.latest_dt\n period_label = dt.strftime(\"%Y-%m\")\n rval = {\n 'trading_days': self.num_trading_days,\n 'benchmark_volatility': self.metrics.benchmark_volatility[dt],\n 'algo_volatility': self.metrics.algorithm_volatility[dt],\n 'treasury_period_return': self.treasury_period_return,\n # Though the two following keys say period return,\n # they would be more accurately called the cumulative return.\n # However, the keys need to stay the same, for now, for backwards\n # compatibility with existing consumers.\n 'algorithm_period_return': self.algorithm_cumulative_returns[dt],\n 'benchmark_period_return': self.benchmark_cumulative_returns[dt],\n 'beta': self.metrics.beta[dt],\n 'alpha': self.metrics.alpha[dt],\n 'sharpe': self.metrics.sharpe[dt],\n 'sortino': self.metrics.sortino[dt],\n 'information': self.metrics.information[dt],\n 'excess_return': self.excess_returns[dt],\n 'max_drawdown': self.max_drawdown,\n 'period_label': period_label\n }\n\n return {k: (None if check_entry(k, v) else v)\n for k, v in iteritems(rval)}\n\n def __repr__(self):\n statements = []\n for metric in self.METRIC_NAMES:\n value = getattr(self.metrics, metric)[-1]\n if isinstance(value, list):\n if len(value) == 0:\n value = np.nan\n else:\n value = value[-1]\n statements.append(\"{m}:{v}\".format(m=metric, v=value))\n\n return '\\n'.join(statements)\n\n def calculate_cumulative_returns(self, returns):\n return (1. + returns).prod() - 1\n\n def update_current_max(self):\n if len(self.algorithm_cumulative_returns) == 0:\n return\n current_cumulative_return = \\\n self.algorithm_cumulative_returns[self.latest_dt]\n if self.current_max < current_cumulative_return:\n self.current_max = current_cumulative_return\n\n def calculate_max_drawdown(self):\n if len(self.algorithm_cumulative_returns) == 0:\n return self.max_drawdown\n\n # The drawdown is defined as: (high - low) / high\n # The above factors out to: 1.0 - (low / high)\n #\n # Instead of explicitly always using the low, use the current total\n # return value, and test that against the max drawdown, which will\n # exceed the previous max_drawdown iff the current return is lower than\n # the previous low in the current drawdown window.\n cur_drawdown = 1.0 - (\n (1.0 + self.algorithm_cumulative_returns[self.latest_dt])\n /\n (1.0 + self.current_max))\n\n self.drawdowns[self.latest_dt] = cur_drawdown\n\n if self.max_drawdown < cur_drawdown:\n return cur_drawdown\n else:\n return self.max_drawdown\n\n def calculate_sharpe(self):\n \"\"\"\n http://en.wikipedia.org/wiki/Sharpe_ratio\n \"\"\"\n return sharpe_ratio(self.metrics.algorithm_volatility[self.latest_dt],\n self.annualized_mean_returns[self.latest_dt],\n self.daily_treasury[self.latest_dt.date()])\n\n def calculate_sortino(self):\n \"\"\"\n http://en.wikipedia.org/wiki/Sortino_ratio\n \"\"\"\n return sortino_ratio(self.annualized_mean_returns[self.latest_dt],\n self.daily_treasury[self.latest_dt.date()],\n self.metrics.downside_risk[self.latest_dt])\n\n def calculate_information(self):\n \"\"\"\n http://en.wikipedia.org/wiki/Information_ratio\n \"\"\"\n return information_ratio(\n self.metrics.algorithm_volatility[self.latest_dt],\n self.annualized_mean_returns[self.latest_dt],\n self.annualized_mean_benchmark_returns[self.latest_dt])\n\n def calculate_alpha(self):\n \"\"\"\n http://en.wikipedia.org/wiki/Alpha_(investment)\n \"\"\"\n return alpha(self.annualized_mean_returns[self.latest_dt],\n self.treasury_period_return,\n self.annualized_mean_benchmark_returns[self.latest_dt],\n self.metrics.beta[self.latest_dt])\n\n def calculate_volatility(self, daily_returns):\n if len(daily_returns) <= 1:\n return 0.0\n return np.std(daily_returns, ddof=1) * math.sqrt(252)\n\n def calculate_downside_risk(self):\n return downside_risk(self.algorithm_returns,\n self.mean_returns,\n 252)\n\n def calculate_beta(self):\n \"\"\"\n\n .. math::\n\n \\\\beta_a = \\\\frac{\\mathrm{Cov}(r_a,r_p)}{\\mathrm{Var}(r_p)}\n\n http://en.wikipedia.org/wiki/Beta_(finance)\n \"\"\"\n # it doesn't make much sense to calculate beta for less than two days,\n # so return none.\n if len(self.annualized_mean_returns) < 2:\n return 0.0\n\n returns_matrix = np.vstack([self.algorithm_returns,\n self.benchmark_returns])\n C = np.cov(returns_matrix, ddof=1)\n algorithm_covariance = C[0][1]\n benchmark_variance = C[1][1]\n beta = algorithm_covariance / benchmark_variance\n\n return beta\n" ]
[ [ "numpy.isnan", "numpy.cov", "pandas.tseries.index.DatetimeIndex", "pandas.DataFrame", "numpy.std", "pandas.Series", "pandas.tseries.tools.normalize_date", "numpy.vstack" ] ]
epizzigoni/pandas
[ "3b66021ecb74da2c35e16958121bd224d5de5264" ]
[ "pandas/tests/indexes/period/test_period.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n NaT,\n Period,\n PeriodIndex,\n Series,\n date_range,\n offsets,\n period_range,\n)\nimport pandas._testing as tm\n\nfrom ..datetimelike import DatetimeLike\n\n\nclass TestPeriodIndex(DatetimeLike):\n _holder = PeriodIndex\n\n @pytest.fixture(\n params=[\n tm.makePeriodIndex(10),\n period_range(\"20130101\", periods=10, freq=\"D\")[::-1],\n ],\n ids=[\"index_inc\", \"index_dec\"],\n )\n def indices(self, request):\n return request.param\n\n def create_index(self) -> PeriodIndex:\n return period_range(\"20130101\", periods=5, freq=\"D\")\n\n def test_pickle_compat_construction(self):\n pass\n\n @pytest.mark.parametrize(\"freq\", [\"D\", \"M\", \"A\"])\n def test_pickle_round_trip(self, freq):\n idx = PeriodIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], freq=freq)\n result = tm.round_trip_pickle(idx)\n tm.assert_index_equal(result, idx)\n\n def test_where(self):\n # This is handled in test_indexing\n pass\n\n @pytest.mark.parametrize(\"use_numpy\", [True, False])\n @pytest.mark.parametrize(\n \"index\",\n [\n period_range(\"2000-01-01\", periods=3, freq=\"D\"),\n period_range(\"2001-01-01\", periods=3, freq=\"2D\"),\n PeriodIndex([\"2001-01\", \"NaT\", \"2003-01\"], freq=\"M\"),\n ],\n )\n def test_repeat_freqstr(self, index, use_numpy):\n # GH10183\n expected = PeriodIndex([p for p in index for _ in range(3)])\n result = np.repeat(index, 3) if use_numpy else index.repeat(3)\n tm.assert_index_equal(result, expected)\n assert result.freqstr == index.freqstr\n\n def test_fillna_period(self):\n # GH 11343\n idx = PeriodIndex([\"2011-01-01 09:00\", NaT, \"2011-01-01 11:00\"], freq=\"H\")\n\n exp = PeriodIndex(\n [\"2011-01-01 09:00\", \"2011-01-01 10:00\", \"2011-01-01 11:00\"], freq=\"H\"\n )\n tm.assert_index_equal(idx.fillna(Period(\"2011-01-01 10:00\", freq=\"H\")), exp)\n\n exp = Index(\n [\n Period(\"2011-01-01 09:00\", freq=\"H\"),\n \"x\",\n Period(\"2011-01-01 11:00\", freq=\"H\"),\n ],\n dtype=object,\n )\n tm.assert_index_equal(idx.fillna(\"x\"), exp)\n\n exp = Index(\n [\n Period(\"2011-01-01 09:00\", freq=\"H\"),\n Period(\"2011-01-01\", freq=\"D\"),\n Period(\"2011-01-01 11:00\", freq=\"H\"),\n ],\n dtype=object,\n )\n tm.assert_index_equal(idx.fillna(Period(\"2011-01-01\", freq=\"D\")), exp)\n\n def test_no_millisecond_field(self):\n msg = \"type object 'DatetimeIndex' has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex.millisecond\n\n msg = \"'DatetimeIndex' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex([]).millisecond\n\n def test_hash_error(self):\n index = period_range(\"20010101\", periods=10)\n msg = f\"unhashable type: '{type(index).__name__}'\"\n with pytest.raises(TypeError, match=msg):\n hash(index)\n\n def test_make_time_series(self):\n index = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n series = Series(1, index=index)\n assert isinstance(series, Series)\n\n def test_shallow_copy_empty(self):\n # GH13067\n idx = PeriodIndex([], freq=\"M\")\n result = idx._shallow_copy()\n expected = idx\n\n tm.assert_index_equal(result, expected)\n\n def test_shallow_copy_disallow_i8(self):\n # GH-24391\n pi = period_range(\"2018-01-01\", periods=3, freq=\"2D\")\n with pytest.raises(AssertionError, match=\"ndarray\"):\n pi._shallow_copy(pi.asi8)\n\n def test_shallow_copy_requires_disallow_period_index(self):\n pi = period_range(\"2018-01-01\", periods=3, freq=\"2D\")\n with pytest.raises(AssertionError, match=\"PeriodIndex\"):\n pi._shallow_copy(pi)\n\n def test_view_asi8(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n def test_values(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=np.object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx._ndarray_values, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([Period(\"2011-01\", freq=\"M\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx._ndarray_values, exp)\n\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n\n exp = np.array([Period(\"2011-01-01\", freq=\"D\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx._ndarray_values, exp)\n\n def test_period_index_length(self):\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 9\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 4 * 9\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 12 * 9\n\n start = Period(\"02-Apr-2005\", \"B\")\n i1 = period_range(start=start, periods=20)\n assert len(i1) == 20\n assert i1.freq == start.freq\n assert i1[0] == start\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n assert len(i1) == 10\n assert i1.freq == end_intv.freq\n assert i1[-1] == end_intv\n\n end_intv = Period(\"2006-12-31\", \"1w\")\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n end_intv = Period(\"2006-12-31\", (\"w\", 1))\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n msg = \"start and end must have same freq\"\n with pytest.raises(ValueError, match=msg):\n period_range(start=start, end=end_intv)\n\n end_intv = Period(\"2005-05-01\", \"B\")\n i1 = period_range(start=start, end=end_intv)\n\n msg = (\n \"Of the three parameters: start, end, and periods, exactly two \"\n \"must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n period_range(start=start)\n\n # infer freq from first element\n i2 = PeriodIndex([end_intv, Period(\"2005-05-05\", \"B\")])\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n i2 = PeriodIndex(np.array([end_intv, Period(\"2005-05-05\", \"B\")]))\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n # Mixed freq should fail\n vals = [end_intv, Period(\"2006-12-31\", \"w\")]\n msg = r\"Input has different freq=W-SUN from PeriodIndex\\(freq=B\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n PeriodIndex(vals)\n vals = np.array(vals)\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(vals)\n\n def test_fields(self):\n # year, month, day, hour, minute\n # second, weekofyear, week, dayofweek, weekday, dayofyear, quarter\n # qyear\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2005\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"1/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"D\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"B\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"H\", start=\"12/31/2001\", end=\"1/1/2002 23:00\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Min\", start=\"12/31/2001\", end=\"1/1/2002 00:20\")\n self._check_all_fields(pi)\n\n pi = period_range(\n freq=\"S\", start=\"12/31/2001 00:00:00\", end=\"12/31/2001 00:05:00\"\n )\n self._check_all_fields(pi)\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n self._check_all_fields(i1)\n\n def _check_all_fields(self, periodindex):\n fields = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"weekofyear\",\n \"week\",\n \"dayofweek\",\n \"dayofyear\",\n \"quarter\",\n \"qyear\",\n \"days_in_month\",\n ]\n\n periods = list(periodindex)\n s = pd.Series(periodindex)\n\n for field in fields:\n field_idx = getattr(periodindex, field)\n assert len(periodindex) == len(field_idx)\n for x, val in zip(periods, field_idx):\n assert getattr(x, field) == val\n\n if len(s) == 0:\n continue\n\n field_s = getattr(s.dt, field)\n assert len(periodindex) == len(field_s)\n for x, val in zip(periods, field_s):\n assert getattr(x, field) == val\n\n def test_period_set_index_reindex(self):\n # GH 6631\n df = DataFrame(np.random.random(6))\n idx1 = period_range(\"2011/01/01\", periods=6, freq=\"M\")\n idx2 = period_range(\"2013\", periods=6, freq=\"A\")\n\n df = df.set_index(idx1)\n tm.assert_index_equal(df.index, idx1)\n df = df.set_index(idx2)\n tm.assert_index_equal(df.index, idx2)\n\n @pytest.mark.parametrize(\n \"p_values, o_values, values, expected_values\",\n [\n (\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\"), \"All\"],\n [1.0, 1.0],\n [1.0, 1.0, np.nan],\n ),\n (\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [1.0, 1.0],\n [1.0, 1.0],\n ),\n ],\n )\n def test_period_reindex_with_object(\n self, p_values, o_values, values, expected_values\n ):\n # GH 28337\n period_index = PeriodIndex(p_values)\n object_index = Index(o_values)\n\n s = pd.Series(values, index=period_index)\n result = s.reindex(object_index)\n expected = pd.Series(expected_values, index=object_index)\n tm.assert_series_equal(result, expected)\n\n def test_factorize(self):\n idx1 = PeriodIndex(\n [\"2014-01\", \"2014-01\", \"2014-02\", \"2014-02\", \"2014-03\", \"2014-03\"], freq=\"M\"\n )\n\n exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)\n exp_idx = PeriodIndex([\"2014-01\", \"2014-02\", \"2014-03\"], freq=\"M\")\n\n arr, idx = idx1.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n arr, idx = idx1.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n idx2 = PeriodIndex(\n [\"2014-03\", \"2014-03\", \"2014-02\", \"2014-01\", \"2014-03\", \"2014-01\"], freq=\"M\"\n )\n\n exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)\n arr, idx = idx2.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)\n exp_idx = PeriodIndex([\"2014-03\", \"2014-02\", \"2014-01\"], freq=\"M\")\n arr, idx = idx2.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n def test_is_(self):\n create_index = lambda: period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n index = create_index()\n assert index.is_(index)\n assert not index.is_(create_index())\n assert index.is_(index.view())\n assert index.is_(index.view().view().view().view().view())\n assert index.view().is_(index)\n ind2 = index.view()\n index.name = \"Apple\"\n assert ind2.is_(index)\n assert not index.is_(index[:])\n assert not index.is_(index.asfreq(\"M\"))\n assert not index.is_(index.asfreq(\"A\"))\n\n assert not index.is_(index - 2)\n assert not index.is_(index - 0)\n\n def test_contains(self):\n rng = period_range(\"2007-01\", freq=\"M\", periods=10)\n\n assert Period(\"2007-01\", freq=\"M\") in rng\n assert not Period(\"2007-01\", freq=\"D\") in rng\n assert not Period(\"2007-01\", freq=\"2M\") in rng\n\n def test_contains_nat(self):\n # see gh-13582\n idx = period_range(\"2007-01\", freq=\"M\", periods=10)\n assert NaT not in idx\n assert None not in idx\n assert float(\"nan\") not in idx\n assert np.nan not in idx\n\n idx = PeriodIndex([\"2011-01\", \"NaT\", \"2011-02\"], freq=\"M\")\n assert NaT in idx\n assert None in idx\n assert float(\"nan\") in idx\n assert np.nan in idx\n\n def test_periods_number_check(self):\n msg = (\n \"Of the three parameters: start, end, and periods, exactly two \"\n \"must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n period_range(\"2011-1-1\", \"2012-1-1\", \"B\")\n\n def test_index_duplicate_periods(self):\n # monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[1:3]\n tm.assert_series_equal(result, expected)\n result[:] = 1\n assert (ts[1:3] == 1).all()\n\n # not monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[idx == \"2007\"]\n tm.assert_series_equal(result, expected)\n\n def test_index_unique(self):\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n expected = PeriodIndex([2000, 2007, 2009], freq=\"A-JUN\")\n tm.assert_index_equal(idx.unique(), expected)\n assert idx.nunique() == 3\n\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq=\"A-JUN\", tz=\"US/Eastern\")\n expected = PeriodIndex([2000, 2007, 2009], freq=\"A-JUN\", tz=\"US/Eastern\")\n tm.assert_index_equal(idx.unique(), expected)\n assert idx.nunique() == 3\n\n def test_shift(self):\n # This is tested in test_arithmetic\n pass\n\n @td.skip_if_32bit\n def test_ndarray_compat_properties(self):\n super().test_ndarray_compat_properties()\n\n def test_negative_ordinals(self):\n Period(ordinal=-1000, freq=\"A\")\n Period(ordinal=0, freq=\"A\")\n\n idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq=\"A\")\n idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq=\"A\")\n tm.assert_index_equal(idx1, idx2)\n\n def test_pindex_fieldaccessor_nat(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2012-03\", \"2012-04\"], freq=\"D\", name=\"name\"\n )\n\n exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.year, exp)\n exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.month, exp)\n\n def test_pindex_qaccess(self):\n pi = PeriodIndex([\"2Q05\", \"3Q05\", \"4Q05\", \"1Q06\", \"2Q06\"], freq=\"Q\")\n s = Series(np.random.rand(len(pi)), index=pi).cumsum()\n # Todo: fix these accessors!\n assert s[\"05Q4\"] == s[2]\n\n def test_pindex_multiples(self):\n expected = PeriodIndex(\n [\"2011-01\", \"2011-03\", \"2011-05\", \"2011-07\", \"2011-09\", \"2011-11\"],\n freq=\"2M\",\n )\n\n pi = period_range(start=\"1/1/11\", end=\"12/31/11\", freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n pi = period_range(start=\"1/1/11\", periods=6, freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n def test_iteration(self):\n index = period_range(start=\"1/1/10\", periods=4, freq=\"B\")\n\n result = list(index)\n assert isinstance(result[0], Period)\n assert result[0].freq == index.freq\n\n def test_is_full(self):\n index = PeriodIndex([2005, 2007, 2009], freq=\"A\")\n assert not index.is_full\n\n index = PeriodIndex([2005, 2006, 2007], freq=\"A\")\n assert index.is_full\n\n index = PeriodIndex([2005, 2005, 2007], freq=\"A\")\n assert not index.is_full\n\n index = PeriodIndex([2005, 2005, 2006], freq=\"A\")\n assert index.is_full\n\n index = PeriodIndex([2006, 2005, 2005], freq=\"A\")\n with pytest.raises(ValueError, match=\"Index is not monotonic\"):\n index.is_full\n\n assert index[:0].is_full\n\n def test_with_multi_index(self):\n # #1705\n index = date_range(\"1/1/2012\", periods=4, freq=\"12H\")\n index_as_arrays = [index.to_period(freq=\"D\"), index.hour]\n\n s = Series([0, 1, 2, 3], index_as_arrays)\n\n assert isinstance(s.index.levels[0], PeriodIndex)\n\n assert isinstance(s.index.values[0][0], Period)\n\n def test_convert_array_of_periods(self):\n rng = period_range(\"1/1/2000\", periods=20, freq=\"D\")\n periods = list(rng)\n\n result = Index(periods)\n assert isinstance(result, PeriodIndex)\n\n def test_append_concat(self):\n # #1815\n d1 = date_range(\"12/31/1990\", \"12/31/1999\", freq=\"A-DEC\")\n d2 = date_range(\"12/31/2000\", \"12/31/2009\", freq=\"A-DEC\")\n\n s1 = Series(np.random.randn(10), d1)\n s2 = Series(np.random.randn(10), d2)\n\n s1 = s1.to_period()\n s2 = s2.to_period()\n\n # drops index\n result = pd.concat([s1, s2])\n assert isinstance(result.index, PeriodIndex)\n assert result.index[0] == s1.index[0]\n\n def test_pickle_freq(self):\n # GH2891\n prng = period_range(\"1/1/2011\", \"1/1/2012\", freq=\"M\")\n new_prng = tm.round_trip_pickle(prng)\n assert new_prng.freq == offsets.MonthEnd()\n assert new_prng.freqstr == \"M\"\n\n def test_map(self):\n # test_map_dictlike generally tests\n\n index = PeriodIndex([2005, 2007, 2009], freq=\"A\")\n result = index.map(lambda x: x.ordinal)\n exp = Index([x.ordinal for x in index])\n tm.assert_index_equal(result, exp)\n\n def test_insert(self):\n # GH 18295 (test missing)\n expected = PeriodIndex([\"2017Q1\", NaT, \"2017Q2\", \"2017Q3\", \"2017Q4\"], freq=\"Q\")\n for na in (np.nan, NaT, None):\n result = period_range(\"2017Q1\", periods=4, freq=\"Q\").insert(1, na)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"msg, key\",\n [\n (r\"Period\\('2019', 'A-DEC'\\), 'foo', 'bar'\", (Period(2019), \"foo\", \"bar\")),\n (r\"Period\\('2019', 'A-DEC'\\), 'y1', 'bar'\", (Period(2019), \"y1\", \"bar\")),\n (r\"Period\\('2019', 'A-DEC'\\), 'foo', 'z1'\", (Period(2019), \"foo\", \"z1\")),\n (\n r\"Period\\('2018', 'A-DEC'\\), Period\\('2016', 'A-DEC'\\), 'bar'\",\n (Period(2018), Period(2016), \"bar\"),\n ),\n (r\"Period\\('2018', 'A-DEC'\\), 'foo', 'y1'\", (Period(2018), \"foo\", \"y1\")),\n (\n r\"Period\\('2017', 'A-DEC'\\), 'foo', Period\\('2015', 'A-DEC'\\)\",\n (Period(2017), \"foo\", Period(2015)),\n ),\n (r\"Period\\('2017', 'A-DEC'\\), 'z1', 'bar'\", (Period(2017), \"z1\", \"bar\")),\n ],\n )\n def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):\n # issue 20684\n \"\"\"\n parse_time_string return parameter if type not matched.\n PeriodIndex.get_loc takes returned value from parse_time_string as a tuple.\n If first argument is Period and a tuple has 3 items,\n process go on not raise exception\n \"\"\"\n df = DataFrame(\n {\n \"A\": [Period(2019), \"x1\", \"x2\"],\n \"B\": [Period(2018), Period(2016), \"y1\"],\n \"C\": [Period(2017), \"z1\", Period(2015)],\n \"V1\": [1, 2, 3],\n \"V2\": [10, 20, 30],\n }\n ).set_index([\"A\", \"B\", \"C\"])\n with pytest.raises(KeyError, match=msg):\n df.loc[key]\n\n\ndef test_maybe_convert_timedelta():\n pi = PeriodIndex([\"2000\", \"2001\"], freq=\"D\")\n offset = offsets.Day(2)\n assert pi._maybe_convert_timedelta(offset) == 2\n assert pi._maybe_convert_timedelta(2) == 2\n\n offset = offsets.BusinessDay()\n msg = r\"Input has different freq=B from PeriodIndex\\(freq=D\\)\"\n with pytest.raises(ValueError, match=msg):\n pi._maybe_convert_timedelta(offset)\n\n\ndef test_is_monotonic_with_nat():\n # GH#31437\n # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,\n # in particular never be monotonic when we have NaT\n dti = date_range(\"2016-01-01\", periods=3)\n pi = dti.to_period(\"D\")\n tdi = Index(dti.view(\"timedelta64[ns]\"))\n\n for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert obj.is_monotonic\n assert obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti1 = dti.insert(0, NaT)\n pi1 = dti1.to_period(\"D\")\n tdi1 = Index(dti1.view(\"timedelta64[ns]\"))\n\n for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti2 = dti.insert(3, NaT)\n pi2 = dti2.to_period(\"H\")\n tdi2 = Index(dti2.view(\"timedelta64[ns]\"))\n\n for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n\[email protected](\"array\", [True, False])\ndef test_dunder_array(array):\n obj = PeriodIndex([\"2000-01-01\", \"2001-01-01\"], freq=\"D\")\n if array:\n obj = obj._data\n\n expected = np.array([obj[0], obj[1]], dtype=object)\n result = np.array(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n expected = obj.asi8\n for dtype in [\"i8\", \"int64\", np.int64]:\n result = np.array(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n for dtype in [\"float64\", \"int32\", \"uint64\"]:\n msg = \"argument must be\"\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=dtype)\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=getattr(np, dtype))\n" ]
[ [ "pandas.DatetimeIndex", "pandas.offsets.Day", "pandas.concat", "numpy.random.random", "pandas._testing.assert_series_equal", "pandas.period_range", "pandas._testing.round_trip_pickle", "pandas.PeriodIndex", "pandas.offsets.BusinessDay", "pandas.Period", "numpy.array", "pandas._testing.makePeriodIndex", "numpy.random.randn", "pandas._testing.assert_index_equal", "pandas.offsets.MonthEnd", "pandas.Index", "numpy.asarray", "pandas.date_range", "pandas._testing.assert_numpy_array_equal", "pandas.Series", "numpy.repeat" ] ]
zilnuken/python
[ "b989f1f10b8c56b5e8a900245e6e9090a0f702e0" ]
[ "examples/command-line/pilimage.py" ]
[ "from PIL import Image # pip install Pillow\nim = Image.open('qr.jpg')\n# im.show()\n\nimport numpy\nimport dbr\nimport cv2\n\ndbr.initLicense('LICENSE-LEY')\n\nopencvImage = cv2.cvtColor(numpy.array(im), cv2.COLOR_RGB2BGR)\n\nresults = dbr.decodeBuffer(opencvImage, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000) # 1D, PDF417, QRCODE, DataMatrix, Aztec Code\n\nfor result in results:\n\n print('barcode format: ' + result[0])\n print('barcode value: ' + result[1])\n" ]
[ [ "numpy.array" ] ]
pzheng2018/MutualGuide
[ "e0ea62abf128925836e4337a7fef400b135b7cbe" ]
[ "utils/box/prior_box.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport torch\nimport math\nfrom math import sqrt as sqrt\nfrom itertools import product as product\n\n\ndef PriorBox(base_anchor, size, base_size):\n \"\"\"Predefined anchor boxes\"\"\"\n \n if base_size == 320:\n repeat = 4\n elif base_size == 512:\n repeat = 5\n else:\n raise ValueError('Error: Sorry size {} is not supported!'.format(base_size))\n \n feature_map = [math.ceil(size / 2 ** (3 + i)) for i in range(repeat)]\n\n output = []\n for (k, (f_h, f_w)) in enumerate(zip(feature_map, feature_map)):\n for (i, j) in product(range(f_h), range(f_w)):\n \n cy = (i + 0.5) / f_h\n cx = (j + 0.5) / f_w\n\n anchor = base_anchor * 2 ** k / size\n output += [cx, cy, anchor, anchor]\n output += [cx, cy, anchor * sqrt(2), anchor / sqrt(2)]\n output += [cx, cy, anchor / sqrt(2), anchor * sqrt(2)]\n \n anchor *= sqrt(2)\n output += [cx, cy, anchor, anchor]\n output += [cx, cy, anchor * sqrt(2), anchor / sqrt(2)]\n output += [cx, cy, anchor / sqrt(2), anchor * sqrt(2)]\n\n output = torch.Tensor(output).view(-1, 4)\n output.clamp_(max=1, min=0)\n return output\n" ]
[ [ "torch.Tensor" ] ]
KingStorm/AutoEq
[ "832675eb960b5ea5e7746decf57adfc08da9c24d" ]
[ "frequency_response.py" ]
[ "# -*- coding: utf-8 -*_\n\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport argparse\nimport math\nimport pandas as pd\nfrom io import StringIO\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.signal import savgol_filter, find_peaks, minimum_phase, firwin2\nfrom scipy.special import expit\nimport soundfile as sf\nimport numpy as np\nfrom glob import glob\nimport urllib\nfrom time import time\nfrom tabulate import tabulate\nfrom PIL import Image\nimport re\nimport biquad\nimport warnings\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))\n\nDEFAULT_F_MIN = 20\nDEFAULT_F_MAX = 20000\nDEFAULT_STEP = 1.01\n\nDEFAULT_MAX_GAIN = 6.0\nDEFAULT_TREBLE_F_LOWER = 6000.0\nDEFAULT_TREBLE_F_UPPER = 8000.0\nDEFAULT_TREBLE_MAX_GAIN = 0.0\nDEFAULT_TREBLE_GAIN_K = 1.0\n\nDEFAULT_SMOOTHING_WINDOW_SIZE = 1 / 6\nDEFAULT_SMOOTHING_ITERATIONS = 1\nDEFAULT_TREBLE_SMOOTHING_F_LOWER = 100.0\nDEFAULT_TREBLE_SMOOTHING_F_UPPER = 10000.0\nDEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE = 1 / 3\nDEFAULT_TREBLE_SMOOTHING_ITERATIONS = 1\nDEFAULT_TILT = 0.0\nDEFAULT_FS = 44100\nDEFAULT_BIT_DEPTH = 16\nDEFAULT_PHASE = 'minimum'\nDEFAULT_F_RES = 10\n\nDEFAULT_OE_BASS_BOOST_F_LOWER = 35\nDEFAULT_OE_BASS_BOOST_F_UPPER = 280\nDEFAULT_IE_BASS_BOOST_F_LOWER = 25\nDEFAULT_IE_BASS_BOOST_F_UPPER = 350\n\nGRAPHIC_EQ_STEP = 1.1\n\n\nclass FrequencyResponse:\n def __init__(self,\n name=None,\n frequency=None,\n raw=None,\n error=None,\n smoothed=None,\n error_smoothed=None,\n equalization=None,\n parametric_eq=None,\n fixed_band_eq=None,\n equalized_raw=None,\n equalized_smoothed=None,\n target=None):\n self.name = name.strip()\n\n self.frequency = self._init_data(frequency)\n if not len(self.frequency):\n self.frequency = self.generate_frequencies()\n\n self.raw = self._init_data(raw)\n self.smoothed = self._init_data(smoothed)\n self.error = self._init_data(error)\n self.error_smoothed = self._init_data(error_smoothed)\n self.equalization = self._init_data(equalization)\n self.parametric_eq = self._init_data(parametric_eq)\n self.fixed_band_eq = self._init_data(fixed_band_eq)\n self.equalized_raw = self._init_data(equalized_raw)\n self.equalized_smoothed = self._init_data(equalized_smoothed)\n self.target = self._init_data(target)\n self._sort()\n\n def copy(self, name=None):\n return FrequencyResponse(\n name=self.name + '_copy' if name is None else name,\n frequency=self._init_data(self.frequency),\n raw=self._init_data(self.raw),\n error=self._init_data(self.error),\n smoothed=self._init_data(self.smoothed),\n error_smoothed=self._init_data(self.error_smoothed),\n equalization=self._init_data(self.equalization),\n parametric_eq=self._init_data(self.parametric_eq),\n fixed_band_eq=self._init_data(self.fixed_band_eq),\n equalized_raw=self._init_data(self.equalized_raw),\n equalized_smoothed=self._init_data(self.equalized_smoothed),\n target=self._init_data(self.target)\n )\n\n @staticmethod\n def _init_data(data):\n \"\"\"Initializes data to a clean format. If None is passed and empty array is created. Non-numbers are removed.\"\"\"\n data = data if data is not None else []\n data = [None if x is None or math.isnan(x) else x for x in data]\n data = np.array(data)\n return data\n\n def _sort(self):\n sorted_inds = self.frequency.argsort()\n self.frequency = self.frequency[sorted_inds]\n for i in range(1, len(self.frequency)):\n if self.frequency[i] == self.frequency[i-1]:\n raise ValueError('Duplicate values found at frequency {}. Remove duplicates manually.'.format(\n self.frequency[i])\n )\n if len(self.raw):\n self.raw = self.raw[sorted_inds]\n if len(self.error):\n self.error = self.error[sorted_inds]\n if len(self.smoothed):\n self.smoothed = self.smoothed[sorted_inds]\n if len(self.error_smoothed):\n self.error_smoothed = self.error_smoothed[sorted_inds]\n if len(self.equalization):\n self.equalization = self.equalization[sorted_inds]\n if len(self.parametric_eq):\n self.parametric_eq = self.parametric_eq[sorted_inds]\n if len(self.fixed_band_eq):\n self.fixed_band_eq = self.fixed_band_eq[sorted_inds]\n if len(self.equalized_raw):\n self.equalized_raw = self.equalized_raw[sorted_inds]\n if len(self.equalized_smoothed):\n self.equalized_smoothed = self.equalized_smoothed[sorted_inds]\n if len(self.target):\n self.target = self.target[sorted_inds]\n\n def reset(self,\n raw=False,\n smoothed=True,\n error=True,\n error_smoothed=True,\n equalization=True,\n fixed_band_eq=True,\n parametric_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=True):\n \"\"\"Resets data.\"\"\"\n if raw:\n self.raw = self._init_data(None)\n if smoothed:\n self.smoothed = self._init_data(None)\n if error:\n self.error = self._init_data(None)\n if error_smoothed:\n self.error_smoothed = self._init_data(None)\n if equalization:\n self.equalization = self._init_data(None)\n if parametric_eq:\n self.parametric_eq = self._init_data(None)\n if fixed_band_eq:\n self.fixed_band_eq = self._init_data(None)\n if equalized_raw:\n self.equalized_raw = self._init_data(None)\n if equalized_smoothed:\n self.equalized_smoothed = self._init_data(None)\n if target:\n self.target = self._init_data(None)\n\n @classmethod\n def read_from_csv(cls, file_path):\n \"\"\"Reads data from CSV file and constructs class instance.\"\"\"\n name = os.path.split(file_path)[-1].split('.')[0]\n\n # Read file\n f = open(file_path, 'r')\n s = f.read()\n\n # Regex for AutoEq style CSV\n header_pattern = r'frequency(,(raw|smoothed|error|error_smoothed|equalization|parametric_eq|equalized_raw|equalized_smoothed|target))+'\n float_pattern = r'-?\\d+\\.?\\d+'\n data_2_pattern = r'{fl}[ ,;:\\t]+{fl}?'.format(fl=float_pattern)\n data_n_pattern = r'{fl}([ ,;:\\t]+{fl})+?'.format(fl=float_pattern)\n autoeq_pattern = r'^{header}(\\n{data})+\\n*$'.format(header=header_pattern, data=data_n_pattern)\n\n if re.match(autoeq_pattern, s):\n # Known AutoEq CSV format\n df = pd.read_csv(StringIO(s), sep=',', header=0)\n frequency = list(df['frequency'])\n raw = list(df['raw']) if 'raw' in df else None\n smoothed = list(df['smoothed']) if 'smoothed' in df else None\n error = list(df['error']) if 'error' in df else None\n error_smoothed = list(df['error_smoothed']) if 'error_smoothed' in df else None\n equalization = list(df['equalization']) if 'equalization' in df else None\n parametric_eq = list(df['parametric_eq']) if 'parametric_eq' in df else None\n equalized_raw = list(df['equalized_raw']) if 'equalized_raw' in df else None\n equalized_smoothed = list(df['equalized_smoothed']) if 'equalized_smoothed' in df else None\n target = list(df['target']) if 'target' in df else None\n return cls(\n name=name,\n frequency=frequency,\n raw=raw,\n smoothed=smoothed,\n error=error,\n error_smoothed=error_smoothed,\n equalization=equalization,\n parametric_eq=parametric_eq,\n equalized_raw=equalized_raw,\n equalized_smoothed=equalized_smoothed,\n target=target\n )\n else:\n # Unknown format, try to guess\n lines = s.split('\\n')\n frequency = []\n raw = []\n for line in lines:\n if re.match(data_2_pattern, line): # float separator float\n floats = re.findall(float_pattern, line)\n frequency.append(float(floats[0])) # Assume first to be frequency\n raw.append(float(floats[1])) # Assume second to be raw\n # Discard all lines which don't match data pattern\n return cls(name=name, frequency=frequency, raw=raw)\n \n def to_dict(self):\n d = dict()\n if len(self.frequency):\n d['frequency'] = self.frequency.tolist()\n if len(self.raw):\n d['raw'] = [x if x is not None else 'NaN' for x in self.raw]\n if len(self.error):\n d['error'] = [x if x is not None else 'NaN' for x in self.error]\n if len(self.smoothed):\n d['smoothed'] = [x if x is not None else 'NaN' for x in self.smoothed]\n if len(self.error_smoothed):\n d['error_smoothed'] = [x if x is not None else 'NaN' for x in self.error_smoothed]\n if len(self.equalization):\n d['equalization'] = [x if x is not None else 'NaN' for x in self.equalization]\n if len(self.parametric_eq):\n d['parametric_eq'] = [x if x is not None else 'NaN' for x in self.parametric_eq]\n if len(self.fixed_band_eq):\n d['parametric_eq'] = [x if x is not None else 'NaN' for x in self.fixed_band_eq]\n if len(self.equalized_raw):\n d['equalized_raw'] = [x if x is not None else 'NaN' for x in self.equalized_raw]\n if len(self.equalized_smoothed):\n d['equalized_smoothed'] = [x if x is not None else 'NaN' for x in self.equalized_smoothed]\n if len(self.target):\n d['target'] = [x if x is not None else 'NaN' for x in self.target]\n return d\n\n def write_to_csv(self, file_path=None):\n \"\"\"Writes data to files as CSV.\"\"\"\n file_path = os.path.abspath(file_path)\n df = pd.DataFrame(self.to_dict())\n df.to_csv(file_path, header=True, index=False, float_format='%.2f')\n\n def write_eqapo_graphic_eq(self, file_path, normalize=True):\n \"\"\"Writes equalization graph to a file as Equalizer APO config.\"\"\"\n file_path = os.path.abspath(file_path)\n\n fr = FrequencyResponse(name='hack', frequency=self.frequency, raw=self.equalization)\n fr.interpolate(f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX, f_step=GRAPHIC_EQ_STEP)\n if normalize:\n fr.raw -= np.max(fr.raw) + 0.5\n if fr.raw[0] > 0.0:\n # Prevent bass boost below lowest frequency\n fr.raw[0] = 0.0\n\n # Remove leading zeros\n while np.abs(fr.raw[-1]) < 0.1 and np.abs(fr.raw[-2]) < 0.1: # Last two are zeros\n fr.raw = fr.raw[:-1]\n\n with open(file_path, 'w') as f:\n s = '; '.join(['{f} {a:.1f}'.format(f=f, a=a) for f, a in zip(fr.frequency, fr.raw)])\n s = 'GraphicEQ: ' + s\n f.write(s)\n return s\n\n @staticmethod\n def optimize_biquad_filters(frequency, target, max_time=5, max_filters=None, fs=DEFAULT_FS, fc=None, q=None):\n import tensorflow.compat.v1 as tf\n tf.get_logger().setLevel('ERROR')\n tf.disable_v2_behavior()\n\n if fc is not None or q is not None:\n if fc is None:\n raise TypeError('\"fc\" must be given if \"q\" is given.')\n if q is None:\n raise TypeError('\"q\" must be give nif \"fc\" is given.')\n if max_filters is not None:\n raise TypeError('\"max_filters\" must not be given when \"fc\" and \"q\" are given.')\n fc = np.array(fc, dtype='float32')\n q = np.array(q, dtype='float32')\n\n parametric = fc is None\n\n # Reset graph to be able to run this again\n tf.reset_default_graph()\n # Sampling frequency\n fs_tf = tf.constant(fs, name='f', dtype='float32')\n\n # Smoothen heavily\n fr_target = FrequencyResponse(name='Filter Initialization', frequency=frequency, raw=target)\n fr_target.smoothen_fractional_octave(window_size=1 / 7, iterations=1000)\n\n # Equalization target\n eq_target = tf.constant(target, name='eq_target', dtype='float32')\n\n n_ls = n_hs = 0\n\n if parametric:\n # Fc and Q not given, parametric equalizer, find initial estimation of peaks and gains\n fr_target_pos = np.clip(fr_target.smoothed, a_min=0.0, a_max=None)\n peak_inds = find_peaks(fr_target_pos)[0]\n fr_target_neg = np.clip(-fr_target.smoothed, a_min=0.0, a_max=None)\n peak_inds = np.concatenate((peak_inds, find_peaks(fr_target_neg)[0]))\n peak_inds.sort()\n peak_inds = peak_inds[np.abs(fr_target.smoothed[peak_inds]) > 0.1]\n\n # Peak center frequencies and gains\n peak_fc = frequency[peak_inds].astype('float32')\n\n if peak_fc[0] > 80:\n # First peak is beyond 80Hz, add peaks to 20Hz and 60Hz\n peak_fc = np.concatenate((np.array([20, 60], dtype='float32'), peak_fc))\n elif peak_fc[0] > 40:\n # First peak is beyond 40Hz, add peak to 20Hz\n peak_fc = np.concatenate((np.array([20], dtype='float32'), peak_fc))\n\n # Gains at peak center frequencies\n interpolator = InterpolatedUnivariateSpline(np.log10(frequency), fr_target.smoothed, k=1)\n peak_g = interpolator(np.log10(peak_fc)).astype('float32')\n\n def remove_small_filters(min_gain):\n # Remove peaks with too little gain\n nonlocal peak_fc, peak_g\n peak_fc = peak_fc[np.abs(peak_g) > min_gain]\n peak_g = peak_g[np.abs(peak_g) > min_gain]\n\n def merge_filters():\n # Merge two filters which have small integral between them\n nonlocal peak_fc, peak_g\n # Form filter pairs, select only filters with equal gain sign\n pair_inds = []\n for j in range(len(peak_fc) - 1):\n if np.sign(peak_g[j]) == np.sign(peak_g[j + 1]):\n pair_inds.append(j)\n\n min_err = None\n min_err_ind = None\n for pair_ind in pair_inds:\n # Interpolate between the two points\n f_0 = peak_fc[pair_ind]\n g_0 = peak_g[pair_ind]\n i_0 = np.where(frequency == f_0)[0][0]\n f_1 = peak_fc[pair_ind + 1]\n i_1 = np.where(frequency == f_1)[0][0]\n g_1 = peak_g[pair_ind]\n interp = InterpolatedUnivariateSpline(np.log10([f_0, f_1]), [g_0, g_1], k=1)\n line = interp(frequency[i_0:i_1 + 1])\n err = line - fr_target.smoothed[i_0:i_1 + 1]\n err = np.sqrt(np.mean(np.square(err))) # Root mean squared error\n if min_err is None or err < min_err:\n min_err = err\n min_err_ind = pair_ind\n\n if min_err is None:\n # No pairs detected\n return False\n\n # Select smallest error if err < threshold\n if min_err < 0.3:\n # New filter\n c = peak_fc[min_err_ind] * np.sqrt(peak_fc[min_err_ind + 1] / peak_fc[min_err_ind])\n c = frequency[np.argmin(np.abs(frequency - c))]\n g = np.mean([peak_g[min_err_ind], peak_g[min_err_ind + 1]])\n # Remove filters\n peak_fc = np.delete(peak_fc, [min_err_ind, min_err_ind + 1])\n peak_g = np.delete(peak_g, [min_err_ind, min_err_ind + 1])\n # Add filter in-between\n peak_fc = np.insert(peak_fc, min_err_ind, c)\n peak_g = np.insert(peak_g, min_err_ind, g)\n return True\n return False # No prominent filter pairs\n\n # Remove insignificant filters\n remove_small_filters(0.1)\n if len(peak_fc) == 0:\n # All filters were insignificant, exit\n return np.zeros(frequency.shape), 0.0, np.array([]), np.array([]), np.array([])\n\n # Limit filter number to max_filters by removing least significant filters and merging close filters\n if max_filters is not None:\n if len(peak_fc) > max_filters:\n # Remove too small filters\n remove_small_filters(0.2)\n\n if len(peak_fc) > max_filters:\n # Try to remove some more\n remove_small_filters(0.33)\n\n # Merge filters if needed\n while merge_filters() and len(peak_fc) > max_filters:\n pass\n\n if len(peak_fc) > max_filters:\n # Remove smallest filters\n sorted_inds = np.flip(np.argsort(np.abs(peak_g)))\n sorted_inds = sorted_inds[:max_filters]\n peak_fc = peak_fc[sorted_inds]\n peak_g = peak_g[sorted_inds]\n\n sorted_inds = np.argsort(peak_fc)\n peak_fc = peak_fc[sorted_inds]\n peak_g = peak_g[sorted_inds]\n\n n = n_pk = len(peak_fc)\n\n # Frequencies\n f = tf.constant(np.repeat(np.expand_dims(frequency, axis=0), n, axis=0), name='f', dtype='float32')\n\n # Center frequencies\n fc = tf.get_variable('fc', initializer=np.expand_dims(np.log10(peak_fc), axis=1), dtype='float32')\n\n # Q\n Q_init = np.ones([n, 1], dtype='float32') * np.ones([n_pk, 1], dtype='float32')\n Q = tf.get_variable('Q', initializer=Q_init, dtype='float32')\n\n else:\n # Fc and Q given, fixed band equalizer\n Q = tf.get_variable(\n 'Q',\n initializer=np.expand_dims(q, axis=1),\n dtype='float32',\n trainable=False\n )\n\n # Gains at peak center frequencies\n interpolator = InterpolatedUnivariateSpline(np.log10(frequency), fr_target.smoothed, k=1)\n peak_g = interpolator(np.log10(fc)).astype('float32')\n\n # Number of filters\n n = n_pk = len(fc)\n\n # Frequencies\n f = tf.constant(np.repeat(np.expand_dims(frequency, axis=0), n, axis=0), name='f', dtype='float32')\n\n # Center frequencies\n fc = tf.get_variable(\n 'fc',\n initializer=np.expand_dims(np.log10(fc), axis=1),\n dtype='float32',\n trainable=False\n )\n\n # Gain\n gain = tf.get_variable('gain', initializer=np.expand_dims(peak_g, axis=1), dtype='float32')\n\n # Filter design\n\n # Low shelf filter\n # This is not used at the moment but is kept for future\n A = 10 ** (gain[:n_ls, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[:n_ls, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[:n_ls, :])\n\n a0_ls = ((A + 1) + (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)\n a1_ls = (-(-2 * ((A - 1) + (A + 1) * tf.cos(w0))) / a0_ls)\n a2_ls = (-((A + 1) + (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha) / a0_ls)\n\n b0_ls = ((A * ((A + 1) - (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)) / a0_ls)\n b1_ls = ((2 * A * ((A - 1) - (A + 1) * tf.cos(w0))) / a0_ls)\n b2_ls = ((A * ((A + 1) - (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha)) / a0_ls)\n\n # Peak filter\n A = 10 ** (gain[n_ls:n_ls+n_pk, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[n_ls:n_ls+n_pk, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[n_ls:n_ls+n_pk, :])\n\n a0_pk = (1 + alpha / A)\n a1_pk = -(-2 * tf.cos(w0)) / a0_pk\n a2_pk = -(1 - alpha / A) / a0_pk\n\n b0_pk = (1 + alpha * A) / a0_pk\n b1_pk = (-2 * tf.cos(w0)) / a0_pk\n b2_pk = (1 - alpha * A) / a0_pk\n\n # High self filter\n # This is not kept at the moment but kept for future\n A = 10 ** (gain[n_ls+n_pk:, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[n_ls+n_pk:, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[n_ls+n_pk:, :])\n\n a0_hs = (A + 1) - (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha\n a1_hs = -(2 * ((A - 1) - (A + 1) * tf.cos(w0))) / a0_hs\n a2_hs = -((A + 1) - (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha) / a0_hs\n\n b0_hs = (A * ((A + 1) + (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)) / a0_hs\n b1_hs = (-2 * A * ((A - 1) + (A + 1) * tf.cos(w0))) / a0_hs\n b2_hs = (A * ((A + 1) + (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha)) / a0_hs\n\n # Concatenate all\n a0 = tf.concat([a0_ls, a0_pk, a0_hs], axis=0)\n a1 = tf.concat([a1_ls, a1_pk, a1_hs], axis=0)\n a2 = tf.concat([a2_ls, a2_pk, a2_hs], axis=0)\n b0 = tf.concat([b0_ls, b0_pk, b0_hs], axis=0)\n b1 = tf.concat([b1_ls, b1_pk, b1_hs], axis=0)\n b2 = tf.concat([b2_ls, b2_pk, b2_hs], axis=0)\n\n w = 2 * np.pi * f / fs_tf\n phi = 4 * tf.sin(w / 2) ** 2\n\n a0 = 1.0\n a1 *= -1\n a2 *= -1\n\n # Equalizer frequency response\n eq_op = 10 * tf.log(\n (b0 + b1 + b2) ** 2 + (b0 * b2 * phi - (b1 * (b0 + b2) + 4 * b0 * b2)) * phi\n ) / tf.log(10.0) - 10 * tf.log(\n (a0 + a1 + a2) ** 2 + (a0 * a2 * phi - (a1 * (a0 + a2) + 4 * a0 * a2)) * phi\n ) / tf.log(10.0)\n eq_op = tf.reduce_sum(eq_op, axis=0)\n\n # RMSE as loss\n loss = tf.reduce_mean(tf.square(eq_op - eq_target))\n learning_rate_value = 0.1\n decay = 0.9995\n learning_rate = tf.placeholder('float32', shape=(), name='learning_rate')\n train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n # Optimization loop\n min_loss = None\n threshold = 0.01\n momentum = 100\n bad_steps = 0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n t = time()\n while time() - t < max_time:\n step_loss, _ = sess.run([loss, train_step], feed_dict={learning_rate: learning_rate_value})\n if min_loss is None or step_loss < min_loss:\n # Improvement, update model\n _eq, _fc, _Q, _gain = sess.run([eq_op, fc, Q, gain])\n _fc = 10**_fc\n\n if min_loss is None or min_loss - step_loss > threshold:\n # Loss improved\n min_loss = step_loss\n bad_steps = 0\n else:\n # No improvement, increment bad step counter\n bad_steps += 1\n if bad_steps > momentum:\n # Bad steps exceed maximum number of bad steps, break\n break\n learning_rate_value = learning_rate_value * decay\n\n rmse = np.sqrt(min_loss) # RMSE\n\n # Fold center frequencies back to normal\n _fc = np.abs(np.round(_fc / fs) * fs - _fc)\n\n # Squeeze to rank-1 arrays\n _fc = np.squeeze(_fc)\n _Q = np.squeeze(_Q)\n _gain = np.squeeze(_gain)\n\n if parametric:\n # Filter selection slice\n sl = np.logical_and(np.abs(_gain) > 0.1, _fc > 10)\n _fc = _fc[sl]\n _Q = np.abs(_Q[sl])\n _gain = _gain[sl]\n\n # Sort filters by center frequency\n sorted_inds = np.argsort(_fc)\n _fc = _fc[sorted_inds]\n _Q = _Q[sorted_inds]\n _gain = _gain[sorted_inds]\n\n # Expand dimensionality for biquad\n _fc = np.expand_dims(_fc, axis=1)\n _Q = np.expand_dims(np.abs(_Q), axis=1)\n _gain = np.expand_dims(_gain, axis=1)\n # Re-compute eq\n a0, a1, a2, b0, b1, b2 = biquad.peaking(_fc, _Q, _gain, fs=fs)\n frequency = np.repeat(np.expand_dims(frequency, axis=0), len(_fc), axis=0)\n _eq = np.sum(biquad.digital_coeffs(frequency, fs, a0, a1, a2, b0, b1, b2), axis=0)\n\n coeffs_a = np.hstack((np.tile(a0, a1.shape), a1, a2))\n coeffs_b = np.hstack((b0, b1, b2))\n return _eq, rmse, np.squeeze(_fc, axis=1), np.squeeze(_Q, axis=1), np.squeeze(_gain, axis=1), coeffs_a, coeffs_b\n\n def optimize_parametric_eq(self, max_filters=None, fs=DEFAULT_FS):\n \"\"\"Fits multiple biquad filters to equalization curve. If max_filters is a list with more than one element, one\n optimization run will be ran for each element. Each optimization run will continue from the previous. Each\n optimization run results must be combined with results of all the previous runs but can be used independently of\n the preceeding runs' results. If max_filters is [5, 5, 5] the first 5, 10 and 15 filters can be used\n independently.\n\n Args:\n max_filters: List of maximum number of filters available for each filter group optimization.\n fs: Sampling frequency\n\n Returns:\n - **filters:** Numpy array of filters where each row contains one filter fc, Q and gain\n - **n_produced:** Actual number of filters produced for each filter group. Calling with [5, 5] max_filters\n might actually produce [4, 5] filters meaning that first 4 filters can be used\n independently.\n - **max_gains:** Maximum gain value of the equalizer frequency response after each filter group\n optimization. When using sub-set of filters independently the actual max gain of that\n sub-set's frequency response must be applied as a negative digital preamp to avoid\n clipping.\n \"\"\"\n if not len(self.equalization):\n raise ValueError('Equalization has not been done yet.')\n\n if type(max_filters) != list:\n max_filters = [max_filters]\n\n self.parametric_eq = np.zeros(self.frequency.shape)\n fc = Q = gain = np.array([])\n coeffs_a = coeffs_b = np.empty((0, 3))\n n_produced = []\n max_gains = []\n for n in max_filters:\n _eq, rmse, _fc, _Q, _gain, _coeffs_a, _coeffs_b = self.optimize_biquad_filters(\n frequency=self.frequency,\n target=self.equalization - self.parametric_eq,\n max_filters=n,\n fs=fs\n )\n n_produced.append(len(_fc))\n # print('RMSE: {:.2f}dB'.format(rmse))\n self.parametric_eq += _eq\n max_gains.append(np.max(self.parametric_eq))\n fc = np.concatenate((fc, _fc))\n Q = np.concatenate((Q, _Q))\n gain = np.concatenate((gain, _gain))\n coeffs_a = np.vstack((coeffs_a, _coeffs_a))\n coeffs_b = np.vstack((coeffs_b, _coeffs_b))\n\n filters = np.transpose(np.vstack([fc, Q, gain]))\n return filters, n_produced, max_gains\n\n def optimize_fixed_band_eq(self, fc=None, q=None, fs=DEFAULT_FS):\n \"\"\"Fits multiple fixed Fc and Q biquad filters to equalization curve.\n\n Args:\n fc: List of center frequencies for the filters\n q: List of Q values for the filters\n fs: Sampling frequency\n\n Returns:\n - **filters:** Numpy array of filters where each row contains one filter fc, Q and gain\n - **n_produced:** Number of filters. Equals to length or inputs.\n - **max_gains:** Maximum gain value of the equalizer frequency response.\n \"\"\"\n eq, rmse, fc, Q, gain, coeffs_a, coeffs_b = self.optimize_biquad_filters(\n frequency=self.frequency,\n target=self.equalization,\n fc=fc,\n q=q,\n fs=fs\n )\n self.fixed_band_eq = eq\n filters = np.transpose(np.vstack([fc, Q, gain]))\n return filters, len(fc), np.max(self.fixed_band_eq)\n\n @staticmethod\n def write_eqapo_parametric_eq(file_path, filters):\n \"\"\"Writes EqualizerAPO Parameteric eq settings to a file.\"\"\"\n file_path = os.path.abspath(file_path)\n\n with open(file_path, 'w') as f:\n f.write('\\n'.join(['Filter {i}: ON {type} Fc {fc:.0f} Hz Gain {gain:.1f} dB Q {Q:.2f}'.format(\n i=i+1,\n type='PK',\n fc=filters[i, 0],\n Q=filters[i, 1],\n gain=filters[i, 2]\n ) for i in range(len(filters))]))\n\n @staticmethod\n def _split_path(path):\n \"\"\"Splits file system path into components.\"\"\"\n folders = []\n while 1:\n path, folder = os.path.split(path)\n\n if folder != \"\":\n folders.append(folder)\n else:\n if path != \"\":\n folders.append(path)\n\n break\n\n folders.reverse()\n return folders\n\n def minimum_phase_impulse_response(self, fs=DEFAULT_FS, f_res=DEFAULT_F_RES, normalize=True):\n \"\"\"Generates minimum phase impulse response\n\n Inspired by:\n https://sourceforge.net/p/equalizerapo/code/HEAD/tree/tags/1.2/filters/GraphicEQFilter.cpp#l45\n\n Args:\n fs: Sampling frequency in Hz\n f_res: Frequency resolution as sampling interval. 20 would result in sampling at 0 Hz, 20 Hz, 40 Hz, ...\n normalize: Normalize gain to -0.5 dB\n\n Returns:\n Minimum phase impulse response\n \"\"\"\n # Double frequency resolution because it will be halved when converting linear phase IR to minimum phase\n f_res /= 2\n # Interpolate to even sample interval\n fr = FrequencyResponse(name='fr_data', frequency=self.frequency.copy(), raw=self.equalization.copy())\n # Save gain at lowest available frequency\n f_min = np.max([fr.frequency[0], f_res])\n interpolator = InterpolatedUnivariateSpline(np.log10(fr.frequency), fr.raw, k=1)\n gain_f_min = interpolator(np.log10(f_min))\n # Run interpolation\n fr.interpolate(np.arange(0, fs // 2, f_res), pol_order=1)\n # Set gain for all frequencies below original minimum frequency to match gain at the original minimum frequency\n fr.raw[fr.frequency <= f_min] = gain_f_min\n if normalize:\n # Reduce by max gain to avoid clipping with 1 dB of headroom\n fr.raw -= np.max(fr.raw)\n fr.raw -= 0.5\n # Minimum phase transformation by scipy's homomorphic method halves dB gain\n fr.raw *= 2\n # Convert amplitude to linear scale\n fr.raw = 10**(fr.raw / 20)\n # Calculate response\n fr.frequency = np.append(fr.frequency, fs // 2)\n fr.raw = np.append(fr.raw, 0.0)\n ir = firwin2(len(fr.frequency)*2+1, fr.frequency, fr.raw, fs=fs)\n # Convert to minimum phase\n ir = minimum_phase(ir)\n return ir\n\n def linear_phase_impulse_response(self, fs=DEFAULT_FS, f_res=DEFAULT_F_RES, normalize=True):\n \"\"\"Generates impulse response implementation of equalization filter.\"\"\"\n # Interpolate to even sample interval\n fr = FrequencyResponse(name='fr_data', frequency=self.frequency, raw=self.equalization)\n # Save gain at lowest available frequency\n f_min = np.max([fr.frequency[0], f_res])\n interpolator = InterpolatedUnivariateSpline(np.log10(fr.frequency), fr.raw, k=1)\n gain_f_min = interpolator(np.log10(f_min))\n # Run interpolation\n fr.interpolate(np.arange(0, fs // 2, f_res))\n # Set gain for all frequencies below original minimum frequency to match gain at the original minimum frequency\n fr.raw[fr.frequency <= f_min] = gain_f_min\n if normalize:\n # Reduce by max gain to avoid clipping with 1 dB of headroom\n fr.raw -= np.max(fr.raw)\n fr.raw -= 0.5\n # Convert amplitude to linear scale\n fr.raw = 10**(fr.raw / 20)\n # Calculate response\n fr.frequency = np.append(fr.frequency, fs // 2)\n fr.raw = np.append(fr.raw, 0.0)\n ir = firwin2(len(fr.frequency)*2, fr.frequency, fr.raw, fs=fs)\n return ir\n\n def write_readme(self,\n file_path,\n max_filters=None,\n max_gains=None):\n \"\"\"Writes README.md with picture and Equalizer APO settings.\"\"\"\n file_path = os.path.abspath(file_path)\n dir_path = os.path.dirname(file_path)\n model = self.name\n\n # Write model\n s = '# {}\\n'.format(model)\n s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and ' \\\n 'info.\\n'\n\n # Add parametric EQ settings\n parametric_eq_path = os.path.join(dir_path, model + ' ParametricEQ.txt')\n if os.path.isfile(parametric_eq_path) and self.parametric_eq is not None and len(self.parametric_eq):\n max_gains = [x + 0.5 for x in max_gains]\n\n # Read Parametric eq\n with open(parametric_eq_path, 'r') as f:\n parametric_eq_str = f.read().strip()\n\n # Filters as Markdown table\n filters = []\n for line in parametric_eq_str.split('\\n'):\n if line == '':\n continue\n filter_type = line[line.index('ON')+3:line.index('Fc')-1]\n if filter_type == 'PK':\n filter_type = 'Peaking'\n if filter_type == 'LS':\n filter_type = 'Low Shelf'\n if filter_type == 'HS':\n filter_type = 'High Shelf'\n fc = line[line.index('Fc')+3:line.index('Gain')-1]\n gain = line[line.index('Gain')+5:line.index('Q')-1]\n q = line[line.index('Q')+2:]\n filters.append([filter_type, fc, q, gain])\n filters_table_str = tabulate(\n filters,\n headers=['Type', 'Fc', 'Q', 'Gain'],\n tablefmt='orgtbl'\n ).replace('+', '|').replace('|-', '|:')\n\n max_filters_str = ''\n if type(max_filters) == list and len(max_filters) > 1:\n n = [0]\n for x in max_filters:\n n.append(n[-1] + x)\n del n[0]\n if len(max_filters) > 3:\n max_filters_str = ', '.join([str(x) for x in n[:-2]]) + ' or {}'.format(n[-2])\n if len(max_filters) == 3:\n max_filters_str = '{n0} or {n1}'.format(n0=n[0], n1=n[1])\n if len(max_filters) == 2:\n max_filters_str = str(n[0])\n max_filters_str = 'The first {} filters can be used independently.'.format(max_filters_str)\n\n preamp_str = ''\n if type(max_gains) == list and len(max_gains) > 1:\n max_gains = [x + 0.1 for x in max_gains]\n if len(max_gains) > 3:\n _s = 'When using independent subset of filters, apply preamp of {}, respectively.'\n preamp_str = ', '.join(['-{:.1f}dB'.format(x) for x in max_gains[:-2]])\n preamp_str += ' or -{:.1f}dB'.format(max_gains[-2])\n if len(max_gains) == 3:\n _s = 'When using independent subset of filters, apply preamp of {}, respectively.'\n preamp_str = '-{g0:.1f}dB or -{g1:.1f}dB'.format(g0=max_gains[0], g1=max_gains[1])\n if len(max_gains) == 2:\n _s = 'When using independent subset of filters, apply preamp of **{}**.'\n preamp_str = '-{:.1f}dB'.format(max_gains[0])\n preamp_str = _s.format(preamp_str)\n\n s += '''\n ### Parametric EQs\n In case of using parametric equalizer, apply preamp of **-{preamp:.1f}dB** and build filters manually\n with these parameters. {max_filters_str}\n {preamp_str}\n\n {filters_table}\n '''.format(\n model=model,\n preamp=max_gains[-1],\n max_filters_str=max_filters_str,\n preamp_str=preamp_str,\n filters_table=filters_table_str\n )\n\n # Add fixed band eq\n fixed_band_eq_path = os.path.join(dir_path, model + ' FixedBandEQ.txt')\n if os.path.isfile(fixed_band_eq_path) and self.fixed_band_eq is not None and len(self.fixed_band_eq):\n preamp = np.min([0.0, float(-np.max(self.fixed_band_eq))]) - 0.5\n\n # Read Parametric eq\n with open(fixed_band_eq_path, 'r') as f:\n fixed_band_eq_str = f.read().strip()\n\n # Filters as Markdown table\n filters = []\n for line in fixed_band_eq_str.split('\\n'):\n if line == '':\n continue\n filter_type = line[line.index('ON') + 3:line.index('Fc') - 1]\n if filter_type == 'PK':\n filter_type = 'Peaking'\n if filter_type == 'LS':\n filter_type = 'Low Shelf'\n if filter_type == 'HS':\n filter_type = 'High Shelf'\n fc = line[line.index('Fc') + 3:line.index('Gain') - 1]\n gain = line[line.index('Gain') + 5:line.index('Q') - 1]\n q = line[line.index('Q') + 2:]\n filters.append([filter_type, fc, q, gain])\n filters_table_str = tabulate(\n filters,\n headers=['Type', 'Fc', 'Q', 'Gain'],\n tablefmt='orgtbl'\n ).replace('+', '|').replace('|-', '|:')\n\n s += '''\n ### Fixed Band EQs\n In case of using fixed band (also called graphic) equalizer, apply preamp of **{preamp:.1f}dB**\n (if available) and set gains manually with these parameters.\n\n {filters_table}\n '''.format(\n model=model,\n preamp=preamp,\n filters_table=filters_table_str\n )\n\n # Write image link\n img_path = os.path.join(dir_path, model + '.png')\n if os.path.isfile(img_path):\n img_url = f'./{os.path.split(img_path)[1]}'\n img_url = urllib.parse.quote(img_url, safe=\"%/:=&?~#+!$,;'@()*[]\")\n s += '''\n ### Graphs\n ![]({})\n '''.format(img_url)\n\n # Write file\n with open(file_path, 'w') as f:\n f.write(re.sub('\\n[ \\t]+', '\\n', s).strip())\n\n @staticmethod\n def generate_frequencies(f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX, f_step=DEFAULT_STEP):\n freq_new = []\n # Frequencies from 20kHz down\n f = np.min([20000, f_max])\n while f > f_min:\n freq_new.append(int(round(f)))\n f = f / f_step\n # Frequencies from 20kHZ up\n f = np.min([20000, f_max])\n while f < f_max:\n freq_new.append(int(round(f)))\n f = f * f_step\n freq_new = sorted(set(freq_new)) # Remove duplicates and sort ascending\n return np.array(freq_new)\n\n def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):\n \"\"\"Interpolates missing values from previous and next value. Resets all but raw data.\"\"\"\n # Remove None values\n i = 0\n while i < len(self.raw):\n if self.raw[i] is None:\n self.raw = np.delete(self.raw, i)\n self.frequency = np.delete(self.frequency, i)\n else:\n i += 1\n interpolator = InterpolatedUnivariateSpline(np.log10(self.frequency), self.raw, k=pol_order)\n\n if f is None:\n self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)\n else:\n self.frequency = f\n if self.frequency[0] == 0:\n self.frequency[0] = 1 # Prevent log10 from exploding\n self.raw = interpolator(np.log10(self.frequency))\n self.frequency[0] = 0\n else:\n self.raw = interpolator(np.log10(self.frequency))\n # Everything but raw data is affected by interpolating, reset them\n self.reset(raw=False)\n\n def center(self, frequency=1000):\n \"\"\"Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n \"\"\"\n equal_energy_fr = FrequencyResponse(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())\n equal_energy_fr.interpolate()\n interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)\n if type(frequency) in [list, np.ndarray] and len(frequency) > 1:\n # Use the average of the gain values between the given frequencies as the difference to be subtracted\n diff = np.mean(equal_energy_fr.raw[np.logical_and(\n equal_energy_fr.frequency >= frequency[0],\n equal_energy_fr.frequency <= frequency[1]\n )])\n else:\n if type(frequency) in [list, np.ndarray]:\n # List or array with only one element\n frequency = frequency[0]\n # Use the gain value at the given frequency as the difference to be subtracted\n diff = interpolator(np.log10(frequency))\n\n self.raw -= diff\n if len(self.smoothed):\n self.smoothed -= diff\n\n # Everything but raw, smoothed and target is affected by centering, reset them\n self.reset(raw=False, smoothed=False, target=False)\n\n return -diff\n\n def _tilt(self, tilt=DEFAULT_TILT):\n \"\"\"Creates a tilt for equalization.\n\n Args:\n tilt: Slope steepness in dB/octave\n\n Returns:\n Tilted data\n \"\"\"\n # Center in logarithmic scale\n c = DEFAULT_F_MIN * np.sqrt(DEFAULT_F_MAX / DEFAULT_F_MIN)\n # N octaves above center\n n_oct = np.log2(self.frequency / c)\n return n_oct * tilt\n\n def _target(self,\n bass_boost=None,\n bass_boost_f_lower=None,\n bass_boost_f_upper=None,\n tilt=None):\n \"\"\"Creates target curve with bass boost as described by harman target response.\n\n Args:\n bass_boost: Bass boost in dB\n\n Returns:\n Target for equalization\n \"\"\"\n if bass_boost is not None:\n bass_boost = self._sigmoid(\n f_lower=bass_boost_f_lower,\n f_upper=bass_boost_f_upper,\n a_normal=bass_boost,\n a_treble=0.0\n )\n else:\n bass_boost = np.zeros(len(self.frequency))\n if tilt is not None:\n tilt = self._tilt(tilt=tilt)\n else:\n tilt = np.zeros(len(self.frequency))\n return bass_boost + tilt\n\n def compensate(self,\n compensation,\n bass_boost=None,\n bass_boost_f_lower=None,\n bass_boost_f_upper=None,\n tilt=None,\n sound_signature=None,\n min_mean_error=False):\n \"\"\"Sets target and error curves.\"\"\"\n # Copy and center compensation data\n compensation = FrequencyResponse(name='compensation', frequency=compensation.frequency, raw=compensation.raw)\n compensation.smoothen_fractional_octave(\n window_size=DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE,\n iterations=DEFAULT_TREBLE_SMOOTHING_ITERATIONS\n )\n compensation.center()\n compensation.raw = compensation.smoothed\n compensation.smoothed = np.array([])\n\n # Set target\n self.target = compensation.raw + self._target(\n bass_boost=bass_boost,\n bass_boost_f_lower=bass_boost_f_lower,\n bass_boost_f_upper=bass_boost_f_upper,\n tilt=tilt\n )\n if sound_signature is not None:\n # Sound signature give, add it to target curve\n if not np.all(sound_signature.frequency == self.frequency):\n # Interpolate sound signature to match self on the frequency axis\n sound_signature.interpolate(self.frequency)\n self.target += sound_signature.raw\n\n # Set error\n self.error = self.raw - self.target\n if min_mean_error:\n # Shift error by it's mean in range 100 Hz to 10 kHz\n delta = np.mean(self.error[np.logical_and(self.frequency >= 100, self.frequency <= 10000)])\n self.error -= delta\n self.target += delta\n\n # Smoothed error and equalization results are affected by compensation, reset them\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=True,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def _window_size(self, octaves):\n \"\"\"Calculates moving average window size in indices from octaves.\"\"\"\n # Octaves to coefficient\n k = 2 ** octaves\n # Calculate average step size in frequencies\n steps = []\n for i in range(1, len(self.frequency)):\n steps.append(self.frequency[i] / self.frequency[i - 1])\n step_size = sum(steps) / len(steps)\n # Calculate window size in indices\n # step_size^x = k --> x = ...\n window_size = math.log(k) / math.log(step_size)\n # Half window size\n window_size = window_size\n # Round to integer to be usable as index\n window_size = round(window_size)\n if not window_size % 2:\n window_size += 1\n return window_size\n\n def _sigmoid(self, f_lower, f_upper, a_normal=0.0, a_treble=1.0):\n f_center = np.sqrt(f_upper / f_lower) * f_lower\n half_range = np.log10(f_upper) - np.log10(f_center)\n f_center = np.log10(f_center)\n a = expit((np.log10(self.frequency) - f_center) / (half_range / 4))\n a = a * -(a_normal - a_treble) + a_normal\n return a\n\n def _smoothen_fractional_octave(self,\n data,\n window_size=DEFAULT_SMOOTHING_WINDOW_SIZE,\n iterations=DEFAULT_SMOOTHING_ITERATIONS,\n treble_window_size=None,\n treble_iterations=None,\n treble_f_lower=DEFAULT_TREBLE_SMOOTHING_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_SMOOTHING_F_UPPER):\n \"\"\"Smooths data.\n\n Args:\n window_size: Filter window size in octaves.\n iterations: Number of iterations to run the filter. Each new iteration is using output of previous one.\n treble_window_size: Filter window size for high frequencies.\n treble_iterations: Number of iterations for treble filter.\n treble_f_lower: Lower boundary of transition frequency region. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n treble_f_upper: Upper boundary of transition frequency reqion. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n \"\"\"\n if None in self.frequency or None in data:\n # Must not contain None values\n raise ValueError('None values present, cannot smoothen!')\n\n # Normal filter\n y_normal = data\n with warnings.catch_warnings():\n # Savgol filter uses array indexing which is not future proof, ignoring the warning and trusting that this\n # will be fixed in the future release\n warnings.simplefilter(\"ignore\")\n for i in range(iterations):\n y_normal = savgol_filter(y_normal, self._window_size(window_size), 2)\n\n # Treble filter\n y_treble = data\n for _ in range(treble_iterations):\n y_treble = savgol_filter(y_treble, self._window_size(treble_window_size), 2)\n\n # Transition weighted with sigmoid\n k_treble = self._sigmoid(treble_f_lower, treble_f_upper)\n k_normal = k_treble * -1 + 1\n return y_normal * k_normal + y_treble * k_treble\n\n def smoothen_fractional_octave(self,\n window_size=DEFAULT_SMOOTHING_WINDOW_SIZE,\n iterations=DEFAULT_SMOOTHING_ITERATIONS,\n treble_window_size=DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE,\n treble_iterations=DEFAULT_TREBLE_SMOOTHING_ITERATIONS,\n treble_f_lower=DEFAULT_TREBLE_SMOOTHING_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_SMOOTHING_F_UPPER):\n \"\"\"Smooths data.\n\n Args:\n window_size: Filter window size in octaves.\n iterations: Number of iterations to run the filter. Each new iteration is using output of previous one.\n treble_window_size: Filter window size for high frequencies.\n treble_iterations: Number of iterations for treble filter.\n treble_f_lower: Lower boundary of transition frequency region. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n treble_f_upper: Upper boundary of transition frequency reqion. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n \"\"\"\n if treble_f_upper <= treble_f_lower:\n raise ValueError('Upper transition boundary must be greater than lower boundary')\n\n # Smoothen raw data\n self.smoothed = self._smoothen_fractional_octave(\n self.raw,\n window_size=window_size,\n iterations=iterations,\n treble_window_size=treble_window_size,\n treble_iterations=treble_iterations,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper\n )\n\n if len(self.error):\n # Smoothen error data\n self.error_smoothed = self._smoothen_fractional_octave(\n self.error,\n window_size=window_size,\n iterations=iterations,\n treble_window_size=treble_window_size,\n treble_iterations=treble_iterations,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper\n )\n\n # Equalization is affected by smoothing, reset equalization results\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=False,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def smoothen_heavy_light(self):\n \"\"\"Smoothens data by combining light and heavy smoothing and taking maximum.\n\n Returns:\n None\n \"\"\"\n light = self.copy()\n light.name = 'Light'\n light.smoothen_fractional_octave(\n window_size=1 / 6,\n iterations=1,\n treble_f_lower=100,\n treble_f_upper=10000,\n treble_window_size=1 / 3,\n treble_iterations=1\n )\n\n heavy = self.copy()\n heavy.name = 'Heavy'\n heavy.smoothen_fractional_octave(\n window_size=1 / 3,\n iterations=1,\n treble_f_lower=1000,\n treble_f_upper=6000,\n treble_window_size=1.3,\n treble_iterations=1\n )\n\n combination = self.copy()\n combination.name = 'Combination'\n combination.error = np.max(np.vstack([light.error_smoothed, heavy.error_smoothed]), axis=0)\n combination.smoothen_fractional_octave(\n window_size=1 / 3,\n iterations=1,\n treble_f_lower=100,\n treble_f_upper=10000,\n treble_window_size=1 / 3,\n treble_iterations=1\n )\n\n self.smoothed = combination.smoothed.copy()\n self.error_smoothed = combination.error_smoothed.copy()\n\n # Equalization is affected by smoothing, reset equalization results\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=False,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def equalize(self,\n max_gain=DEFAULT_MAX_GAIN,\n smoothen=True,\n treble_f_lower=DEFAULT_TREBLE_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_F_UPPER,\n treble_max_gain=DEFAULT_TREBLE_MAX_GAIN,\n treble_gain_k=DEFAULT_TREBLE_GAIN_K):\n \"\"\"Creates equalization curve and equalized curve.\n\n Args:\n max_gain: Maximum positive gain in dB\n smoothen: Smooth kinks caused by clipping gain to max gain?\n treble_f_lower: Lower frequency boundary for transition region between normal parameters and treble parameters\n treble_f_upper: Upper frequency boundary for transition reqion between normal parameters and treble parameters\n treble_max_gain: Maximum positive gain in dB in treble region\n treble_gain_k: Coefficient for treble gain, positive and negative. Useful for disbling or reducing \\\n equalization power in treble region. Defaults to 1.0 (not limited).\n \"\"\"\n self.equalization = []\n self.equalized_raw = []\n\n if len(self.error_smoothed):\n error = self.error_smoothed\n elif len(self.error):\n error = self.error\n else:\n raise ValueError('Error data is missing. Call FrequencyResponse.compensate().')\n\n if None in error or None in self.equalization or None in self.equalized_raw:\n # Must not contain None values\n raise ValueError('None values detected during equalization, interpolating data with default parameters.')\n\n # Invert with max gain clipping\n previous_clipped = False\n kink_inds = []\n\n # Max gain at each frequency\n max_gain = self._sigmoid(treble_f_lower, treble_f_upper, a_normal=max_gain, a_treble=treble_max_gain)\n gain_k = self._sigmoid(treble_f_lower, treble_f_upper, a_normal=1.0, a_treble=treble_gain_k)\n for i in range(len(error)):\n gain = - error[i] * gain_k[i]\n clipped = gain > max_gain[i]\n if previous_clipped != clipped:\n kink_inds.append(i)\n previous_clipped = clipped\n if clipped:\n gain = max_gain[i]\n self.equalization.append(gain)\n\n if len(kink_inds) and kink_inds[0] == 0:\n del kink_inds[0]\n\n if smoothen:\n # Smooth out kinks\n window_size = self._window_size(1 / 12)\n doomed_inds = set()\n for i in kink_inds:\n start = i - min(i, (window_size - 1) // 2)\n end = i + 1 + min(len(self.equalization) - i - 1, (window_size - 1) // 2)\n doomed_inds.update(range(start, end))\n doomed_inds = sorted(doomed_inds)\n\n for i in range(1, 3):\n if len(self.frequency) - i in doomed_inds:\n del doomed_inds[doomed_inds.index(len(self.frequency) - i)]\n\n f = np.array([x for i, x in enumerate(self.frequency) if i not in doomed_inds])\n e = np.array([x for i, x in enumerate(self.equalization) if i not in doomed_inds])\n interpolator = InterpolatedUnivariateSpline(np.log10(f), e, k=2)\n self.equalization = interpolator(np.log10(self.frequency))\n else:\n self.equalization = np.array(self.equalization)\n\n # Equalized\n self.equalized_raw = self.raw + self.equalization\n if len(self.smoothed):\n self.equalized_smoothed = self.smoothed + self.equalization\n\n def plot_graph(self,\n fig=None,\n ax=None,\n show=True,\n raw=True,\n error=True,\n smoothed=True,\n error_smoothed=True,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized=True,\n target=True,\n file_path=None,\n f_min=DEFAULT_F_MIN,\n f_max=DEFAULT_F_MAX,\n a_min=None,\n a_max=None,\n color='black',\n close=False):\n \"\"\"Plots frequency response graph.\"\"\"\n if fig is None:\n fig, ax = plt.subplots()\n fig.set_size_inches(12, 8)\n legend = []\n if not len(self.frequency):\n raise ValueError('\\'frequency\\' has no data!')\n if target and len(self.target):\n ax.plot(self.frequency, self.target, linewidth=5, color='lightblue')\n legend.append('Target')\n if smoothed and len(self.smoothed):\n ax.plot(self.frequency, self.smoothed, linewidth=5, color='lightgrey')\n legend.append('Raw Smoothed')\n if error_smoothed and len(self.error_smoothed):\n ax.plot(self.frequency, self.error_smoothed, linewidth=5, color='pink')\n legend.append('Error Smoothed')\n if raw and len(self.raw):\n ax.plot(self.frequency, self.raw, linewidth=1, color=color)\n legend.append('Raw')\n if error and len(self.error):\n ax.plot(self.frequency, self.error, linewidth=1, color='red')\n legend.append('Error')\n if parametric_eq and len(self.parametric_eq):\n ax.plot(self.frequency, self.parametric_eq, linewidth=5, color='lightgreen')\n legend.append('Parametric Eq')\n if fixed_band_eq and len(self.fixed_band_eq):\n ax.plot(self.frequency, self.fixed_band_eq, linewidth=5, color='limegreen')\n legend.append('Fixed Band Eq')\n if equalization and len(self.equalization):\n ax.plot(self.frequency, self.equalization, linewidth=1, color='darkgreen')\n legend.append('Equalization')\n if equalized and len(self.equalized_raw) and not len(self.equalized_smoothed):\n ax.plot(self.frequency, self.equalized_raw, linewidth=1, color='magenta')\n legend.append('Equalized raw')\n if equalized and len(self.equalized_smoothed):\n ax.plot(self.frequency, self.equalized_smoothed, linewidth=1, color='blue')\n legend.append('Equalized smoothed')\n\n ax.set_xlabel('Frequency (Hz)')\n ax.semilogx()\n ax.set_xlim([f_min, f_max])\n ax.set_ylabel('Amplitude (dBr)')\n ax.set_ylim([a_min, a_max])\n ax.set_title(self.name)\n ax.legend(legend, fontsize=8)\n ax.grid(True, which='major')\n ax.grid(True, which='minor')\n ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))\n if file_path is not None:\n file_path = os.path.abspath(file_path)\n fig.savefig(file_path, dpi=120)\n im = Image.open(file_path)\n im = im.convert('P', palette=Image.ADAPTIVE, colors=60)\n im.save(file_path, optimize=True)\n if show:\n plt.show()\n elif close:\n plt.close(fig)\n return fig, ax\n\n def process(self,\n compensation=None,\n min_mean_error=False,\n equalize=False,\n parametric_eq=False,\n fixed_band_eq=False,\n fc=None,\n q=None,\n ten_band_eq=None,\n max_filters=None,\n bass_boost=None,\n iem_bass_boost=None,\n tilt=None,\n sound_signature=None,\n max_gain=DEFAULT_MAX_GAIN,\n treble_f_lower=DEFAULT_TREBLE_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_F_UPPER,\n treble_max_gain=DEFAULT_TREBLE_MAX_GAIN,\n treble_gain_k=DEFAULT_TREBLE_GAIN_K,\n fs=DEFAULT_FS):\n \"\"\"Runs processing pipeline with interpolation, centering, compensation and equalization.\n\n Args:\n compensation: Compensation FrequencyResponse. Must be interpolated and centered.\n min_mean_error: Minimize mean error. Normally all curves cross at 1 kHz but this makes it possible to shift\n error curve so that mean between 100 Hz and 10 kHz is at minimum. Target curve is shifted\n accordingly. Useful for avoiding large bias caused by a narrow notch or peak at 1 kHz.\n equalize: Run equalization?\n parametric_eq: Optimize peaking filters for parametric eq?\n fixed_band_eq: Optimize peaking filters for fixed band (graphic) eq?\n fc: List of center frequencies for fixed band eq\n q: List of Q values for fixed band eq\n ten_band_eq: Optimize filters for standard ten band eq?\n max_filters: List of maximum number of peaking filters for each additive filter optimization run.\n bass_boost: Bass boost amount in dB for over-ear headphones.\n iem_bass_boost: Bass boost amount in dB for in-ear headphones.\n tilt: Target frequency response tilt in db / octave\n sound_signature: Sound signature as FrequencyResponse instance. Raw data will be used.\n max_gain: Maximum positive gain in dB\n treble_f_lower: Lower bound for treble transition region\n treble_f_upper: Upper boud for treble transition region\n treble_max_gain: Maximum gain in treble region\n treble_gain_k: Gain coefficient in treble region\n fs: Sampling frequency\n\n Returns:\n - **peq_filters:** Numpy array of produced parametric eq peaking filters. Each row contains Fc, Q and gain\n - **n_peq_filters:** Number of produced parametric eq peaking filters for each group.\n - **peq_max_gains:** Maximum positive gains in each parametric eq peaking filter group.\n - **fbeq_filters:** Numpy array of produced fixed band peaking filters. Each row contains Fc, Q and gain\n - **n_fbeq_filters:** Number of produced fixed band peaking filters.\n - **fbeq_max_gains:** Maximum positive gain for fixed band eq.\n \"\"\"\n if parametric_eq and not equalize:\n raise ValueError('equalize must be True when parametric_eq is True.')\n\n if ten_band_eq:\n # Ten band eq is a shortcut for setting Fc and Q values to standard 10-band equalizer filters parameters\n fixed_band_eq = True\n fc = np.array([31.25, 62.5, 125, 250, 500, 1000, 2000, 4000, 8000, 16000], dtype='float32')\n q = np.ones(10, dtype='float32') * np.sqrt(2)\n\n if fixed_band_eq:\n if fc is None or q is None:\n raise ValueError('\"fc\" and \"q\" must be given when \"fixed_band_eq\" is given.')\n # Center frequencies are given but Q is a single value\n # Repeat Q to length of Fc\n if type(q) in [list, np.ndarray]:\n if len(q) == 1:\n q = np.repeat(q[0], len(fc))\n elif len(q) != len(fc):\n raise ValueError('q must have one elemet or the same number of elements as fc.')\n elif type(q) not in [list, np.ndarray]:\n q = np.repeat(q, len(fc))\n\n if fixed_band_eq and not equalize:\n raise ValueError('equalize must be True when fixed_band_eq or ten_band_eq is True.')\n\n # Use either normal bass boost (for on-ears) or iem bass boost\n if bass_boost is not None and iem_bass_boost is not None:\n raise TypeError('\"bass_boost\" or \"iem_bass_boost\" can be given but not both')\n elif bass_boost is not None and iem_bass_boost is None:\n bass_boost_f_lower = DEFAULT_OE_BASS_BOOST_F_LOWER\n bass_boost_f_upper = DEFAULT_OE_BASS_BOOST_F_UPPER\n elif iem_bass_boost is not None and bass_boost is None:\n bass_boost = iem_bass_boost\n bass_boost_f_lower = DEFAULT_IE_BASS_BOOST_F_LOWER\n bass_boost_f_upper = DEFAULT_IE_BASS_BOOST_F_UPPER\n else:\n bass_boost = None\n bass_boost_f_lower = None\n bass_boost_f_upper = None\n\n if max_filters is not None and type(max_filters) != list:\n max_filters = [max_filters]\n\n # Interpolate to standard frequency vector\n self.interpolate()\n\n # Center by 1kHz\n self.center()\n\n if compensation is not None:\n # Compensate\n self.compensate(\n compensation,\n bass_boost=bass_boost,\n bass_boost_f_lower=bass_boost_f_lower,\n bass_boost_f_upper=bass_boost_f_upper,\n tilt=tilt,\n sound_signature=sound_signature,\n min_mean_error=min_mean_error\n )\n\n # Smooth data\n self.smoothen_heavy_light()\n\n peq_filters = n_peq_filters = peq_max_gains = fbeq_filters = n_fbeq_filters = nfbeq_max_gains = None\n # Equalize\n if equalize:\n self.equalize(\n max_gain=max_gain,\n smoothen=True,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper,\n treble_max_gain=treble_max_gain,\n treble_gain_k=treble_gain_k\n )\n if parametric_eq:\n # Get the filters\n peq_filters, n_peq_filters, peq_max_gains = self.optimize_parametric_eq(max_filters=max_filters, fs=fs)\n if fixed_band_eq:\n fbeq_filters, n_fbeq_filters, nfbeq_max_gains = self.optimize_fixed_band_eq(fc=fc, q=q, fs=fs)\n\n return peq_filters, n_peq_filters, peq_max_gains, fbeq_filters, n_fbeq_filters, nfbeq_max_gains\n\n @staticmethod\n def main(input_dir=None,\n output_dir=None,\n new_only=False,\n standardize_input=False,\n compensation=None,\n equalize=False,\n parametric_eq=False,\n fixed_band_eq=False,\n fc=None,\n q=None,\n ten_band_eq=False,\n max_filters=None,\n fs=DEFAULT_FS,\n bit_depth=DEFAULT_BIT_DEPTH,\n phase=DEFAULT_PHASE,\n f_res=DEFAULT_F_RES,\n bass_boost=None,\n iem_bass_boost=None,\n tilt=None,\n sound_signature=None,\n max_gain=DEFAULT_MAX_GAIN,\n treble_f_lower=DEFAULT_TREBLE_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_F_UPPER,\n treble_max_gain=DEFAULT_TREBLE_MAX_GAIN,\n treble_gain_k=DEFAULT_TREBLE_GAIN_K,\n show_plot=False):\n \"\"\"Parses files in input directory and produces equalization results in output directory.\"\"\"\n\n start_time = time()\n\n # Dir paths to absolute\n input_dir = os.path.abspath(input_dir)\n glob_files = glob(os.path.join(input_dir, '**', '*.csv'), recursive=True)\n if len(glob_files) == 0:\n raise FileNotFoundError('No CSV files found in \"{}\"'.format(input_dir))\n\n if compensation:\n # Creates FrequencyResponse for compensation data\n compensation_path = os.path.abspath(compensation)\n compensation = FrequencyResponse.read_from_csv(compensation_path)\n compensation.interpolate()\n compensation.center()\n \n if bit_depth == 16:\n bit_depth = \"PCM_16\"\n elif bit_depth == 24:\n bit_depth = \"PCM_24\"\n elif bit_depth == 32:\n bit_depth = \"PCM_32\"\n else:\n raise ValueError('Invalid bit depth. Accepted values are 16, 24 e 32.')\n\n if sound_signature is not None:\n sound_signature = FrequencyResponse.read_from_csv(sound_signature)\n if len(sound_signature.error) > 0:\n # Error data present, replace raw data with it\n sound_signature.raw = sound_signature.error\n sound_signature.interpolate()\n sound_signature.center()\n\n n = 0\n n_total = len(list(glob_files))\n for input_file_path in glob_files:\n if output_dir:\n relative_path = os.path.relpath(input_file_path, input_dir)\n output_file_path = os.path.join(output_dir, relative_path)\n if os.path.isfile(output_file_path) and new_only:\n # Skip file for which result already exists\n continue\n output_dir_path = os.path.dirname(output_file_path)\n\n # Read data from input file\n fr = FrequencyResponse.read_from_csv(input_file_path)\n\n if standardize_input:\n # Overwrite input data in standard sampling and bias\n fr.interpolate()\n fr.center()\n fr.write_to_csv(input_file_path)\n\n # Process and equalize\n peq_filters, n_peq_filters, peq_max_gains, fbeq_filters, n_fbeq_filters, fbeq_max_gains = fr.process(\n compensation=compensation,\n min_mean_error=True,\n equalize=equalize,\n parametric_eq=parametric_eq,\n fixed_band_eq=fixed_band_eq,\n fc=fc,\n q=q,\n ten_band_eq=ten_band_eq,\n max_filters=max_filters,\n bass_boost=bass_boost,\n iem_bass_boost=iem_bass_boost,\n tilt=tilt,\n sound_signature=sound_signature,\n max_gain=max_gain,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper,\n treble_max_gain=treble_max_gain,\n treble_gain_k=treble_gain_k,\n fs=fs\n )\n\n if output_dir:\n # Copy relative path to output directory\n if not os.path.isdir(output_dir_path):\n os.makedirs(output_dir_path, exist_ok=True)\n\n if equalize:\n # Write EqualizerAPO GraphicEq settings to file\n fr.write_eqapo_graphic_eq(output_file_path.replace('.csv', ' GraphicEQ.txt'), normalize=True)\n if parametric_eq:\n # Write ParametricEq settings to file\n fr.write_eqapo_parametric_eq(output_file_path.replace('.csv', ' ParametricEQ.txt'), peq_filters)\n\n # Write fixed band eq\n if fixed_band_eq or ten_band_eq:\n # Write fixed band eq settings to file\n fr.write_eqapo_parametric_eq(output_file_path.replace('.csv', ' FixedBandEQ.txt'), fbeq_filters)\n\n # Write impulse response as WAV\n fss = [44100, 48000] if fs in [44100, 48000] else [fs]\n for _fs in fss:\n if phase in ['linear', 'both']:\n # Write linear phase impulse response\n linear_phase_ir = fr.linear_phase_impulse_response(fs=_fs, f_res=f_res, normalize=True)\n linear_phase_ir = np.tile(linear_phase_ir, (2, 1)).T\n sf.write(\n output_file_path.replace('.csv', ' linear phase {}Hz.wav'.format(_fs)),\n linear_phase_ir,\n _fs,\n bit_depth\n )\n if phase in ['minimum', 'both']:\n # Write minimum phase impulse response\n minimum_phase_ir = fr.minimum_phase_impulse_response(fs=_fs, f_res=f_res, normalize=True)\n minimum_phase_ir = np.tile(minimum_phase_ir, (2, 1)).T\n sf.write(\n output_file_path.replace('.csv', ' minimum phase {}Hz.wav'.format(_fs)),\n minimum_phase_ir,\n _fs,\n bit_depth\n )\n\n # Write results to CSV file\n fr.write_to_csv(output_file_path)\n\n # Write plots to file and optionally display them\n fr.plot_graph(\n show=show_plot,\n close=not show_plot,\n file_path=output_file_path.replace('.csv', '.png'),\n )\n\n # Write README.md\n _readme_path = os.path.join(output_dir_path, 'README.md')\n fr.write_readme(\n _readme_path,\n max_filters=n_peq_filters,\n max_gains=peq_max_gains\n )\n\n elif show_plot:\n fr.plot_graph(show=True, close=False)\n\n n += 1\n print(f'{n}/{n_total} ({n/n_total*100:.1f}%) {time()-start_time:.0f}s: {fr.name}')\n\n @staticmethod\n def cli_args():\n \"\"\"Parses command line arguments.\"\"\"\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('--input_dir', type=str, required=True,\n help='Path to input data directory. Will look for CSV files in the data directory and '\n 'recursively in sub-directories.')\n arg_parser.add_argument('--output_dir', type=str, default=argparse.SUPPRESS,\n help='Path to results directory. Will keep the same relative paths for files found '\n 'in input_dir.')\n arg_parser.add_argument('--standardize_input', action='store_true',\n help='Overwrite input data in standardized sampling and bias?')\n arg_parser.add_argument('--new_only', action='store_true',\n help='Only process input files which don\\'t have results in output directory.')\n arg_parser.add_argument('--compensation', type=str,\n help='File path to CSV containing compensation (target) curve. Compensation is '\n 'necessary when equalizing because all input data is raw microphone data. See '\n '\"compensation\", \"innerfidelity/resources\" and \"headphonecom/resources\".')\n arg_parser.add_argument('--equalize', action='store_true',\n help='Will run equalization if this parameter exists, no value needed.')\n arg_parser.add_argument('--parametric_eq', action='store_true',\n help='Will produce parametric eq settings if this parameter exists, no value needed.')\n arg_parser.add_argument('--fixed_band_eq', action='store_true',\n help='Will produce fixed band eq settings if this parameter exists, no value needed.')\n arg_parser.add_argument('--fc', type=str, help='Comma separated list of center frequencies for fixed band eq.')\n arg_parser.add_argument('--q', type=str,\n help='Comma separated list of Q values for fixed band eq. If only one '\n 'value is passed it is used for all bands. Q value can be '\n 'calculated from bandwidth in N octaves by Q = 2^(N/2)/(2^N-1).')\n arg_parser.add_argument('--ten_band_eq', action='store_true',\n help='Shortcut parameter for activating standard ten band eq optimization.')\n arg_parser.add_argument('--max_filters', type=str, default=argparse.SUPPRESS,\n help='Maximum number of filters for parametric EQ. Multiple cumulative optimization '\n 'runs can be done by giving multiple filter counts separated by \"+\". \"5+5\" would '\n 'create 10 filters where the first 5 are usable independently from the rest 5 and '\n 'the last 5 can only be used with the first 5. This allows to have muliple '\n 'configurations for equalizers with different number of bands available. '\n 'Not limited by default.')\n arg_parser.add_argument('--fs', type=int, default=DEFAULT_FS,\n help='Sampling frequency for impulse response and parametric eq filters. '\n 'Defaults to {}.'.format(DEFAULT_FS))\n arg_parser.add_argument('--bit_depth', type=int, default=DEFAULT_BIT_DEPTH,\n help='Number of bits for every sample in impulse response. '\n 'Defaults to {}.'.format(DEFAULT_BIT_DEPTH))\n arg_parser.add_argument('--phase', type=str, default=DEFAULT_PHASE,\n help='Impulse response phase characteristic. \"minimum\", \"linear\" or \"both\". '\n 'Defaults to \"{}\"'.format(DEFAULT_PHASE))\n arg_parser.add_argument('--f_res', type=float, default=DEFAULT_F_RES,\n help='Frequency resolution for impulse responses. If this is 20 then impulse response '\n 'frequency domain will be sampled every 20 Hz. Filter length for '\n 'impulse responses will be fs/f_res. Defaults to {}.'.format(DEFAULT_F_RES))\n arg_parser.add_argument('--bass_boost', type=float, default=argparse.SUPPRESS,\n help='Target gain for sub-bass in dB. Has sigmoid slope down from {f_min} Hz to '\n '{f_max} Hz. \"--bass_boost\" is mutually exclusive with \"--iem_bass_boost\".'.format(\n f_min=DEFAULT_OE_BASS_BOOST_F_LOWER,\n f_max=DEFAULT_OE_BASS_BOOST_F_UPPER\n )\n )\n arg_parser.add_argument('--iem_bass_boost', type=float, default=argparse.SUPPRESS,\n help='Target gain for sub-bass in dB. Has sigmoid slope down from {f_min} Hz to '\n '{f_max} Hz. \"--iem_bass_boost\" is mutually exclusive with \"--bass_boost\".'.format(\n f_min=DEFAULT_IE_BASS_BOOST_F_LOWER,\n f_max=DEFAULT_IE_BASS_BOOST_F_UPPER\n )\n )\n arg_parser.add_argument('--tilt', type=float, default=argparse.SUPPRESS,\n help='Target tilt in dB/octave. Positive value (upwards slope) will result in brighter '\n 'frequency response and negative value (downwards slope) will result in darker '\n 'frequency response. 1 dB/octave will produce nearly 10 dB difference in '\n 'desired value between 20 Hz and 20 kHz. Tilt is applied with bass boost and both '\n 'will affect the bass gain.')\n arg_parser.add_argument('--sound_signature', type=str,\n help='File path to a sound signature CSV file. The CSV file must be in an AutoEQ '\n 'understandable format. Error data will be used as the sound signature target if '\n 'the CSV file contains an error column and otherwise the raw column will be used. '\n 'This means there are two different options for using sound signature: 1st is '\n 'pointing it to a result CSV file of a previous run and the 2nd is to create a '\n 'CSV file with just frequency and raw columns by hand (or other means). The Sound '\n 'signature graph will be interpolated so any number of point at any frequencies '\n 'will do, making it easy to create simple signatures with as little as two or '\n 'three points.')\n arg_parser.add_argument('--max_gain', type=float, default=DEFAULT_MAX_GAIN,\n help='Maximum positive gain in equalization. Higher max gain allows to equalize deeper '\n 'dips in frequency response but will limit output volume if no analog gain is '\n 'available because positive gain requires negative digital preamp equal to '\n 'maximum positive gain. Defaults to {}.'.format(DEFAULT_MAX_GAIN))\n arg_parser.add_argument('--treble_f_lower', type=float, default=DEFAULT_TREBLE_F_LOWER,\n help='Lower bound for transition region between normal and treble frequencies. Treble '\n 'frequencies can have different max gain and gain K. Defaults to '\n '{}.'.format(DEFAULT_TREBLE_F_LOWER))\n arg_parser.add_argument('--treble_f_upper', type=float, default=DEFAULT_TREBLE_F_UPPER,\n help='Upper bound for transition region between normal and treble frequencies. Treble '\n 'frequencies can have different max gain and gain K. Defaults to '\n '{}.'.format(DEFAULT_TREBLE_F_UPPER))\n arg_parser.add_argument('--treble_max_gain', type=float, default=DEFAULT_TREBLE_MAX_GAIN,\n help='Maximum positive gain for equalization in treble region. Defaults to '\n '{}.'.format(DEFAULT_TREBLE_MAX_GAIN))\n arg_parser.add_argument('--treble_gain_k', type=float, default=DEFAULT_TREBLE_GAIN_K,\n help='Coefficient for treble gain, affects both positive and negative gain. Useful for '\n 'disabling or reducing equalization power in treble region. Defaults to '\n '{}.'.format(DEFAULT_TREBLE_GAIN_K))\n arg_parser.add_argument('--show_plot', action='store_true',\n help='Plot will be shown if this parameter exists, no value needed.')\n args = vars(arg_parser.parse_args())\n if 'bass_boost' in args and 'iem_bass_boost' in args:\n raise TypeError('\"--bass_boost\" or \"--iem_bass_boost\" can be given but not both')\n if 'max_filters' in args:\n args['max_filters'] = [int(x) for x in args['max_filters'].split('+')]\n if 'fc' in args and args['fc'] is not None:\n args['fc'] = [float(x) for x in args['fc'].split(',')]\n if 'q' in args and args['q'] is not None:\n args['q'] = [float(x) for x in args['q'].split(',')]\n return args\n\n\nif __name__ == '__main__':\n FrequencyResponse.main(**FrequencyResponse.cli_args())\n" ]
[ [ "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.log", "numpy.tile", "numpy.min", "numpy.mean", "numpy.sign", "numpy.where", "tensorflow.compat.v1.reset_default_graph", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.placeholder", "numpy.max", "numpy.concatenate", "tensorflow.compat.v1.global_variables_initializer", "numpy.empty", "tensorflow.compat.v1.sin", "tensorflow.compat.v1.train.AdamOptimizer", "matplotlib.pyplot.subplots", "numpy.logical_and", "tensorflow.compat.v1.sqrt", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.log10", "scipy.signal.find_peaks", "numpy.expand_dims", "numpy.vstack", "matplotlib.ticker.StrMethodFormatter", "tensorflow.compat.v1.pow", "numpy.array", "numpy.delete", "numpy.square", "numpy.zeros", "numpy.round", "matplotlib.pyplot.close", "tensorflow.compat.v1.Session", "numpy.argsort", "numpy.clip", "scipy.signal.minimum_phase", "numpy.hstack", "matplotlib.pyplot.show", "numpy.squeeze", "numpy.log2", "numpy.insert", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.get_logger", "tensorflow.compat.v1.square", "tensorflow.compat.v1.concat", "numpy.ones", "tensorflow.compat.v1.cos", "numpy.abs", "numpy.all" ] ]
Anustup900/models
[ "a6a4402cec646925be50c768da45ae79d88c8398" ]
[ "official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/utils/generate_anchors.py" ]
[ "\n\nimport numpy as np\nfrom six.moves import range\n\n\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2**np.arange(3, 6)):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n\n base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)\n for i in range(ratio_anchors.shape[0])])\n return anchors\n\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n" ]
[ [ "numpy.array", "numpy.round", "numpy.arange", "numpy.sqrt", "numpy.hstack" ] ]
YongBeomKim/py-finance
[ "7c7830904b67cd23c47e793e1f47a9702e7765f3" ]
[ "Chapter06/chapter_6_utils.py" ]
[ "import numpy as np\nfrom scipy.stats import norm\n\n\ndef simulate_gbm(s_0, mu, sigma, n_sims, T, N, random_seed=42, antithetic_var=False):\n '''\n Function used for simulating stock returns using Geometric Brownian Motion.\n \n Parameters\n ----------\n s_0 : float\n Initial stock price\n mu : float\n Drift coefficient\n sigma : float\n Diffusion coefficient\n n_sims : int\n Number of simulations paths\n dt : float\n Time increment, most commonly a day\n T : float\n Length of the forecast horizon, same unit as dt\n N : int\n Number of time increments in the forecast horizon\n random_seed : int\n Random seed for reproducibility\n antithetic_var : bool\n Boolean whether to use antithetic variates approach to reduce variance\n\n Returns\n -------\n S_t : np.ndarray\n Matrix (size: n_sims x (T+1)) containing the simulation results. \n Rows respresent sample paths, while columns point of time.\n '''\n\n np.random.seed(random_seed)\n\n # time increment\n dt = T/N\n\n # Brownian\n if antithetic_var:\n dW_ant = np.random.normal(scale=np.sqrt(dt),\n size=(int(n_sims/2), N + 1))\n dW = np.concatenate((dW_ant, -dW_ant), axis=0)\n else:\n dW = np.random.normal(scale=np.sqrt(dt),\n size=(n_sims, N + 1))\n\n # simulate the evolution of the process\n S_t = s_0 * np.exp(np.cumsum((mu - 0.5 * sigma ** 2) * dt + sigma * dW,\n axis=1))\n S_t[:, 0] = s_0\n\n return S_t\n\ndef black_scholes_analytical(S_0, K, T, r, sigma, type='call'):\n '''\n Function used for calculating the price of European options using the analytical form of the Black-Scholes model.\n \n Parameters\n ------------\n s_0 : float\n Initial stock price\n K : float\n Strike price\n T : float\n Time to maturity in years\n r : float\n Annualized risk-free rate\n sigma : float\n Standard deviation of the stock returns\n \n Returns\n -----------\n option_premium : float\n The premium on the option calculated using the Black-Scholes model\n '''\n\n d1 = (np.log(S_0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n d2 = (np.log(S_0 / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n\n if type == 'call':\n option_premium = (S_0 * norm.cdf(d1, 0, 1) - K * np.exp(-r * T) * norm.cdf(d2, 0, 1))\n elif type == 'put':\n option_premium = (K * np.exp(-r * T) * norm.cdf(-d2, 0, 1) - S_0 * norm.cdf(-d1, 0, 1))\n else:\n raise ValueError('Wrong input for type!')\n\n return option_premium\n\n\ndef lsmc_american_option(S_0, K, T, N, r, sigma, n_sims, option_type, poly_degree, random_seed=42):\n '''\n Function used for calculating the price of American options using Least Squares Monte Carlo \n algorithm of Longstaff and Schwartz (2001).\n \n Parameters\n ------------\n S_0 : float\n Initial stock price\n K : float\n Strike price\n T : float\n Time to maturity in years\n N : int\n Number of time increments in the forecast horizon\n r : float\n Annualized risk-free rate\n sigma : float\n Standard deviation of the stock returns\n n_sims : int\n Number of paths to simulate\n option_type : str\n Type of the option. Allowable: ['call', 'put']\n poly_degree : int\n Degree of the polynomial to fit in the LSMC algorithm\n random_seed : int\n Random seed for reproducibility\n \n Returns\n -----------\n option_premium : float\n The premium on the option \n '''\n\n dt = T / N\n discount_factor = np.exp(-r * dt)\n\n gbm_simulations = simulate_gbm(s_0=S_0, mu=r, sigma=sigma, \n n_sims=n_sims, T=T, N=N,\n random_seed=random_seed)\n\n if option_type == 'call':\n payoff_matrix = np.maximum(\n gbm_simulations - K, np.zeros_like(gbm_simulations))\n elif option_type == 'put':\n payoff_matrix = np.maximum(\n K - gbm_simulations, np.zeros_like(gbm_simulations))\n\n value_matrix = np.zeros_like(payoff_matrix)\n value_matrix[:, -1] = payoff_matrix[:, -1]\n\n for t in range(N - 1, 0, -1):\n regression = np.polyfit(\n gbm_simulations[:, t], value_matrix[:, t + 1] * discount_factor, poly_degree)\n continuation_value = np.polyval(regression, gbm_simulations[:, t])\n value_matrix[:, t] = np.where(payoff_matrix[:, t] > continuation_value,\n payoff_matrix[:, t],\n value_matrix[:, t + 1] * discount_factor)\n\n option_premium = np.mean(value_matrix[:, 1] * discount_factor)\n return option_premium\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.log", "numpy.random.seed", "numpy.exp", "numpy.mean", "numpy.polyval", "numpy.where", "numpy.polyfit", "numpy.sqrt", "numpy.cumsum", "scipy.stats.norm.cdf" ] ]
zeynepCankara/NTU_DLCV2019
[ "2dc44584ec7b9e1d84e688551eb8cef48d501b45" ]
[ "final/beto/GatedConvolution_pytorch/evaluation/inception_score/inception_score.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch.utils.data\n\nfrom torchvision.models.inception import inception_v3\n\nimport numpy as np\nfrom scipy.stats import entropy\n\ndef inception_score(imgs, cuda=True, batch_size=32, resize=True, splits=1):\n \"\"\"Computes the inception score of the generated images imgs\n\n imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]\n cuda -- whether or not to run on GPU\n batch_size -- batch size for feeding into Inception v3\n splits -- number of splits\n \"\"\"\n\n N = len(imgs)\n\n assert batch_size > 0\n assert N > batch_size\n\n # Set up dtype\n if cuda:\n dtype = torch.cuda.FloatTensor\n else:\n if torch.cuda.is_available():\n print(\"WARNING: You have a CUDA device, so you should probably set cuda=True\")\n dtype = torch.FloatTensor\n\n # Set up dataloader\n dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)\n\n # Load inception model\n inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)\n inception_model.eval();\n #up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)\n def get_pred(x):\n if resize:\n x = F.interpolate(x, size=(299, 299), mode='bilinear')\n x = inception_model(x)\n return F.softmax(x, dim=1).data.cpu().numpy()\n\n # Get predictions\n preds = np.zeros((N, 1000))\n\n for i, batch in enumerate(dataloader, 0):\n batch = batch.type(dtype)\n batchv = Variable(batch)\n batch_size_i = batch.size()[0]\n\n preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)\n\n # Now compute the mean kl-div\n split_scores = []\n\n for k in range(splits):\n part = preds[k * (N // splits): (k+1) * (N // splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\n return np.mean(split_scores), np.std(split_scores)\n\nif __name__ == '__main__':\n class IgnoreLabelDataset(torch.utils.data.Dataset):\n def __init__(self, orig):\n self.orig = orig\n\n def __getitem__(self, index):\n return self.orig[index][0]\n\n def __len__(self):\n return len(self.orig)\n\n import torchvision.datasets as dset\n import torchvision.transforms as transforms\n\n cifar = dset.CIFAR10(root='data/', download=True,\n transform=transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n )\n\n #IgnoreLabelDataset(cifar)\n\n print (\"Calculating Inception Score...\")\n print (inception_score(IgnoreLabelDataset(cifar), cuda=True, batch_size=32, resize=True, splits=10))\n" ]
[ [ "numpy.zeros", "torch.autograd.Variable", "torch.nn.functional.interpolate", "scipy.stats.entropy", "numpy.mean", "numpy.std", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.functional.softmax" ] ]
gkanapathy/neural_prophet
[ "ceabfca7a11b9501e318b78b032251601268eaeb" ]
[ "neuralprophet/forecaster.py" ]
[ "import time\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport logging\nfrom tqdm import tqdm\n\nfrom neuralprophet import configure\nfrom neuralprophet import time_net\nfrom neuralprophet import time_dataset\nfrom neuralprophet import df_utils\nfrom neuralprophet import utils\nfrom neuralprophet.plot_forecast import plot, plot_components\nfrom neuralprophet.plot_model_parameters import plot_parameters\nfrom neuralprophet import metrics\n\nlog = logging.getLogger(\"NP.forecaster\")\n\n\nMETRICS = {\n \"mae\": metrics.MAE,\n \"mse\": metrics.MSE,\n \"rmse\": metrics.RMSE,\n}\n\n\nclass NeuralProphet:\n \"\"\"NeuralProphet forecaster.\n\n A simple yet powerful forecaster that models:\n Trend, seasonality, events, holidays, auto-regression, lagged covariates, and future-known regressors.\n Can be regualrized and configured to model nonlinear relationships.\n\n Parameters\n ----------\n COMMENT\n Trend Config\n COMMENT\n growth : {'off' or 'linear'}, default 'linear'\n Set use of trend growth type.\n\n Options:\n * ``off``: no trend.\n * (default) ``linear``: fits a piece-wise linear trend with ``n_changepoints + 1`` segments\n * ``discontinuous``: For advanced users only - not a conventional trend,\n allows arbitrary jumps at each trend changepoint\n\n changepoints : {list of str, list of np.datetimes or np.array of np.datetimes}, optional\n Manually set dates at which to include potential changepoints.\n\n Note\n ----\n Does not accept ``np.array`` of ``np.str``. If not specified, potential changepoints are selected automatically.\n\n n_changepoints : int\n Number of potential trend changepoints to include.\n\n Note\n ----\n Changepoints are selected uniformly from the first ``changepoint_range`` proportion of the history.\n Ignored if manual ``changepoints`` list is supplied.\n changepoints_range : float\n Proportion of history in which trend changepoints will be estimated.\n\n e.g. set to 0.8 to allow changepoints only in the first 80% of training data.\n Ignored if manual ``changepoints`` list is supplied.\n trend_reg : float, optional\n Parameter modulating the flexibility of the automatic changepoint selection.\n\n Note\n ----\n Large values (~1-100) will limit the variability of changepoints.\n Small values (~0.001-1.0) will allow changepoints to change faster.\n default: 0 will fully fit a trend to each segment.\n\n trend_reg_threshold : bool, optional\n Allowance for trend to change without regularization.\n\n Options\n * ``True``: Automatically set to a value that leads to a smooth trend.\n * (default) ``False``: All changes in changepoints are regularized\n\n COMMENT\n Seasonality Config\n COMMENT\n yearly_seasonality : bool, int\n Fit yearly seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n weekly_seasonality : bool, int\n Fit monthly seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n daily_seasonality : bool, int\n Fit daily seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n seasonality_mode : str\n Specifies mode of seasonality\n\n Options\n * (default) ``additive``\n * ``multiplicative``\n seasonality_reg : float, optional\n Parameter modulating the strength of the seasonality model.\n\n Note\n ----\n Smaller values (~0.1-1) allow the model to fit larger seasonal fluctuations,\n larger values (~1-100) dampen the seasonality.\n default: None, no regularization\n\n COMMENT\n AR Config\n COMMENT\n n_lags : int\n Previous time series steps to include in auto-regression. Aka AR-order\n ar_reg : float, optional\n how much sparsity to enduce in the AR-coefficients\n\n Note\n ----\n Large values (~1-100) will limit the number of nonzero coefficients dramatically.\n Small values (~0.001-1.0) will allow more non-zero coefficients.\n default: 0 no regularization of coefficients.\n\n COMMENT\n Model Config\n COMMENT\n n_forecasts : int\n Number of steps ahead of prediction time step to forecast.\n num_hidden_layers : int, optional\n number of hidden layer to include in AR-Net (defaults to 0)\n d_hidden : int, optional\n dimension of hidden layers of the AR-Net. Ignored if ``num_hidden_layers`` == 0.\n\n COMMENT\n Train Config\n COMMENT\n learning_rate : float\n Maximum learning rate setting for 1cycle policy scheduler.\n\n Note\n ----\n Default ``None``: Automatically sets the ``learning_rate`` based on a learning rate range test.\n For manual user input, (try values ~0.001-10).\n epochs : int\n Number of epochs (complete iterations over dataset) to train model.\n\n Note\n ----\n Default ``None``: Automatically sets the number of epochs based on dataset size.\n For best results also leave batch_size to None. For manual values, try ~5-500.\n batch_size : int\n Number of samples per mini-batch.\n\n If not provided, ``batch_size`` is approximated based on dataset size.\n For manual values, try ~8-1024.\n For best results also leave ``epochs`` to ``None``.\n newer_samples_weight: float, default 2.0\n Sets factor by which the model fit is skewed towards more recent observations.\n\n Controls the factor by which final samples are weighted more compared to initial samples.\n Applies a positional weighting to each sample's loss value.\n\n e.g. ``newer_samples_weight = 2``: final samples are weighted twice as much as initial samples.\n newer_samples_start: float, default 0.0\n Sets beginning of 'newer' samples as fraction of training data.\n\n Throughout the range of 'newer' samples, the weight is increased\n from ``1.0/newer_samples_weight`` initially to 1.0 at the end,\n in a monotonously increasing function (cosine from pi to 2*pi).\n loss_func : str, torch.nn.functional.loss\n Type of loss to use:\n\n Options\n * (default) ``Huber``: Huber loss function\n * ``MSE``: Mean Squared Error loss function\n * ``MAE``: Mean Absolute Error loss function\n * ``torch.nn.functional.loss.``: loss or callable for custom loss, eg. L1-Loss\n\n Examples\n --------\n >>> from neuralprophet import NeuralProphet\n >>> import torch\n >>> import torch.nn as nn\n >>> m = NeuralProphet(loss_func=torch.nn.L1Loss)\n\n collect_metrics : list of str, bool\n Set metrics to compute.\n\n Valid: [``mae``, ``rmse``, ``mse``]\n\n Options\n * (default) ``True``: [``mae``, ``rmse``]\n * ``False``: No metrics\n\n COMMENT\n Missing Data\n COMMENT\n impute_missing : bool\n whether to automatically impute missing dates/values\n\n Note\n ----\n imputation follows a linear method up to 10 missing values, more are filled with trend.\n\n COMMENT\n Data Normalization\n COMMENT\n normalize : str\n Type of normalization to apply to the time series.\n\n Options\n * ``off`` bypasses data normalization\n * (default, binary timeseries) ``minmax`` scales the minimum value to 0.0 and the maximum value to 1.0\n * ``standardize`` zero-centers and divides by the standard deviation\n * (default) ``soft`` scales the minimum value to 0.0 and the 95th quantile to 1.0\n * ``soft1`` scales the minimum value to 0.1 and the 90th quantile to 0.9\n global_normalization : bool\n Activation of global normalization\n\n Options\n * ``True``: dict of dataframes is used as global_time_normalization\n * (default) ``False``: local normalization\n global_time_normalization (bool):\n Specifies global time normalization\n\n Options\n * (default) ``True``: only valid in case of global modeling local normalization\n * ``False``: set time data_params locally\n unknown_data_normalization : bool\n Specifies unknown data normalization\n\n Options\n * ``True``: test data is normalized with global data params even if trained with local data params (global modeling with local normalization)\n * (default) ``False``: no global modeling with local normalization\n \"\"\"\n\n def __init__(\n self,\n growth=\"linear\",\n changepoints=None,\n n_changepoints=10,\n changepoints_range=0.9,\n trend_reg=0,\n trend_reg_threshold=False,\n yearly_seasonality=\"auto\",\n weekly_seasonality=\"auto\",\n daily_seasonality=\"auto\",\n seasonality_mode=\"additive\",\n seasonality_reg=0,\n n_forecasts=1,\n n_lags=0,\n num_hidden_layers=0,\n d_hidden=None,\n ar_reg=None,\n learning_rate=None,\n epochs=None,\n batch_size=None,\n loss_func=\"Huber\",\n optimizer=\"AdamW\",\n newer_samples_weight=2,\n newer_samples_start=0.0,\n impute_missing=True,\n collect_metrics=True,\n normalize=\"auto\",\n global_normalization=False,\n global_time_normalization=True,\n unknown_data_normalization=False,\n ):\n kwargs = locals()\n\n # General\n self.name = \"NeuralProphet\"\n self.n_forecasts = n_forecasts\n\n # Data Normalization settings\n self.config_normalization = configure.Normalization(\n normalize=normalize,\n global_normalization=global_normalization,\n global_time_normalization=global_time_normalization,\n unknown_data_normalization=unknown_data_normalization,\n )\n\n # Missing Data Preprocessing\n self.impute_missing = impute_missing\n self.impute_limit_linear = 5\n self.impute_rolling = 20\n\n # Training\n self.config_train = configure.from_kwargs(configure.Train, kwargs)\n\n if collect_metrics is None:\n collect_metrics = []\n elif collect_metrics is True:\n collect_metrics = [\"mae\", \"rmse\"]\n elif isinstance(collect_metrics, str):\n if not collect_metrics.lower() in METRICS.keys():\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n collect_metrics = [collect_metrics]\n elif isinstance(collect_metrics, list):\n if not all([m.lower() in METRICS.keys() for m in collect_metrics]):\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n elif collect_metrics is not False:\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n\n self.metrics = None\n if isinstance(collect_metrics, list):\n self.metrics = metrics.MetricsCollection(\n metrics=[metrics.LossMetric(self.config_train.loss_func)]\n + [METRICS[m.lower()]() for m in collect_metrics],\n value_metrics=[metrics.ValueMetric(\"RegLoss\")],\n )\n\n # AR\n self.config_ar = configure.from_kwargs(configure.AR, kwargs)\n self.n_lags = self.config_ar.n_lags\n if n_lags == 0 and n_forecasts > 1:\n self.n_forecasts = 1\n log.warning(\n \"Changing n_forecasts to 1. Without lags, the forecast can be \"\n \"computed for any future time, independent of lagged values\"\n )\n\n # Model\n self.config_model = configure.from_kwargs(configure.Model, kwargs)\n\n # Trend\n self.config_trend = configure.from_kwargs(configure.Trend, kwargs)\n\n # Seasonality\n self.season_config = configure.AllSeason(\n mode=seasonality_mode,\n reg_lambda=seasonality_reg,\n yearly_arg=yearly_seasonality,\n weekly_arg=weekly_seasonality,\n daily_arg=daily_seasonality,\n )\n self.config_train.reg_lambda_season = self.season_config.reg_lambda\n\n # Events\n self.events_config = None\n self.country_holidays_config = None\n\n # Extra Regressors\n self.config_covar = None\n self.regressors_config = None\n\n # set during fit()\n self.data_freq = None\n\n # Set during _train()\n self.fitted = False\n self.data_params = None\n self.optimizer = None\n self.scheduler = None\n self.model = None\n\n # set during prediction\n self.future_periods = None\n # later set by user (optional)\n self.highlight_forecast_step_n = None\n self.true_ar_weights = None\n\n def add_lagged_regressor(self, names, regularization=None, normalize=\"auto\", only_last_value=False):\n \"\"\"Add a covariate or list of covariate time series as additional lagged regressors to be used for fitting and predicting.\n The dataframe passed to ``fit`` and ``predict`` will have the column with the specified name to be used as\n lagged regressor. When normalize=True, the covariate will be normalized unless it is binary.\n\n Parameters\n ----------\n names : string or list\n name of the regressor/list of regressors.\n regularization : float\n optional scale for regularization strength\n normalize : bool\n optional, specify whether this regressor will benormalized prior to fitting.\n if ``auto``, binary regressors will not be normalized.\n only_last_value : bool\n specifies last value handling\n\n Options\n * (default) ``False`` use same number of lags as auto-regression\n * ``True`` only use last known value as input\n \"\"\"\n if self.fitted:\n raise Exception(\"Covariates must be added prior to model fitting.\")\n if self.n_lags == 0:\n raise Exception(\"Covariates must be set jointly with Auto-Regression.\")\n if not isinstance(names, list):\n names = [names]\n for name in names:\n self._validate_column_name(name)\n if self.config_covar is None:\n self.config_covar = OrderedDict({})\n self.config_covar[name] = configure.Covar(\n reg_lambda=regularization,\n normalize=normalize,\n as_scalar=only_last_value,\n )\n return self\n\n def add_future_regressor(self, name, regularization=None, normalize=\"auto\", mode=\"additive\"):\n \"\"\"Add a regressor as lagged covariate with order 1 (scalar) or as known in advance (also scalar).\n The dataframe passed to :meth:`fit` and :meth:`predict` will have a column with the specified name to be used as\n a regressor. When normalize=True, the regressor will be normalized unless it is binary.\n\n Parameters\n ----------\n name : string\n name of the regressor.\n regularization : float\n optional scale for regularization strength\n normalize : bool\n optional, specify whether this regressor will be normalized prior to fitting.\n\n Note\n ----\n if ``auto``, binary regressors will not be normalized.\n mode : str\n ``additive`` (default) or ``multiplicative``.\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Regressors must be added prior to model fitting.\")\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n self._validate_column_name(name)\n\n if self.regressors_config is None:\n self.regressors_config = {}\n self.regressors_config[name] = configure.Regressor(reg_lambda=regularization, normalize=normalize, mode=mode)\n return self\n\n def add_events(self, events, lower_window=0, upper_window=0, regularization=None, mode=\"additive\"):\n \"\"\"\n Add user specified events and their corresponding lower, upper windows and the\n regularization parameters into the NeuralProphet object\n\n Parameters\n ----------\n events : str, list\n name or list of names of user specified events\n lower_window : int\n the lower window for the events in the list of events\n upper_window : int\n the upper window for the events in the list of events\n regularization : float\n optional scale for regularization strength\n mode : str\n ``additive`` (default) or ``multiplicative``.\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Events must be added prior to model fitting.\")\n\n if self.events_config is None:\n self.events_config = OrderedDict({})\n\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n\n if not isinstance(events, list):\n events = [events]\n\n for event_name in events:\n self._validate_column_name(event_name)\n self.events_config[event_name] = configure.Event(\n lower_window=lower_window, upper_window=upper_window, reg_lambda=regularization, mode=mode\n )\n return self\n\n def add_country_holidays(self, country_name, lower_window=0, upper_window=0, regularization=None, mode=\"additive\"):\n \"\"\"\n Add a country into the NeuralProphet object to include country specific holidays\n and create the corresponding configs such as lower, upper windows and the regularization\n parameters\n\n Parameters\n ----------\n country_name : string\n name of the country\n lower_window : int\n the lower window for all the country holidays\n upper_window : int\n the upper window for all the country holidays\n regularization : float\n optional scale for regularization strength\n mode : str\n ``additive`` (default) or ``multiplicative``.\n \"\"\"\n if self.fitted:\n raise Exception(\"Country must be specified prior to model fitting.\")\n\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n self.country_holidays_config = configure.Holidays(\n country=country_name,\n lower_window=lower_window,\n upper_window=upper_window,\n reg_lambda=regularization,\n mode=mode,\n )\n self.country_holidays_config.init_holidays()\n return self\n\n def add_seasonality(self, name, period, fourier_order):\n \"\"\"Add a seasonal component with specified period, number of Fourier components, and regularization.\n\n Increasing the number of Fourier components allows the seasonality to change more quickly\n (at risk of overfitting).\n Note: regularization and mode (additive/multiplicative) are set in the main init.\n\n Parameters\n ----------\n name : string\n name of the seasonality component.\n period : float\n number of days in one period.\n fourier_order : int\n number of Fourier components to use.\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Seasonality must be added prior to model fitting.\")\n if name in [\"daily\", \"weekly\", \"yearly\"]:\n log.error(\"Please use inbuilt daily, weekly, or yearly seasonality or set another name.\")\n # Do not Allow overwriting built-in seasonalities\n self._validate_column_name(name, seasons=True)\n if fourier_order <= 0:\n raise ValueError(\"Fourier Order must be > 0\")\n self.season_config.append(name=name, period=period, resolution=fourier_order, arg=\"custom\")\n return self\n\n def fit(self, df, freq=\"auto\", validation_df=None, progress=\"bar\", minimal=False):\n \"\"\"Train, and potentially evaluate model.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n containing column ``ds``, ``y`` with all data\n freq : str\n Data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n validation_df : pd.DataFrame, dict\n if provided, model with performance will be evaluated after each training epoch over this data.\n epochs : int\n number of epochs to train (overrides default setting).\n default: if not specified, uses self.epochs\n progress : str\n Method of progress display\n\n Options\n * (default) ``bar`` display updating progress bar (tqdm)\n * ``print`` print out progress (fallback option)\n * ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.\n * ``plot-all`` extended to all recorded metrics.\n minimal : bool\n whether to train without any printouts or metrics collection\n\n Returns\n -------\n pd.DataFrame\n metrics with training and potentially evaluation metrics\n \"\"\"\n\n df_dict, _ = df_utils.prep_copy_df_dict(df)\n if self.fitted is True:\n log.error(\"Model has already been fitted. Re-fitting may break or produce different results.\")\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)\n self.data_freq = df_utils.infer_frequency(df_dict, n_lags=self.n_lags, freq=freq)\n df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)\n if validation_df is not None and (self.metrics is None or minimal):\n log.warning(\"Ignoring validation_df because no metrics set or minimal training set.\")\n validation_df = None\n if validation_df is None:\n if minimal:\n self._train_minimal(df_dict, progress_bar=progress == \"bar\")\n metrics_df = None\n else:\n metrics_df = self._train(df_dict, progress=progress)\n else:\n df_val_dict, _ = df_utils.prep_copy_df_dict(validation_df)\n df_val_dict = self._check_dataframe(df_val_dict, check_y=False, exogenous=False)\n df_val_dict = self._handle_missing_data(df_val_dict, freq=self.data_freq)\n metrics_df = self._train(df_dict, df_val_dict=df_val_dict, progress=progress)\n\n self.fitted = True\n return metrics_df\n\n def predict(self, df, decompose=True, raw=False):\n \"\"\"Runs the model to make predictions.\n\n Expects all data needed to be present in dataframe.\n If you are predicting into the unknown future and need to add future regressors or events,\n please prepare data with make_future_dataframe.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with data\n decompose : bool\n whether to add individual components of forecast to the dataframe\n raw : bool\n specifies raw data\n\n Options\n * (default) ``False``: returns forecasts sorted by target (highlighting forecast age)\n * ``True``: return the raw forecasts sorted by forecast start date\n\n Returns\n -------\n pd.DataFrame\n dependent on ``raw``\n\n Note\n ----\n\n ``raw == True``: columns ``ds``, ``y``, and [``step<i>``] where step<i> refers to the i-step-ahead\n prediction *made at* this row's datetime, e.g. step3 is the prediction for 3 steps into the future,\n predicted using information up to (excluding) this datetime.\n\n ``raw == False``: columns ``ds``, ``y``, ``trend`` and [``yhat<i>``] where yhat<i> refers to\n the i-step-ahead prediction for this row's datetime,\n e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, \"3 steps old\".\n \"\"\"\n if raw:\n log.warning(\"Raw forecasts are incompatible with plotting utilities\")\n if self.fitted is False:\n raise ValueError(\"Model has not been fitted. Predictions will be random.\")\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n # to get all forecasteable values with df given, maybe extend into future:\n df_dict, periods_added = self._maybe_extend_df(df_dict)\n df_dict = self._prepare_dataframe_to_predict(df_dict)\n # normalize\n df_dict = self._normalize(df_dict)\n for key, df_i in df_dict.items():\n dates, predicted, components = self._predict_raw(df_i, key, include_components=decompose)\n if raw:\n fcst = self._convert_raw_predictions_to_raw_df(dates, predicted, components)\n if periods_added[key] > 0:\n fcst = fcst[:-1]\n else:\n fcst = self._reshape_raw_predictions_to_forecst_df(df_i, predicted, components)\n if periods_added[key] > 0:\n fcst = fcst[: -periods_added[key]]\n df_dict[key] = fcst\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def test(self, df):\n \"\"\"Evaluate model on holdout data.\n\n Parameters\n ----------\n df : pd.DataFrame,dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with with holdout data\n Returns\n -------\n pd.DataFrame\n evaluation metrics\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n if self.fitted is False:\n log.warning(\"Model has not been fitted. Test results will be random.\")\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)\n _ = df_utils.infer_frequency(df_dict, n_lags=self.n_lags, freq=self.data_freq)\n df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)\n loader = self._init_val_loader(df_dict)\n val_metrics_df = self._evaluate(loader)\n if not self.config_normalization.global_normalization:\n log.warning(\"Note that the metrics are displayed in normalized scale because of local normalization.\")\n return val_metrics_df\n\n def split_df(self, df, freq=\"auto\", valid_p=0.2, local_split=False):\n \"\"\"Splits timeseries df into train and validation sets.\n\n Prevents leakage of targets. Sharing/Overbleed of inputs can be configured.\n Also performs basic data checks and fills in missing data.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n valid_p : float\n fraction of data to use for holdout validation set, targets will still never be shared.\n local_split : bool\n Each dataframe will be split according to valid_p locally (in case of dict of dataframes\n\n Returns\n -------\n tuple of two pd.DataFrames\n\n training data\n\n validation data\n\n See Also\n --------\n crossvalidation_split_df : Splits timeseries data in k folds for crossvalidation.\n double_crossvalidation_split_df : Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'ds': pd.date_range(start='2022-12-01', periods=5,\n ... freq='D'), 'y': [9.59, 8.52, 8.18, 8.07, 7.89]})\n >>> df2 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,\n ... freq='D'), 'y': [8.71, 8.09, 7.84, 7.65, 8.02]})\n >>> df3 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,\n ... freq='D'), 'y': [7.67, 7.64, 7.55, 8.25, 8.3]})\n >>> df3\n ds\t y\n 0\t2022-12-09\t7.67\n 1\t2022-12-10\t7.64\n 2\t2022-12-11\t7.55\n 3\t2022-12-12\t8.25\n 4\t2022-12-13\t8.30\n\n One can define a dict with many time series.\n >>> df_dict = {'data1': df1, 'data2': df2, 'data3': df3}\n\n You can split a single dataframe.\n >>> (df_train, df_val) = m.split_df(df3, valid_p=0.2)\n >>> df_train\n ds\t y\n 0\t2022-12-09\t7.67\n 1\t2022-12-10\t7.64\n 2\t2022-12-11\t7.55\n 3\t2022-12-12\t8.25\n >>> df_val\n ds\t y\n 0\t2022-12-13\t8.3\n\n You can also use a dict of dataframes (especially useful for global modeling), which will account for the time range of the whole group of time series as default.\n >>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2)\n >>> df_dict_train\n {'data1': ds y\n 0 2022-12-01 9.59\n 1 2022-12-02 8.52\n 2 2022-12-03 8.18\n 3 2022-12-04 8.07\n 4 2022-12-05 7.89,\n 'data2': ds y\n 0 2022-12-09 8.71\n 1 2022-12-10 8.09\n 2 2022-12-11 7.84,\n 'data3': ds y\n 0 2022-12-09 7.67\n 1 2022-12-10 7.64\n 2 2022-12-11 7.55}\n >>> df_dict_val\n {'data2': ds y\n 0 2022-12-12 7.65\n 1 2022-12-13 8.02,\n 'data3': ds y\n 0 2022-12-12 8.25\n 1 2022-12-13 8.30}\n\n In some applications, splitting locally each time series may be helpful. In this case, one should set `local_split` to True.\n >>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2,\n ... local_split=True)\n >>> df_dict_train\n {'data1': ds y\n 0 2022-12-01 9.59\n 1 2022-12-02 8.52\n 2 2022-12-03 8.18\n 3 2022-12-04 8.07,\n 'data2': ds y\n 0 2022-12-09 8.71\n 1 2022-12-10 8.09\n 2 2022-12-11 7.84\n 3 2022-12-12 7.65,\n 'data3': ds y\n 0 2022-12-09 7.67\n 1 2022-12-10 7.64\n 2 2022-12-11 7.55\n 3 2022-12-12 8.25}\n >>> df_dict_val\n {'data1': ds y\n 0 2022-12-05 7.89,\n 'data2': ds y\n 0 2022-12-13 8.02,\n 'data3': ds y\n 0 2022-12-13 8.3}\n \"\"\"\n df, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n df_train, df_val = df_utils.split_df(\n df,\n n_lags=self.n_lags,\n n_forecasts=self.n_forecasts,\n valid_p=valid_p,\n inputs_overbleed=True,\n local_split=local_split,\n )\n df_train = df_utils.maybe_get_single_df_from_df_dict(df_train, received_unnamed_df)\n df_val = df_utils.maybe_get_single_df_from_df_dict(df_val, received_unnamed_df)\n return df_train, df_val\n\n def crossvalidation_split_df(self, df, freq=\"auto\", k=5, fold_pct=0.1, fold_overlap_pct=0.5):\n \"\"\"Splits timeseries data in k folds for crossvalidation.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n k : int\n number of CV folds\n fold_pct : float\n percentage of overall samples to be in each fold\n fold_overlap_pct : float\n percentage of overlap between the validation folds.\n\n Returns\n -------\n list of k tuples [(df_train, df_val), ...]\n\n training data\n\n validation data\n \"\"\"\n if isinstance(df, dict):\n raise NotImplementedError(\"Crossvalidation not implemented for multiple dataframes\")\n df = df.copy(deep=True)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n folds = df_utils.crossvalidation_split_df(\n df,\n n_lags=self.n_lags,\n n_forecasts=self.n_forecasts,\n k=k,\n fold_pct=fold_pct,\n fold_overlap_pct=fold_overlap_pct,\n )\n return folds\n\n def double_crossvalidation_split_df(self, df, freq=\"auto\", k=5, valid_pct=0.10, test_pct=0.10):\n \"\"\"Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n k : int\n number of CV folds\n valid_pct : float\n percentage of overall samples to be in validation\n test_pct : float\n percentage of overall samples to be in test\n\n Returns\n -------\n tuple of k tuples [(folds_val, folds_test), …]\n elements same as :meth:`crossvalidation_split_df` returns\n \"\"\"\n if isinstance(df, dict):\n raise NotImplementedError(\"Double crossvalidation not implemented for multiple dataframes\")\n df = df.copy(deep=True)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n folds_val, folds_test = df_utils.double_crossvalidation_split_df(\n df,\n n_lags=self.n_lags,\n n_forecasts=self.n_forecasts,\n k=k,\n valid_pct=valid_pct,\n test_pct=test_pct,\n )\n\n return folds_val, folds_test\n\n def create_df_with_events(self, df, events_df):\n \"\"\"\n Create a concatenated dataframe with the time series data along with the events data expanded.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n events_df : dict, pd.DataFrame\n containing column ``ds`` and ``event``\n\n Returns\n -------\n dict, pd.DataFrame\n columns ``y``, ``ds`` and other user specified events\n \"\"\"\n if self.events_config is None:\n raise Exception(\n \"The events configs should be added to the NeuralProphet object (add_events fn)\"\n \"before creating the data with events features\"\n )\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=False)\n if isinstance(events_df, pd.DataFrame):\n events_df_i = events_df.copy(deep=True)\n for df_name, df_i in df_dict.items():\n if isinstance(events_df, dict):\n events_df_i = events_df[df_name].copy(deep=True)\n for name in events_df_i[\"event\"].unique():\n assert name in self.events_config\n df_out = df_utils.convert_events_to_features(\n df_i,\n events_config=self.events_config,\n events_df=events_df_i,\n )\n df_dict[df_name] = df_out.reset_index(drop=True)\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods=None, n_historic_predictions=False):\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict_events, received_unnamed_events_df = df_utils.prep_copy_df_dict(events_df)\n df_dict_regressors, received_unnamed_regressors_df = df_utils.prep_copy_df_dict(regressors_df)\n if received_unnamed_events_df:\n df_dict_events = {key: df_dict_events[\"__df__\"] for key in df_dict.keys()}\n elif df_dict_events is None:\n df_dict_events = {key: None for key in df_dict.keys()}\n else:\n df_utils.compare_dict_keys(df_dict, df_dict_events, \"dataframes\", \"events\")\n if received_unnamed_regressors_df:\n df_dict_regressors = {key: df_dict_regressors[\"__df__\"] for key in df_dict.keys()}\n elif df_dict_regressors is None:\n df_dict_regressors = {key: None for key in df_dict.keys()}\n else:\n df_utils.compare_dict_keys(df_dict, df_dict_regressors, \"dataframes\", \"regressors\")\n\n df_future_dataframe = {}\n for key in df_dict.keys():\n df_future_dataframe[key] = self._make_future_dataframe(\n df=df_dict[key],\n events_df=df_dict_events[key],\n regressors_df=df_dict_regressors[key],\n periods=periods,\n n_historic_predictions=n_historic_predictions,\n )\n df_future = df_utils.maybe_get_single_df_from_df_dict(df_future_dataframe, received_unnamed_df)\n return df_future\n\n def predict_trend(self, df):\n \"\"\"Predict only trend component of the model.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n pd.DataFrame, dict\n trend on prediction dates.\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)\n df_dict = self._normalize(df_dict)\n for df_name, df in df_dict.items():\n t = torch.from_numpy(np.expand_dims(df[\"t\"].values, 1))\n trend = self.model.trend(t).squeeze().detach().numpy()\n data_params = self.config_normalization.get_data_params(df_name)\n trend = trend * data_params[\"y\"].scale + data_params[\"y\"].shift\n df_dict[df_name] = pd.DataFrame({\"ds\": df[\"ds\"], \"trend\": trend})\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def predict_seasonal_components(self, df):\n \"\"\"Predict seasonality components\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing columns ``ds``, ``y`` with all data\n\n Returns\n -------\n pd.DataFrame, dict\n seasonal components with columns of name <seasonality component name>\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)\n df_dict = self._normalize(df_dict)\n for df_name, df in df_dict.items():\n dataset = time_dataset.TimeDataset(\n df,\n name=df_name,\n season_config=self.season_config,\n # n_lags=0,\n # n_forecasts=1,\n predict_mode=True,\n )\n loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False)\n predicted = {}\n for name in self.season_config.periods:\n predicted[name] = list()\n for inputs, _, _ in loader:\n for name in self.season_config.periods:\n features = inputs[\"seasonalities\"][name]\n y_season = torch.squeeze(self.model.seasonality(features=features, name=name))\n predicted[name].append(y_season.data.numpy())\n\n for name in self.season_config.periods:\n predicted[name] = np.concatenate(predicted[name])\n if self.season_config.mode == \"additive\":\n data_params = self.config_normalization.get_data_params(df_name)\n predicted[name] = predicted[name] * data_params[\"y\"].scale\n df_dict[df_name] = pd.DataFrame({\"ds\": df[\"ds\"], **predicted})\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def set_true_ar_for_eval(self, true_ar_weights):\n \"\"\"Configures model to evaluate closeness of AR weights to true weights.\n\n Parameters\n ----------\n true_ar_weights : np.array\n true AR-parameters, if known.\n \"\"\"\n self.true_ar_weights = true_ar_weights\n\n def highlight_nth_step_ahead_of_each_forecast(self, step_number=None):\n \"\"\"Set which forecast step to focus on for metrics evaluation and plotting.\n\n Parameters\n ----------\n step_number : int\n i-th step ahead forecast to use for statistics and plotting.\n \"\"\"\n if step_number is not None:\n assert step_number <= self.n_forecasts\n self.highlight_forecast_step_n = step_number\n return self\n\n def plot(self, fcst, ax=None, xlabel=\"ds\", ylabel=\"y\", figsize=(10, 6)):\n \"\"\"Plot the NeuralProphet forecast, including history.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of self.predict.\n ax : matplotlib axes\n optional, matplotlib axes on which to plot.\n xlabel : string\n label name on X-axis\n ylabel : string\n label name on Y-axis\n figsize : tuple\n width, height in inches. default: (10, 6)\n \"\"\"\n if isinstance(fcst, dict):\n log.error(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n if self.n_lags > 0:\n num_forecasts = sum(fcst[\"yhat1\"].notna())\n if num_forecasts < self.n_forecasts:\n log.warning(\n \"Too few forecasts to plot a line per forecast step.\" \"Plotting a line per forecast origin instead.\"\n )\n return self.plot_last_forecast(\n fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n include_previous_forecasts=num_forecasts - 1,\n plot_history_data=True,\n )\n return plot(\n fcst=fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n highlight_forecast=self.highlight_forecast_step_n,\n )\n\n def plot_last_forecast(\n self,\n fcst,\n ax=None,\n xlabel=\"ds\",\n ylabel=\"y\",\n figsize=(10, 6),\n include_previous_forecasts=0,\n plot_history_data=None,\n ):\n \"\"\"Plot the NeuralProphet forecast, including history.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of self.predict.\n ax : matplotlib axes\n Optional, matplotlib axes on which to plot.\n xlabel : str\n label name on X-axis\n ylabel : str\n abel name on Y-axis\n figsize : tuple\n width, height in inches. default: (10, 6)\n include_previous_forecasts : int\n number of previous forecasts to include in plot\n plot_history_data : bool\n specifies plot of historical data\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet forecasting\n \"\"\"\n if self.n_lags == 0:\n raise ValueError(\"Use the standard plot function for models without lags.\")\n if isinstance(fcst, dict):\n log.error(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n if plot_history_data is None:\n fcst = fcst[-(include_previous_forecasts + self.n_forecasts + self.n_lags) :]\n elif plot_history_data is False:\n fcst = fcst[-(include_previous_forecasts + self.n_forecasts) :]\n elif plot_history_data is True:\n fcst = fcst\n fcst = utils.fcst_df_to_last_forecast(fcst, n_last=1 + include_previous_forecasts)\n return plot(\n fcst=fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n highlight_forecast=self.highlight_forecast_step_n,\n line_per_origin=True,\n )\n\n def plot_components(self, fcst, figsize=None, residuals=False):\n \"\"\"Plot the NeuralProphet forecast components.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of self.predict\n figsize : tuple\n width, height in inches.\n\n Note\n ----\n None (default): automatic (10, 3 * npanel)\n\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet components\n \"\"\"\n if isinstance(fcst, dict):\n log.error(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n return plot_components(\n m=self,\n fcst=fcst,\n figsize=figsize,\n forecast_in_focus=self.highlight_forecast_step_n,\n residuals=residuals,\n )\n\n def plot_parameters(self, weekly_start=0, yearly_start=0, figsize=None, df_name=None):\n \"\"\"Plot the NeuralProphet forecast components.\n\n Parameters\n ----------\n weekly_start : int\n specifying the start day of the weekly seasonality plot.\n\n Note\n ----\n 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on.\n yearly_start : int\n specifying the start day of the yearly seasonality plot.\n\n Note\n ----\n 0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on.\n df_name : str\n name of dataframe to refer to data params from original keys of train dataframes (used for local normalization in global modeling)\n figsize : tuple\n width, height in inches.\n\n Note\n ----\n None (default): automatic (10, 3 * npanel)\n\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet forecasting\n \"\"\"\n return plot_parameters(\n m=self,\n forecast_in_focus=self.highlight_forecast_step_n,\n weekly_start=weekly_start,\n yearly_start=yearly_start,\n figsize=figsize,\n df_name=df_name,\n )\n\n def _init_model(self):\n \"\"\"Build Pytorch model with configured hyperparamters.\n\n Returns\n -------\n TimeNet model\n \"\"\"\n self.model = time_net.TimeNet(\n config_trend=self.config_trend,\n config_season=self.season_config,\n config_covar=self.config_covar,\n config_regressors=self.regressors_config,\n config_events=self.events_config,\n config_holidays=self.country_holidays_config,\n n_forecasts=self.n_forecasts,\n n_lags=self.n_lags,\n num_hidden_layers=self.config_model.num_hidden_layers,\n d_hidden=self.config_model.d_hidden,\n )\n log.debug(self.model)\n return self.model\n\n def _create_dataset(self, df_dict, predict_mode):\n \"\"\"Construct dataset from dataframe.\n\n (Configured Hyperparameters can be overridden by explicitly supplying them.\n Useful to predict a single model component.)\n\n Parameters\n ----------\n df_dict : dict\n containing pd.DataFrames of original and normalized columns ``ds``, ``y``, ``t``, ``y_scaled``\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` and\n normalized columns normalized columns ``ds``, ``y``, ``t``, ``y_scaled``\n predict_mode : bool\n specifies predict mode\n\n Options\n * ``False``: includes target values.\n * ``True``: does not include targets but includes entire dataset as input\n\n Returns\n -------\n TimeDataset\n \"\"\"\n return time_dataset.GlobalTimeDataset(\n df_dict,\n predict_mode=predict_mode,\n n_lags=self.n_lags,\n n_forecasts=self.n_forecasts,\n season_config=self.season_config,\n events_config=self.events_config,\n country_holidays_config=self.country_holidays_config,\n covar_config=self.config_covar,\n regressors_config=self.regressors_config,\n )\n\n def __handle_missing_data(self, df, freq, predicting):\n \"\"\"Checks, auto-imputes and normalizes new data\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n predicting : bool\n when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely\n\n Returns\n -------\n pd.DataFrame\n preprocessed dataframe\n \"\"\"\n if self.n_lags == 0 and not predicting:\n # we can drop rows with NA in y\n sum_na = sum(df[\"y\"].isna())\n if sum_na > 0:\n df = df[df[\"y\"].notna()]\n log.info(\"dropped {} NAN row in 'y'\".format(sum_na))\n\n # add missing dates for autoregression modelling\n if self.n_lags > 0:\n df, missing_dates = df_utils.add_missing_dates_nan(df, freq=freq)\n if missing_dates > 0:\n if self.impute_missing:\n log.info(\"{} missing dates added.\".format(missing_dates))\n else:\n raise ValueError(\n \"{} missing dates found. Please preprocess data manually or set impute_missing to True.\".format(\n missing_dates\n )\n )\n\n if self.regressors_config is not None:\n # if future regressors, check that they are not nan at end, else drop\n # we ignore missing events, as those will be filled in with zeros.\n reg_nan_at_end = 0\n for col in self.regressors_config.keys():\n col_nan_at_end = 0\n while len(df) > col_nan_at_end and df[col].isnull().iloc[-(1 + col_nan_at_end)]:\n col_nan_at_end += 1\n reg_nan_at_end = max(reg_nan_at_end, col_nan_at_end)\n if reg_nan_at_end > 0:\n # drop rows at end due to missing future regressors\n df = df[:-reg_nan_at_end]\n log.info(\"Dropped {} rows at end due to missing future regressor values.\".format(reg_nan_at_end))\n\n df_end_to_append = None\n nan_at_end = 0\n while len(df) > nan_at_end and df[\"y\"].isnull().iloc[-(1 + nan_at_end)]:\n nan_at_end += 1\n if nan_at_end > 0:\n if predicting:\n # allow nans at end - will re-add at end\n if self.n_forecasts > 1 and self.n_forecasts < nan_at_end:\n # check that not more than n_forecasts nans, else drop surplus\n df = df[: -(nan_at_end - self.n_forecasts)]\n # correct new length:\n nan_at_end = self.n_forecasts\n log.info(\n \"Detected y to have more NaN values than n_forecast can predict. \"\n \"Dropped {} rows at end.\".format(nan_at_end - self.n_forecasts)\n )\n df_end_to_append = df[-nan_at_end:]\n df = df[:-nan_at_end]\n else:\n # training - drop nans at end\n df = df[:-nan_at_end]\n log.info(\n \"Dropped {} consecutive nans at end. \"\n \"Training data can only be imputed up to last observation.\".format(nan_at_end)\n )\n\n # impute missing values\n data_columns = []\n if self.n_lags > 0:\n data_columns.append(\"y\")\n if self.config_covar is not None:\n data_columns.extend(self.config_covar.keys())\n if self.regressors_config is not None:\n data_columns.extend(self.regressors_config.keys())\n if self.events_config is not None:\n data_columns.extend(self.events_config.keys())\n for column in data_columns:\n sum_na = sum(df[column].isnull())\n if sum_na > 0:\n if self.impute_missing:\n # use 0 substitution for holidays and events missing values\n if self.events_config is not None and column in self.events_config.keys():\n df[column].fillna(0, inplace=True)\n remaining_na = 0\n else:\n df.loc[:, column], remaining_na = df_utils.fill_linear_then_rolling_avg(\n df[column],\n limit_linear=self.impute_limit_linear,\n rolling=self.impute_rolling,\n )\n log.info(\"{} NaN values in column {} were auto-imputed.\".format(sum_na - remaining_na, column))\n if remaining_na > 0:\n raise ValueError(\n \"More than {} consecutive missing values encountered in column {}. \"\n \"{} NA remain. Please preprocess data manually.\".format(\n 2 * self.impute_limit_linear + self.impute_rolling, column, remaining_na\n )\n )\n else: # fail because set to not impute missing\n raise ValueError(\n \"Missing values found. \" \"Please preprocess data manually or set impute_missing to True.\"\n )\n if df_end_to_append is not None:\n df = df.append(df_end_to_append)\n return df\n\n def _handle_missing_data(self, df, freq, predicting=False):\n \"\"\"Checks, auto-imputes and normalizes new data\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n predicting (bool): when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely\n\n Returns\n -------\n pre-processed df\n \"\"\"\n df_is_dict = True\n if isinstance(df, pd.DataFrame):\n df_is_dict = False\n df = {\"__df__\": df}\n elif not isinstance(df, dict):\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n df_handled_missing_dict = {}\n for key in df:\n df_handled_missing_dict[key] = self.__handle_missing_data(df[key], freq, predicting)\n if not df_is_dict:\n df_handled_missing_dict = df_handled_missing_dict[\"__df__\"]\n return df_handled_missing_dict\n\n def _check_dataframe(self, df, check_y=True, exogenous=True):\n \"\"\"Performs basic data sanity checks and ordering\n\n Prepare dataframe for fitting or predicting.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n check_y : bool\n if df must have series values\n\n Note\n ----\n set to True if training or predicting with autoregression\n exogenous : bool\n whether to check covariates, regressors and events column names\n\n Returns\n -------\n pd.DataFrame\n checked dataframe\n \"\"\"\n df_is_dict = True\n if isinstance(df, pd.DataFrame):\n df_is_dict = False\n df = {\"__df__\": df}\n elif not isinstance(df, dict):\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n checked_df = {}\n for key, df_i in df.items():\n checked_df[key] = df_utils.check_single_dataframe(\n df=df_i,\n check_y=check_y,\n covariates=self.config_covar if exogenous else None,\n regressors=self.regressors_config if exogenous else None,\n events=self.events_config if exogenous else None,\n )\n if not df_is_dict:\n checked_df = checked_df[\"__df__\"]\n return checked_df\n\n def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True):\n \"\"\"Validates the name of a seasonality, event, or regressor.\n\n Parameters\n ----------\n name : str\n name of seasonality, event or regressor\n events : bool\n check if name already used for event\n seasons : bool\n check if name already used for seasonality\n regressors : bool\n check if name already used for regressor\n \"\"\"\n reserved_names = [\n \"trend\",\n \"additive_terms\",\n \"daily\",\n \"weekly\",\n \"yearly\",\n \"events\",\n \"holidays\",\n \"zeros\",\n \"extra_regressors_additive\",\n \"yhat\",\n \"extra_regressors_multiplicative\",\n \"multiplicative_terms\",\n ]\n rn_l = [n + \"_lower\" for n in reserved_names]\n rn_u = [n + \"_upper\" for n in reserved_names]\n reserved_names.extend(rn_l)\n reserved_names.extend(rn_u)\n reserved_names.extend([\"ds\", \"y\", \"cap\", \"floor\", \"y_scaled\", \"cap_scaled\"])\n if name in reserved_names:\n raise ValueError(\"Name {name!r} is reserved.\".format(name=name))\n if events and self.events_config is not None:\n if name in self.events_config.keys():\n raise ValueError(\"Name {name!r} already used for an event.\".format(name=name))\n if events and self.country_holidays_config is not None:\n if name in self.country_holidays_config.holiday_names:\n raise ValueError(\n \"Name {name!r} is a holiday name in {country_holidays}.\".format(\n name=name, country_holidays=self.country_holidays_config.country\n )\n )\n if seasons and self.season_config is not None:\n if name in self.season_config.periods:\n raise ValueError(\"Name {name!r} already used for a seasonality.\".format(name=name))\n if covariates and self.config_covar is not None:\n if name in self.config_covar:\n raise ValueError(\"Name {name!r} already used for an added covariate.\".format(name=name))\n if regressors and self.regressors_config is not None:\n if name in self.regressors_config.keys():\n raise ValueError(\"Name {name!r} already used for an added regressor.\".format(name=name))\n\n def _normalize(self, df_dict):\n \"\"\"Apply data scales.\n\n Applies data scaling factors to df using data_params.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n df_dict: dict of pd.DataFrame, normalized\n \"\"\"\n for df_name, df_i in df_dict.items():\n data_params = self.config_normalization.get_data_params(df_name)\n df_dict[df_name] = df_utils.normalize(df_i, data_params)\n return df_dict\n\n def _init_train_loader(self, df_dict):\n \"\"\"Executes data preparation steps and initiates training procedure.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n torch DataLoader\n \"\"\"\n if not isinstance(df_dict, dict):\n raise ValueError(\"df_dict must be a dict of pd.DataFrames.\")\n # if not self.fitted:\n self.config_normalization.init_data_params(\n df_dict=df_dict,\n covariates_config=self.config_covar,\n regressor_config=self.regressors_config,\n events_config=self.events_config,\n )\n\n df_dict = self._normalize(df_dict)\n # if not self.fitted:\n if self.config_trend.changepoints is not None:\n # scale user-specified changepoint times\n self.config_trend.changepoints = self._normalize(\n {\"__df__\": pd.DataFrame({\"ds\": pd.Series(self.config_trend.changepoints)})}\n )[\"__df__\"][\"t\"].values\n\n df_merged, _ = df_utils.join_dataframes(df_dict)\n df_merged = df_merged.sort_values(\"ds\")\n df_merged.drop_duplicates(inplace=True, keep=\"first\", subset=[\"ds\"])\n\n self.season_config = utils.set_auto_seasonalities(df_merged, season_config=self.season_config)\n if self.country_holidays_config is not None:\n self.country_holidays_config.init_holidays(df_merged)\n\n dataset = self._create_dataset(df_dict, predict_mode=False) # needs to be called after set_auto_seasonalities\n self.config_train.set_auto_batch_epoch(n_data=len(dataset))\n\n loader = DataLoader(dataset, batch_size=self.config_train.batch_size, shuffle=True)\n\n # if not self.fitted:\n self.model = self._init_model() # needs to be called after set_auto_seasonalities\n\n if self.config_train.learning_rate is None:\n self.config_train.learning_rate = self.config_train.find_learning_rate(self.model, dataset)\n log.info(\"lr-range-test selected learning rate: {:.2E}\".format(self.config_train.learning_rate))\n self.optimizer = self.config_train.get_optimizer(self.model.parameters())\n self.scheduler = self.config_train.get_scheduler(self.optimizer, steps_per_epoch=len(loader))\n return loader\n\n def _init_val_loader(self, df_dict):\n \"\"\"Executes data preparation steps and initiates evaluation procedure.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n torch DataLoader\n \"\"\"\n df_dict = self._normalize(df_dict)\n dataset = self._create_dataset(df_dict, predict_mode=False)\n loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)\n return loader\n\n def _get_time_based_sample_weight(self, t):\n weight = torch.ones_like(t)\n if self.config_train.newer_samples_weight > 1.0:\n end_w = self.config_train.newer_samples_weight\n start_t = self.config_train.newer_samples_start\n time = (t.detach() - start_t) / (1.0 - start_t)\n time = torch.maximum(torch.zeros_like(time), time)\n time = torch.minimum(torch.ones_like(time), time) # time = 0 to 1\n time = np.pi * (time - 1.0) # time = -pi to 0\n time = 0.5 * torch.cos(time) + 0.5 # time = 0 to 1\n # scales end to be end weight times bigger than start weight\n # with end weight being 1.0\n weight = (1.0 + time * (end_w - 1.0)) / end_w\n return weight\n\n def _train_epoch(self, e, loader):\n \"\"\"Make one complete iteration over all samples in dataloader and update model after each batch.\n\n Parameters\n ----------\n e : int\n current epoch number\n loader : torch DataLoader\n Training Dataloader\n \"\"\"\n self.model.train()\n for i, (inputs, targets, meta) in enumerate(loader):\n # Run forward calculation\n predicted = self.model.forward(inputs)\n # Compute loss. no reduction.\n loss = self.config_train.loss_func(predicted, targets)\n # Weigh newer samples more.\n loss = loss * self._get_time_based_sample_weight(t=inputs[\"time\"])\n loss = loss.mean()\n # Regularize.\n loss, reg_loss = self._add_batch_regualarizations(loss, e, i / float(len(loader)))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n if self.metrics is not None:\n self.metrics.update(\n predicted=predicted.detach(), target=targets.detach(), values={\"Loss\": loss, \"RegLoss\": reg_loss}\n )\n if self.metrics is not None:\n return self.metrics.compute(save=True)\n else:\n return None\n\n def _add_batch_regualarizations(self, loss, e, iter_progress):\n \"\"\"Add regulatization terms to loss, if applicable\n\n Parameters\n ----------\n loss : torch.Tensor, scalar\n current batch loss\n e : int\n current epoch number\n iter_progress : float\n this epoch's progress of iterating over dataset [0, 1]\n\n Returns\n -------\n loss, reg_loss\n \"\"\"\n delay_weight = self.config_train.get_reg_delay_weight(e, iter_progress)\n\n reg_loss = torch.zeros(1, dtype=torch.float, requires_grad=False)\n if delay_weight > 0:\n # Add regularization of AR weights - sparsify\n if self.model.n_lags > 0 and self.config_ar.reg_lambda is not None:\n reg_ar = self.config_ar.regularize(self.model.ar_weights)\n reg_ar = torch.sum(reg_ar).squeeze() / self.n_forecasts\n reg_loss += self.config_ar.reg_lambda * reg_ar\n\n # Regularize trend to be smoother/sparse\n l_trend = self.config_trend.trend_reg\n if self.config_trend.n_changepoints > 0 and l_trend is not None and l_trend > 0:\n reg_trend = utils.reg_func_trend(\n weights=self.model.get_trend_deltas,\n threshold=self.config_train.trend_reg_threshold,\n )\n reg_loss += l_trend * reg_trend\n\n # Regularize seasonality: sparsify fourier term coefficients\n l_season = self.config_train.reg_lambda_season\n if self.model.season_dims is not None and l_season is not None and l_season > 0:\n for name in self.model.season_params.keys():\n reg_season = utils.reg_func_season(self.model.season_params[name])\n reg_loss += l_season * reg_season\n\n # Regularize events: sparsify events features coefficients\n if self.events_config is not None or self.country_holidays_config is not None:\n reg_events_loss = utils.reg_func_events(self.events_config, self.country_holidays_config, self.model)\n reg_loss += reg_events_loss\n\n # Regularize regressors: sparsify regressor features coefficients\n if self.regressors_config is not None:\n reg_regressor_loss = utils.reg_func_regressors(self.regressors_config, self.model)\n reg_loss += reg_regressor_loss\n\n reg_loss = delay_weight * reg_loss\n loss = loss + reg_loss\n return loss, reg_loss\n\n def _evaluate_epoch(self, loader, val_metrics):\n \"\"\"Evaluates model performance.\n\n Parameters\n ----------\n loader : torch DataLoader\n instantiated Validation Dataloader (with TimeDataset)\n val_metrics : MetricsCollection\n alidation metrics to be computed.\n\n Returns\n -------\n dict with evaluation metrics\n \"\"\"\n with torch.no_grad():\n self.model.eval()\n for inputs, targets, meta in loader:\n predicted = self.model.forward(inputs)\n val_metrics.update(predicted=predicted.detach(), target=targets.detach())\n val_metrics = val_metrics.compute(save=True)\n return val_metrics\n\n def _train(self, df_dict, df_val_dict=None, progress=\"bar\"):\n \"\"\"Execute model training procedure for a configured number of epochs.\n\n Parameters\n ----------\n df_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n df_val_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with validation data\n progress : str\n Method of progress display.\n\n Options\n * (default) ``bar`` display updating progress bar (tqdm)\n * ``print`` print out progress (fallback option)\n * ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.\n * ``plot-all`` \"plot\" extended to all recorded metrics.\n\n Returns\n -------\n pd.DataFrame\n metrics\n \"\"\"\n # parse progress arg\n progress_bar = False\n progress_print = False\n plot_live_loss = False\n plot_live_all_metrics = False\n if progress.lower() == \"bar\":\n progress_bar = True\n elif progress.lower() == \"print\":\n progress_print = True\n elif progress.lower() == \"plot\":\n plot_live_loss = True\n elif progress.lower() in [\"plot-all\", \"plotall\", \"plot all\"]:\n plot_live_loss = True\n plot_live_all_metrics = True\n elif not progress.lower() == \"none\":\n raise ValueError(\"received unexpected value for progress {}\".format(progress))\n\n if self.metrics is None:\n log.info(\"No progress prints or plots possible because metrics are deactivated.\")\n if df_val_dict is not None:\n log.warning(\"Ignoring supplied df_val as no metrics are specified.\")\n if plot_live_loss or plot_live_all_metrics:\n log.warning(\"Can not plot live loss as no metrics are specified.\")\n progress_bar = True\n if progress_print:\n log.warning(\"Can not print progress as no metrics are specified.\")\n return self._train_minimal(df_dict, progress_bar=progress_bar)\n\n # set up data loader\n loader = self._init_train_loader(df_dict)\n # set up Metrics\n if self.highlight_forecast_step_n is not None:\n self.metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)\n if not self.config_normalization.global_normalization:\n log.warning(\"When Global modeling with local normalization, metrics are displayed in normalized scale.\")\n else:\n if not self.config_normalization.normalize == \"off\":\n self.metrics.set_shift_scale(\n (\n self.config_normalization.global_data_params[\"y\"].shift,\n self.config_normalization.global_data_params[\"y\"].scale,\n )\n )\n\n validate = df_val_dict is not None\n if validate:\n val_loader = self._init_val_loader(df_val_dict)\n val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])\n\n # set up printing and plotting\n if plot_live_loss:\n try:\n from livelossplot import PlotLosses\n\n live_out = [\"MatplotlibPlot\"]\n if not progress_bar:\n live_out.append(\"ExtremaPrinter\")\n live_loss = PlotLosses(outputs=live_out)\n plot_live_loss = True\n except:\n log.warning(\n \"To plot live loss, please install neuralprophet[live].\"\n \"Using pip: 'pip install neuralprophet[live]'\"\n \"Or install the missing package manually: 'pip install livelossplot'\",\n exc_info=True,\n )\n plot_live_loss = False\n progress_bar = True\n if progress_bar:\n training_loop = tqdm(\n range(self.config_train.epochs),\n total=self.config_train.epochs,\n leave=log.getEffectiveLevel() <= 20,\n )\n else:\n training_loop = range(self.config_train.epochs)\n\n start = time.time()\n # run training loop\n for e in training_loop:\n metrics_live = OrderedDict({})\n self.metrics.reset()\n if validate:\n val_metrics.reset()\n # run epoch\n epoch_metrics = self._train_epoch(e, loader)\n # collect metrics\n if validate:\n val_epoch_metrics = self._evaluate_epoch(val_loader, val_metrics)\n print_val_epoch_metrics = {k + \"_val\": v for k, v in val_epoch_metrics.items()}\n else:\n val_epoch_metrics = None\n print_val_epoch_metrics = OrderedDict({})\n # print metrics\n if progress_bar:\n training_loop.set_description(f\"Epoch[{(e+1)}/{self.config_train.epochs}]\")\n training_loop.set_postfix(ordered_dict=epoch_metrics, **print_val_epoch_metrics)\n elif progress_print:\n metrics_string = utils.print_epoch_metrics(epoch_metrics, e=e, val_metrics=val_epoch_metrics)\n if e == 0:\n log.info(metrics_string.splitlines()[0])\n log.info(metrics_string.splitlines()[1])\n else:\n log.info(metrics_string.splitlines()[1])\n # plot metrics\n if plot_live_loss:\n metrics_train = list(epoch_metrics)\n metrics_live[\"log-{}\".format(metrics_train[0])] = np.log(epoch_metrics[metrics_train[0]])\n if plot_live_all_metrics and len(metrics_train) > 1:\n for i in range(1, len(metrics_train)):\n metrics_live[\"{}\".format(metrics_train[i])] = epoch_metrics[metrics_train[i]]\n if validate:\n metrics_val = list(val_epoch_metrics)\n metrics_live[\"val_log-{}\".format(metrics_val[0])] = np.log(val_epoch_metrics[metrics_val[0]])\n if plot_live_all_metrics and len(metrics_val) > 1:\n for i in range(1, len(metrics_val)):\n metrics_live[\"val_{}\".format(metrics_val[i])] = val_epoch_metrics[metrics_val[i]]\n live_loss.update(metrics_live)\n if e % (1 + self.config_train.epochs // 20) == 0 or e + 1 == self.config_train.epochs:\n live_loss.send()\n\n # return metrics as df\n log.debug(\"Train Time: {:8.3f}\".format(time.time() - start))\n log.debug(\"Total Batches: {}\".format(self.metrics.total_updates))\n metrics_df = self.metrics.get_stored_as_df()\n if validate:\n metrics_df_val = val_metrics.get_stored_as_df()\n for col in metrics_df_val.columns:\n metrics_df[\"{}_val\".format(col)] = metrics_df_val[col]\n return metrics_df\n\n def _train_minimal(self, df_dict, progress_bar=False):\n \"\"\"Execute minimal model training procedure for a configured number of epochs.\n\n Parameters\n ----------\n df_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n None\n \"\"\"\n loader = self._init_train_loader(df_dict)\n if progress_bar:\n training_loop = tqdm(\n range(self.config_train.epochs),\n total=self.config_train.epochs,\n leave=log.getEffectiveLevel() <= 20,\n )\n else:\n training_loop = range(self.config_train.epochs)\n for e in training_loop:\n if progress_bar:\n training_loop.set_description(f\"Epoch[{(e+1)}/{self.config_train.epochs}]\")\n _ = self._train_epoch(e, loader)\n\n def _eval_true_ar(self):\n assert self.n_lags > 0\n if self.highlight_forecast_step_n is None:\n if self.n_lags > 1:\n raise ValueError(\"Please define forecast_lag for sTPE computation\")\n forecast_pos = 1\n else:\n forecast_pos = self.highlight_forecast_step_n\n weights = self.model.ar_weights.detach().numpy()\n weights = weights[forecast_pos - 1, :][::-1]\n sTPE = utils.symmetric_total_percentage_error(self.true_ar_weights, weights)\n log.info(\"AR parameters: \", self.true_ar_weights, \"\\n\", \"Model weights: \", weights)\n return sTPE\n\n def _evaluate(self, loader):\n \"\"\"Evaluates model performance.\n\n Parameters\n ----------\n loader : torch DataLoader\n instantiated Validation Dataloader (with TimeDataset)\n\n Returns\n -------\n pd.DataFrame\n evaluation metrics\n \"\"\"\n val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])\n if self.highlight_forecast_step_n is not None:\n val_metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)\n ## Run\n val_metrics_dict = self._evaluate_epoch(loader, val_metrics)\n\n if self.true_ar_weights is not None:\n val_metrics_dict[\"sTPE\"] = self._eval_true_ar()\n log.info(\"Validation metrics: {}\".format(utils.print_epoch_metrics(val_metrics_dict)))\n val_metrics_df = val_metrics.get_stored_as_df()\n return val_metrics_df\n\n def _make_future_dataframe(self, df, events_df, regressors_df, periods, n_historic_predictions):\n if periods == 0 and n_historic_predictions is True:\n log.warning(\n \"Not extending df into future as no periods specified.\" \"You can call predict directly instead.\"\n )\n df = df.copy(deep=True)\n _ = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=self.data_freq)\n last_date = pd.to_datetime(df[\"ds\"].copy(deep=True).dropna()).sort_values().max()\n if events_df is not None:\n events_df = events_df.copy(deep=True).reset_index(drop=True)\n if regressors_df is not None:\n regressors_df = regressors_df.copy(deep=True).reset_index(drop=True)\n n_lags = 0 if self.n_lags is None else self.n_lags\n if periods is None:\n periods = 1 if n_lags == 0 else self.n_forecasts\n else:\n assert periods >= 0\n\n if isinstance(n_historic_predictions, bool):\n if n_historic_predictions:\n n_historic_predictions = len(df) - n_lags\n else:\n n_historic_predictions = 0\n elif not isinstance(n_historic_predictions, int):\n log.error(\"non-integer value for n_historic_predictions set to zero.\")\n n_historic_predictions = 0\n\n if periods == 0 and n_historic_predictions == 0:\n raise ValueError(\"Set either history or future to contain more than zero values.\")\n\n # check for external regressors known in future\n if self.regressors_config is not None and periods > 0:\n if regressors_df is None:\n raise ValueError(\"Future values of all user specified regressors not provided\")\n else:\n for regressor in self.regressors_config.keys():\n if regressor not in regressors_df.columns:\n raise ValueError(\"Future values of user specified regressor {} not provided\".format(regressor))\n\n if len(df) < n_lags:\n raise ValueError(\"Insufficient data for a prediction\")\n elif len(df) < n_lags + n_historic_predictions:\n log.warning(\n \"Insufficient data for {} historic forecasts, reduced to {}.\".format(\n n_historic_predictions, len(df) - n_lags\n )\n )\n n_historic_predictions = len(df) - n_lags\n if (n_historic_predictions + n_lags) == 0:\n df = pd.DataFrame(columns=df.columns)\n else:\n df = df[-(n_lags + n_historic_predictions) :]\n\n if len(df) > 0:\n if len(df.columns) == 1 and \"ds\" in df:\n assert n_lags == 0\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n else:\n df = self._check_dataframe(df, check_y=n_lags > 0, exogenous=True)\n\n # future data\n # check for external events known in future\n if self.events_config is not None and periods > 0 and events_df is None:\n log.warning(\n \"Future values not supplied for user specified events. \"\n \"All events being treated as not occurring in future\"\n )\n\n if n_lags > 0:\n if periods > 0 and periods != self.n_forecasts:\n periods = self.n_forecasts\n log.warning(\n \"Number of forecast steps is defined by n_forecasts. \" \"Adjusted to {}.\".format(self.n_forecasts)\n )\n\n if periods > 0:\n future_df = df_utils.make_future_df(\n df_columns=df.columns,\n last_date=last_date,\n periods=periods,\n freq=self.data_freq,\n events_config=self.events_config,\n events_df=events_df,\n regressor_config=self.regressors_config,\n regressors_df=regressors_df,\n )\n if len(df) > 0:\n df = df.append(future_df)\n else:\n df = future_df\n df.reset_index(drop=True, inplace=True)\n return df\n\n def _get_maybe_extend_periods(self, df):\n n_lags = 0 if self.n_lags is None else self.n_lags\n periods_add = 0\n nan_at_end = 0\n while len(df) > nan_at_end and df[\"y\"].isnull().iloc[-(1 + nan_at_end)]:\n nan_at_end += 1\n if n_lags > 0:\n if self.regressors_config is None:\n # if dataframe has already been extended into future,\n # don't extend beyond n_forecasts.\n periods_add = max(0, self.n_forecasts - nan_at_end)\n else:\n # can not extend as we lack future regressor values.\n periods_add = 0\n return periods_add\n\n def _maybe_extend_df(self, df_dict):\n periods_add = {}\n for df_name, df in df_dict.items():\n _ = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=self.data_freq)\n # to get all forecasteable values with df given, maybe extend into future:\n periods_add[df_name] = self._get_maybe_extend_periods(df)\n if periods_add[df_name] > 0:\n # This does not include future regressors or events.\n # periods should be 0 if those are configured.\n last_date = pd.to_datetime(df[\"ds\"].copy(deep=True)).sort_values().max()\n future_df = df_utils.make_future_df(\n df_columns=df.columns,\n last_date=last_date,\n periods=periods_add[df_name],\n freq=self.data_freq,\n )\n df = df.append(future_df)\n df.reset_index(drop=True, inplace=True)\n df_dict[df_name] = df\n return df_dict, periods_add\n\n def _prepare_dataframe_to_predict(self, df_dict):\n for df_name, df in df_dict.items():\n df = df.copy(deep=True)\n _ = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=self.data_freq)\n # check if received pre-processed df\n if \"y_scaled\" in df.columns or \"t\" in df.columns:\n raise ValueError(\n \"DataFrame has already been normalized. \" \"Please provide raw dataframe or future dataframe.\"\n )\n # Checks\n n_lags = 0 if self.n_lags is None else self.n_lags\n if len(df) == 0 or len(df) < n_lags:\n raise ValueError(\"Insufficient data to make predictions.\")\n if len(df.columns) == 1 and \"ds\" in df:\n if n_lags != 0:\n raise ValueError(\"only datestamps provided but y values needed for auto-regression.\")\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n else:\n df = self._check_dataframe(df, check_y=n_lags > 0, exogenous=False)\n # fill in missing nans except for nans at end\n df = self._handle_missing_data(df, freq=self.data_freq, predicting=True)\n df.reset_index(drop=True, inplace=True)\n df_dict[df_name] = df\n return df_dict\n\n def _predict_raw(self, df, df_name, include_components=False):\n \"\"\"Runs the model to make predictions.\n\n Predictions are returned in raw vector format without decomposition.\n Predictions are given on a forecast origin basis, not on a target basis.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n df_name : str\n name of the data params from which the current dataframe refers to (only in case of local_normalization)\n include_components : bool\n whether to return individual components of forecast\n\n Returns\n -------\n pd.Series\n timestamps referring to the start of the predictions.\n np.array\n array containing the forecasts\n dict[np.array]\n Dictionary of components containing an array of each components contribution to the forecast\n \"\"\"\n if isinstance(df, dict):\n raise ValueError(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n if \"y_scaled\" not in df.columns or \"t\" not in df.columns:\n raise ValueError(\"Received unprepared dataframe to predict. \" \"Please call predict_dataframe_to_predict.\")\n dataset = self._create_dataset(df_dict={df_name: df}, predict_mode=True)\n loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False)\n if self.n_forecasts > 1:\n dates = df[\"ds\"].iloc[self.n_lags : -self.n_forecasts + 1]\n else:\n dates = df[\"ds\"].iloc[self.n_lags :]\n predicted_vectors = list()\n component_vectors = None\n\n with torch.no_grad():\n self.model.eval()\n for inputs, _, _ in loader:\n predicted = self.model.forward(inputs)\n predicted_vectors.append(predicted.detach().numpy())\n\n if include_components:\n components = self.model.compute_components(inputs)\n if component_vectors is None:\n component_vectors = {name: [value.detach().numpy()] for name, value in components.items()}\n else:\n for name, value in components.items():\n component_vectors[name].append(value.detach().numpy())\n\n predicted = np.concatenate(predicted_vectors)\n data_params = self.config_normalization.get_data_params(df_name)\n scale_y, shift_y = data_params[\"y\"].scale, data_params[\"y\"].shift\n predicted = predicted * scale_y + shift_y\n\n if include_components:\n components = {name: np.concatenate(value) for name, value in component_vectors.items()}\n for name, value in components.items():\n if \"multiplicative\" in name:\n continue\n elif \"event_\" in name:\n event_name = name.split(\"_\")[1]\n if self.events_config is not None and event_name in self.events_config:\n if self.events_config[event_name].mode == \"multiplicative\":\n continue\n elif (\n self.country_holidays_config is not None\n and event_name in self.country_holidays_config.holiday_names\n ):\n if self.country_holidays_config.mode == \"multiplicative\":\n continue\n elif \"season\" in name and self.season_config.mode == \"multiplicative\":\n continue\n\n # scale additive components\n components[name] = value * scale_y\n if \"trend\" in name:\n components[name] += shift_y\n else:\n components = None\n return dates, predicted, components\n\n def _convert_raw_predictions_to_raw_df(self, dates, predicted, components=None):\n \"\"\"Turns forecast-origin-wise predictions into forecast-target-wise predictions.\n\n Parameters\n ----------\n dates : pd.Series\n timestamps referring to the start of the predictions.\n predicted : np.array\n Array containing the forecasts\n components : dict[np.array]\n Dictionary of components containing an array of each components' contribution to the forecast\n\n Returns\n -------\n pd. DataFrame\n columns ``ds``, ``y``, and [``step<i>``]\n\n Note\n ----\n where step<i> refers to the i-step-ahead prediction *made at* this row's datetime.\n e.g. the first forecast step0 is the prediction for this timestamp,\n the step1 is for the timestamp after, ...\n ... step3 is the prediction for 3 steps into the future,\n predicted using information up to (excluding) this datetime.\n \"\"\"\n if isinstance(dates, dict):\n raise ValueError(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n predicted_names = [\"step{}\".format(i) for i in range(self.n_forecasts)]\n all_data = predicted\n all_names = predicted_names\n if components is not None:\n for comp_name, comp_data in components.items():\n all_data = np.concatenate((all_data, comp_data), 1)\n all_names += [\"{}{}\".format(comp_name, i) for i in range(self.n_forecasts)]\n\n df_raw = pd.DataFrame(data=all_data, columns=all_names)\n df_raw.insert(0, \"ds\", dates.values)\n return df_raw\n\n def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components): # DOES NOT ACCEPT DICT\n \"\"\"Turns forecast-origin-wise predictions into forecast-target-wise predictions.\n\n Parameters\n ----------\n df : pd.DataFrame\n input dataframe\n predicted : np.array\n Array containing the forecasts\n components : dict[np.array]\n Dictionary of components containing an array of each components' contribution to the forecast\n\n Returns\n -------\n pd.DataFrame\n columns ``ds``, ``y``, ``trend`` and [``yhat<i>``]\n\n Note\n ----\n where yhat<i> refers to the i-step-ahead prediction for this row's datetime.\n e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, \"3 steps old\".\n \"\"\"\n if isinstance(df, dict):\n raise ValueError(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n cols = [\"ds\", \"y\"] # cols to keep from df\n df_forecast = pd.concat((df[cols],), axis=1)\n\n # create a line for each forecast_lag\n # 'yhat<i>' is the forecast for 'y' at 'ds' from i steps ago.\n for forecast_lag in range(1, self.n_forecasts + 1):\n forecast = predicted[:, forecast_lag - 1]\n pad_before = self.n_lags + forecast_lag - 1\n pad_after = self.n_forecasts - forecast_lag\n yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))\n df_forecast[\"yhat{}\".format(forecast_lag)] = yhat\n df_forecast[\"residual{}\".format(forecast_lag)] = yhat - df_forecast[\"y\"]\n if components is None:\n return df_forecast\n\n # else add components\n lagged_components = [\n \"ar\",\n ]\n if self.config_covar is not None:\n for name in self.config_covar.keys():\n lagged_components.append(\"lagged_regressor_{}\".format(name))\n for comp in lagged_components:\n if comp in components:\n for forecast_lag in range(1, self.n_forecasts + 1):\n forecast = components[comp][:, forecast_lag - 1]\n pad_before = self.n_lags + forecast_lag - 1\n pad_after = self.n_forecasts - forecast_lag\n yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))\n df_forecast[\"{}{}\".format(comp, forecast_lag)] = yhat\n\n # only for non-lagged components\n for comp in components:\n if comp not in lagged_components:\n forecast_0 = components[comp][0, :]\n forecast_rest = components[comp][1:, self.n_forecasts - 1]\n yhat = np.concatenate(([None] * self.n_lags, forecast_0, forecast_rest))\n df_forecast[comp] = yhat\n return df_forecast\n" ]
[ [ "torch.zeros", "numpy.concatenate", "torch.cos", "numpy.log", "pandas.DataFrame", "torch.no_grad", "torch.utils.data.DataLoader", "torch.ones_like", "pandas.concat", "torch.zeros_like", "pandas.Series", "numpy.expand_dims", "torch.sum" ] ]
ramunter/dopamine_mirror
[ "711e188344b199abd925ecc3aa7c991332b3ee83" ]
[ "tests/dopamine/discrete_domains/checkpointer_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Dopamine Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for dopamine.common.checkpointer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\n\n\nfrom absl import flags\nfrom dopamine.discrete_domains import checkpointer\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\n\nclass CheckpointerTest(tf.test.TestCase):\n\n def setUp(self):\n self._test_subdir = os.path.join('/tmp/dopamine_tests', 'checkpointing')\n shutil.rmtree(self._test_subdir, ignore_errors=True)\n os.makedirs(self._test_subdir)\n\n def testCheckpointingInitialization(self):\n # Fails with empty directory.\n with self.assertRaisesRegexp(ValueError,\n 'No path provided to Checkpointer.'):\n checkpointer.Checkpointer('')\n # Fails with invalid directory.\n invalid_dir = '/does/not/exist'\n with self.assertRaisesRegexp(\n ValueError, 'Unable to create checkpoint path: {}.'.\n format(invalid_dir)):\n checkpointer.Checkpointer(invalid_dir)\n # Succeeds with valid directory.\n checkpointer.Checkpointer('/tmp/dopamine_tests')\n # This verifies initialization still works after the directory has already\n # been created.\n self.assertTrue(tf.gfile.Exists('/tmp/dopamine_tests'))\n checkpointer.Checkpointer('/tmp/dopamine_tests')\n\n def testLogToFileWithValidDirectoryDefaultPrefix(self):\n exp_checkpointer = checkpointer.Checkpointer(self._test_subdir)\n data = {'data1': 1, 'data2': 'two', 'data3': (3, 'three')}\n iteration_number = 1729\n exp_checkpointer.save_checkpoint(iteration_number, data)\n loaded_data = exp_checkpointer.load_checkpoint(iteration_number)\n self.assertEqual(data, loaded_data)\n self.assertEqual(None,\n exp_checkpointer.load_checkpoint(iteration_number + 1))\n\n def testLogToFileWithValidDirectoryCustomPrefix(self):\n prefix = 'custom_prefix'\n exp_checkpointer = checkpointer.Checkpointer(self._test_subdir,\n checkpoint_file_prefix=prefix)\n data = {'data1': 1, 'data2': 'two', 'data3': (3, 'three')}\n iteration_number = 1729\n exp_checkpointer.save_checkpoint(iteration_number, data)\n loaded_data = exp_checkpointer.load_checkpoint(iteration_number)\n self.assertEqual(data, loaded_data)\n self.assertEqual(None,\n exp_checkpointer.load_checkpoint(iteration_number + 1))\n\n def testLoadLatestCheckpointWithInvalidDir(self):\n self.assertEqual(\n -1, checkpointer.get_latest_checkpoint_number('/does/not/exist'))\n\n def testLoadLatestCheckpointWithEmptyDir(self):\n self.assertEqual(\n -1, checkpointer.get_latest_checkpoint_number(self._test_subdir))\n\n def testLoadLatestCheckpoint(self):\n exp_checkpointer = checkpointer.Checkpointer(self._test_subdir)\n first_iter = 1729\n exp_checkpointer.save_checkpoint(first_iter, first_iter)\n second_iter = first_iter + 1\n exp_checkpointer.save_checkpoint(second_iter, second_iter)\n self.assertEqual(\n second_iter,\n checkpointer.get_latest_checkpoint_number(self._test_subdir))\n\n def testGarbageCollection(self):\n custom_prefix = 'custom_prefix'\n exp_checkpointer = checkpointer.Checkpointer(\n self._test_subdir, checkpoint_file_prefix=custom_prefix)\n data = {'data1': 1, 'data2': 'two', 'data3': (3, 'three')}\n deleted_log_files = 7\n total_log_files = checkpointer.CHECKPOINT_DURATION + deleted_log_files\n for iteration_number in range(total_log_files):\n exp_checkpointer.save_checkpoint(iteration_number, data)\n for iteration_number in range(total_log_files):\n prefixes = [custom_prefix, 'sentinel_checkpoint_complete']\n for prefix in prefixes:\n checkpoint_file = os.path.join(self._test_subdir, '{}.{}'.format(\n prefix, iteration_number))\n if iteration_number < deleted_log_files:\n self.assertFalse(tf.gfile.Exists(checkpoint_file))\n else:\n self.assertTrue(tf.gfile.Exists(checkpoint_file))\n\n def testGarbageCollectionWithCheckpointFrequency(self):\n custom_prefix = 'custom_prefix'\n checkpoint_frequency = 3\n exp_checkpointer = checkpointer.Checkpointer(\n self._test_subdir, checkpoint_file_prefix=custom_prefix,\n checkpoint_frequency=checkpoint_frequency)\n data = {'data1': 1, 'data2': 'two', 'data3': (3, 'three')}\n deleted_log_files = 6\n total_log_files = (checkpointer.CHECKPOINT_DURATION *\n checkpoint_frequency) + deleted_log_files + 1\n\n # The checkpoints will happen in iteration numbers 0,3,6,9,12,15,18.\n # We are checking if checkpoints 0,3,6 are deleted.\n for iteration_number in range(total_log_files):\n exp_checkpointer.save_checkpoint(iteration_number,\n data)\n for iteration_number in range(total_log_files):\n prefixes = [custom_prefix, 'sentinel_checkpoint_complete']\n for prefix in prefixes:\n checkpoint_file = os.path.join(self._test_subdir, '{}.{}'.format(\n prefix, iteration_number))\n if iteration_number <= deleted_log_files:\n self.assertFalse(tf.gfile.Exists(checkpoint_file))\n else:\n if iteration_number % checkpoint_frequency == 0:\n self.assertTrue(tf.gfile.Exists(checkpoint_file))\n else:\n self.assertFalse(tf.gfile.Exists(checkpoint_file))\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.gfile.Exists", "tensorflow.test.main" ] ]
DATEXIS/EntEval-1
[ "71810e6f4462bd2c12fadab1d2f3383d940f4331" ]
[ "enteval/efp.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n'''\nEntity Factuality Prediction classification\n'''\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport io\nimport os\nimport numpy as np\nimport logging\n\nfrom enteval.tools.validation import SplitClassifier\n\n\nclass EFPEval(object):\n def __init__(self, task_path, use_ctx=True, nclasses=2, seed=1111):\n logging.debug('***** Transfer task : Entity Factuality Prediction classification *****\\n\\n')\n self.nclasses = nclasses\n self.task_name = \"Fever\"\n self.seed = seed\n self.use_ctx = use_ctx\n\n trainlabels, traintext = self.loadFile(os.path.join(task_path, 'train.txt'))\n devlabels, devtext = self.loadFile(os.path.join(task_path, 'dev.txt'))\n testlabels, testtext = self.loadFile(os.path.join(task_path, 'test.txt'))\n self.id2label = list(set(trainlabels))\n self.label2id = {label:i for i, label in enumerate(self.id2label)}\n trainlabels = [self.label2id[l] for l in trainlabels]\n devlabels = [self.label2id[l] for l in devlabels]\n testlabels = [self.label2id[l] for l in testlabels]\n self.data = {'train': [trainlabels, traintext], 'dev': [devlabels, devtext], 'test': [testlabels, testtext]}\n\n if self.use_ctx:\n self.samples = [item[0] for item in traintext] + \\\n [item[0] for item in devtext] + \\\n [item[0] for item in testtext]\n else:\n self.samples = [item[-1] for item in traintext] + \\\n [item[-1] for item in devtext] + \\\n [item[-1] for item in testtext]\n\n def do_prepare(self, params, prepare):\n return prepare(params, self.samples)\n\n def loadFile(self, fpath):\n labels = []\n data = []\n with io.open(fpath, 'r', encoding='utf-8') as fin:\n for line in fin:\n label, entity, s, e = line.strip().split(\"\\t\")\n labels.append(label)\n if self.use_ctx:\n data.append([entity.split(), int(s), int(e), None])\n else:\n data.append([None, None, None, entity.split()])\n return labels, data\n\n\n def run(self, params, batcher):\n fever_embed = {'train': {}, 'dev': {}, 'test': {}}\n bsize = params.batch_size\n\n for key in self.data:\n logging.info('Computing embedding for {0}'.format(key))\n \n fever_embed[key]['X'] = []\n for ii in range(0, len(self.data[key][1]), bsize):\n batch = self.data[key][1][ii:ii + bsize]\n if self.use_ctx:\n embeddings, _ = batcher(params, batch)\n else:\n _, embeddings = batcher(params, batch)\n fever_embed[key]['X'].append(embeddings)\n fever_embed[key]['X'] = np.vstack(fever_embed[key]['X'])\n fever_embed[key]['y'] = np.array(self.data[key][0])\n logging.info('Computed {0} embeddings'.format(key))\n\n config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,\n 'usepytorch': params.usepytorch,\n 'classifier': params.classifier}\n\n clf = SplitClassifier(X={'train': fever_embed['train']['X'],\n 'valid': fever_embed['dev']['X'],\n 'test': fever_embed['test']['X']},\n y={'train': fever_embed['train']['y'],\n 'valid': fever_embed['dev']['y'],\n 'test': fever_embed['test']['y']},\n config=config_classifier)\n\n devacc, testacc = clf.run()\n logging.debug('\\nDev acc : {0} Test acc : {1} for \\\n {2} classification with {3} encoder \\n'.format(devacc, testacc, self.task_name, \"context\" if self.use_ctx else \"description\"))\n\n return {'devacc': devacc, 'acc': testacc,\n 'ndev': len(fever_embed['dev']['X']),\n 'ntest': len(fever_embed['test']['X'])}\n" ]
[ [ "numpy.array", "numpy.vstack" ] ]
ThmCuong/IIC-Python3
[ "5a02b40ffa07b159fa7e89cf5b4ed781f4798ff1" ]
[ "code_icc/scripts/semisup/IID_semisup_STL10.py" ]
[ "from __future__ import print_function\n\nimport argparse\nimport os\nimport pickle\nimport sys\nfrom datetime import datetime\n\nimport matplotlib\nimport torch\nimport torchvision\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\n\nimport code_icc.archs as archs\nfrom code_icc.archs.semisup.sup_head5 import SupHead5\nfrom code_icc.utils.cluster.general import update_lr\nfrom code_icc.utils.cluster.transforms import sobel_process, \\\n sobel_make_transforms\nfrom code_icc.utils.semisup.general import get_dlen, assess_acc_block\nfrom code_icc.utils.semisup.dataset import TenCropAndFinish\n\n\n# Finetune a network that has been trained in an unsupervised fashion but with a\n# train/test split (e.g. network that has been trained with IIC+)\n\n# Options ----------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_ind\", type=int, required=True)\n\n parser.add_argument(\"--arch\", type=str, required=True)\n\n parser.add_argument(\"--head_lr\", type=float, required=True)\n parser.add_argument(\"--trunk_lr\", type=float, required=True)\n\n parser.add_argument(\"--num_epochs\", type=int, default=3200)\n\n parser.add_argument(\"--new_batch_sz\", type=int, default=-1)\n\n parser.add_argument(\"--old_model_ind\", type=int, required=True)\n\n parser.add_argument(\"--penultimate_features\", default=False,\n action=\"store_true\")\n\n parser.add_argument(\"--random_affine\", default=False, action=\"store_true\")\n parser.add_argument(\"--affine_p\", type=float, default=0.5)\n\n parser.add_argument(\"--cutout\", default=False, action=\"store_true\")\n parser.add_argument(\"--cutout_p\", type=float, default=0.5)\n parser.add_argument(\"--cutout_max_box\", type=float, default=0.5)\n\n parser.add_argument(\"--restart\", default=False, action=\"store_true\")\n parser.add_argument(\"--lr_schedule\", type=int, nargs=\"+\", default=[])\n parser.add_argument(\"--lr_mult\", type=float, default=0.5)\n\n parser.add_argument(\"--restart_new_model_ind\", default=False,\n action=\"store_true\")\n parser.add_argument(\"--new_model_ind\", type=int, default=0)\n\n parser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\n config = parser.parse_args() # new config\n\n # Setup ----------------------------------------------------------------------\n\n config.contiguous_sz = 10 # Tencrop\n config.out_dir = os.path.join(config.out_root, str(config.model_ind))\n\n if not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\n if config.restart:\n given_config = config\n reloaded_config_path = os.path.join(given_config.out_dir,\n \"config.pickle\")\n print(\"Loading restarting config from: %s\" % reloaded_config_path)\n with open(reloaded_config_path, \"rb\") as config_f:\n config = pickle.load(config_f)\n assert (config.model_ind == given_config.model_ind)\n\n config.restart = True\n config.num_epochs = given_config.num_epochs # train for longer\n\n config.restart_new_model_ind = given_config.restart_new_model_ind\n config.new_model_ind = given_config.new_model_ind\n\n start_epoch = config.last_epoch + 1\n\n print(\"...restarting from epoch %d\" % start_epoch)\n\n # in case we overshot without saving\n config.epoch_acc = config.epoch_acc[:start_epoch]\n config.epoch_loss = config.epoch_loss[:start_epoch]\n\n\n else:\n config.epoch_acc = []\n config.epoch_loss = []\n start_epoch = 0\n\n # old config only used retrospectively for setting up model at start\n reloaded_config_path = os.path.join(os.path.join(config.out_root,\n str(config.old_model_ind)),\n \"config.pickle\")\n print(\"Loading old features config from: %s\" % reloaded_config_path)\n with open(reloaded_config_path, \"rb\") as config_f:\n old_config = pickle.load(config_f)\n assert (old_config.model_ind == config.old_model_ind)\n\n if config.new_batch_sz == -1:\n config.new_batch_sz = old_config.batch_sz\n\n fig, axarr = plt.subplots(2, sharex=False, figsize=(20, 20))\n\n # Data -----------------------------------------------------------------------\n\n assert (old_config.dataset == \"STL10\")\n\n # make supervised data: train on train, test on test, unlabelled is unused\n tf1, tf2, tf3 = sobel_make_transforms(old_config,\n random_affine=config.random_affine,\n cutout=config.cutout,\n cutout_p=config.cutout_p,\n cutout_max_box=config.cutout_max_box,\n affine_p=config.affine_p)\n\n dataset_class = torchvision.datasets.STL10\n train_data = dataset_class(\n root=old_config.dataset_root,\n transform=tf2, # also could use tf1\n split=\"train\")\n\n train_loader = torch.utils.data.DataLoader(train_data,\n batch_size=config.new_batch_sz,\n shuffle=True,\n num_workers=0,\n drop_last=False)\n\n test_data = dataset_class(\n root=old_config.dataset_root,\n transform=None,\n split=\"test\")\n test_data = TenCropAndFinish(test_data, input_sz=old_config.input_sz,\n include_rgb=old_config.include_rgb)\n\n test_loader = torch.utils.data.DataLoader(test_data,\n batch_size=config.new_batch_sz,\n # full batch\n shuffle=False,\n num_workers=0,\n drop_last=False)\n\n # Model ----------------------------------------------------------------------\n\n net_features = archs.__dict__[old_config.arch](old_config)\n\n if not config.restart:\n model_path = os.path.join(old_config.out_dir, \"best_net.pytorch\")\n net_features.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage))\n\n dlen = get_dlen(net_features, train_loader,\n include_rgb=old_config.include_rgb,\n penultimate_features=config.penultimate_features)\n print(\"dlen: %d\" % dlen)\n\n assert (config.arch == \"SupHead5\")\n net = SupHead5(net_features, dlen=dlen, gt_k=old_config.gt_k)\n\n if config.restart:\n print(\"restarting from latest net\")\n model_path = os.path.join(config.out_dir, \"latest_net.pytorch\")\n net.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage))\n\n net.cuda()\n net = torch.nn.DataParallel(net)\n\n opt_trunk = torch.optim.Adam(\n net.module.trunk.parameters(),\n lr=config.trunk_lr\n )\n opt_head = torch.optim.Adam(\n net.module.head.parameters(),\n lr=(config.head_lr)\n )\n\n if config.restart:\n print(\"restarting from latest optimiser\")\n optimiser_states = torch.load(\n os.path.join(config.out_dir, \"latest_optimiser.pytorch\"))\n opt_trunk.load_state_dict(optimiser_states[\"opt_trunk\"])\n opt_head.load_state_dict(optimiser_states[\"opt_head\"])\n else:\n print(\"using new optimiser state\")\n\n criterion = nn.CrossEntropyLoss().cuda()\n\n if not config.restart:\n net.eval()\n acc = assess_acc_block(net, test_loader, gt_k=old_config.gt_k,\n include_rgb=old_config.include_rgb,\n penultimate_features=config.penultimate_features,\n contiguous_sz=config.contiguous_sz)\n\n print(\"pre: model %d old model %d, acc %f time %s\" % (\n config.model_ind, config.old_model_ind, acc, datetime.now()))\n sys.stdout.flush()\n\n config.epoch_acc.append(acc)\n\n if config.restart_new_model_ind:\n assert (config.restart)\n config.model_ind = config.new_model_ind # old_model_ind stays same\n config.out_dir = os.path.join(config.out_root, str(config.model_ind))\n print(\"restarting as model %d\" % config.model_ind)\n\n if not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\n # Train ----------------------------------------------------------------------\n\n for e_i in xrange(start_epoch, config.num_epochs):\n net.train()\n\n if e_i in config.lr_schedule:\n print(\"e_i %d, multiplying lr for opt trunk and head by %f\" %\n (e_i, config.lr_mult))\n opt_trunk = update_lr(opt_trunk, lr_mult=config.lr_mult)\n opt_head = update_lr(opt_head, lr_mult=config.lr_mult)\n if not hasattr(config, \"lr_changes\"):\n config.lr_changes = []\n config.lr_changes.append((e_i, config.lr_mult))\n\n avg_loss = 0.\n num_batches = len(train_loader)\n for i, (imgs, targets) in enumerate(train_loader):\n imgs = sobel_process(imgs.cuda(), old_config.include_rgb)\n targets = targets.cuda()\n\n x_out = net(imgs, penultimate_features=config.penultimate_features)\n loss = criterion(x_out, targets)\n\n avg_loss += float(loss.data)\n\n opt_trunk.zero_grad()\n opt_head.zero_grad()\n\n loss.backward()\n\n opt_trunk.step()\n opt_head.step()\n\n if (i % 100 == 0) or (e_i == start_epoch):\n print(\"batch %d of %d, loss %f, time %s\" % (i, num_batches,\n float(loss.data),\n datetime.now()))\n sys.stdout.flush()\n\n avg_loss /= num_batches\n\n net.eval()\n acc = assess_acc_block(net, test_loader, gt_k=old_config.gt_k,\n include_rgb=old_config.include_rgb,\n penultimate_features=config.penultimate_features,\n contiguous_sz=config.contiguous_sz)\n\n print(\"model %d old model %d epoch %d acc %f time %s\" % (\n config.model_ind, config.old_model_ind, e_i, acc, datetime.now()))\n sys.stdout.flush()\n\n is_best = False\n if acc > max(config.epoch_acc):\n is_best = True\n\n config.epoch_acc.append(acc)\n config.epoch_loss.append(avg_loss)\n\n axarr[0].clear()\n axarr[0].plot(config.epoch_acc)\n axarr[0].set_title(\"Acc\")\n\n axarr[1].clear()\n axarr[1].plot(config.epoch_loss)\n axarr[1].set_title(\"Loss\")\n\n fig.canvas.draw_idle()\n fig.savefig(os.path.join(config.out_dir, \"plots.png\"))\n\n if is_best or (e_i % 10 == 0):\n net.module.cpu()\n\n if is_best:\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"best_net.pytorch\"))\n torch.save({\"opt_head\": opt_head.state_dict(),\n \"opt_trunk\": opt_trunk.state_dict()},\n os.path.join(config.out_dir,\n \"best_optimiser.pytorch\"))\n\n # save model sparingly for this script\n if e_i % 10 == 0:\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"latest_net.pytorch\"))\n torch.save({\"opt_head\": opt_head.state_dict(),\n \"opt_trunk\": opt_trunk.state_dict()},\n os.path.join(config.out_dir,\n \"latest_optimiser.pytorch\"))\n\n net.module.cuda()\n\n config.last_epoch = e_i # for last saved version\n\n with open(os.path.join(config.out_dir, \"config.pickle\"),\n 'w') as outfile:\n pickle.dump(config, outfile)\n\n with open(os.path.join(config.out_dir, \"config.txt\"),\n \"w\") as text_file:\n text_file.write(\"%s\" % config)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.subplots", "torch.utils.data.DataLoader", "torch.load", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel" ] ]
harryputterman/Cirq
[ "b0096307da010a050d67c28fa55d1797d210b366" ]
[ "cirq/protocols/has_unitary.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n TypeVar,\n Optional,\n Tuple,\n List,\n Sequence,\n)\n\nfrom collections import defaultdict\nimport numpy as np\nfrom typing_extensions import Protocol\n\nfrom cirq.protocols import qid_shape_protocol\n\nif TYPE_CHECKING:\n import cirq\n\nTDefault = TypeVar('TDefault')\n\n\nclass SupportsExplicitHasUnitary(Protocol):\n \"\"\"An object that explicitly specifies whether it has a unitary effect.\"\"\"\n\n def _has_unitary_(self) -> bool:\n \"\"\"Determines whether the receiver has a unitary effect.\n\n This method is used preferentially by the global `cirq.has_unitary`\n method, because this method is much cheaper than the fallback strategies\n such as checking `value._unitary_` (which causes a large matrix to be\n computed).\n\n Returns:\n Whether or not the receiving object (`self`) has a unitary effect.\n \"\"\"\n\n\ndef has_unitary(val: Any) -> bool:\n \"\"\"Determines whether the value has a unitary effect.\n\n Determines whether `val` has a unitary effect by attempting the following\n strategies:\n\n 1. Try to use `val.has_unitary()`.\n Case a) Method not present or returns `NotImplemented`.\n Inconclusive.\n Case b) Method returns `True`.\n Unitary.\n Case c) Method returns `False`.\n Not unitary.\n\n 2. Try to use `val._decompose_()`.\n Case a) Method not present or returns `NotImplemented` or `None`.\n Inconclusive.\n Case b) Method returns an OP_TREE containing only unitary operations.\n Unitary.\n Case c) Method returns an OP_TREE containing non-unitary operations.\n Not Unitary.\n\n 3. Try to use `val._apply_unitary_(args)`.\n Case a) Method not present or returns `NotImplemented`.\n Inconclusive.\n Case b) Method returns a numpy array.\n Unitary.\n Case c) Method returns `None`.\n Not unitary.\n\n 4. Try to use `val._unitary_()`.\n Case a) Method not present or returns `NotImplemented`.\n Continue to next strategy.\n Case b) Method returns a numpy array.\n Unitary.\n Case c) Method returns `None`.\n Not unitary.\n\n It is assumed that, when multiple of these strategies give a conclusive\n result, that these results will all be consistent with each other. If all\n strategies are inconclusive, the value is classified as non-unitary.\n\n Args:\n The value that may or may not have a unitary effect.\n\n Returns:\n Whether or not `val` has a unitary effect.\n \"\"\"\n strats = [\n _strat_has_unitary_from_has_unitary, _strat_has_unitary_from_decompose,\n _strat_has_unitary_from_apply_unitary, _strat_has_unitary_from_unitary\n ]\n for strat in strats:\n result = strat(val)\n if result is not None:\n return result\n\n # If you can't tell that it's unitary, it's not unitary.\n return False\n\n\ndef _strat_has_unitary_from_has_unitary(val: Any) -> Optional[bool]:\n \"\"\"Attempts to infer a value's unitary-ness via its _has_unitary_ method.\"\"\"\n if hasattr(val, '_has_unitary_'):\n result = val._has_unitary_()\n if result is NotImplemented:\n return None\n return result\n return None\n\n\ndef _strat_has_unitary_from_unitary(val: Any) -> Optional[bool]:\n \"\"\"Attempts to infer a value's unitary-ness via its _unitary_ method.\"\"\"\n getter = getattr(val, '_unitary_', None)\n if getter is None:\n return None\n result = getter()\n return result is not NotImplemented and result is not None\n\n\ndef _strat_has_unitary_from_decompose(val: Any) -> Optional[bool]:\n \"\"\"Attempts to infer a value's unitary-ness via its _decompose_ method.\"\"\"\n operations, _, _ = _try_decompose_into_operations_and_qubits(val)\n if operations is None:\n return None\n return all(has_unitary(op) for op in operations)\n\n\ndef _strat_has_unitary_from_apply_unitary(val: Any) -> Optional[bool]:\n \"\"\"Attempts to infer a value's unitary-ness via its _apply_unitary_ method.\n \"\"\"\n from cirq.protocols.apply_unitary import ApplyUnitaryArgs\n from cirq import linalg\n\n method = getattr(val, '_apply_unitary_', None)\n if method is None:\n return None\n\n val_qid_shape = qid_shape_protocol.qid_shape(val, None)\n if val_qid_shape is None:\n return None\n state = linalg.one_hot(shape=val_qid_shape, dtype=np.complex64)\n buffer = np.empty_like(state)\n result = method(ApplyUnitaryArgs(state, buffer, range(len(val_qid_shape))))\n if result is NotImplemented:\n return None\n return result is not None\n\n\ndef _try_decompose_into_operations_and_qubits(val: Any) -> Tuple[Optional[\n List['cirq.Operation']], Sequence['cirq.Qid'], Tuple[int, ...]]:\n \"\"\"Returns the value's decomposition (if any) and the qubits it applies to.\n \"\"\"\n from cirq.protocols.decompose import (decompose_once,\n decompose_once_with_qubits)\n from cirq import LineQid, Gate, Operation\n\n if isinstance(val, Gate):\n # Gates don't specify qubits, and so must be handled specially.\n qid_shape = qid_shape_protocol.qid_shape(val)\n qubits = LineQid.for_qid_shape(qid_shape) # type: Sequence[cirq.Qid]\n return decompose_once_with_qubits(val, qubits, None), qubits, qid_shape\n\n if isinstance(val, Operation):\n qid_shape = qid_shape_protocol.qid_shape(val)\n return decompose_once(val, None), val.qubits, qid_shape\n\n result = decompose_once(val, None)\n if result is not None:\n qubit_set = set()\n qid_shape_dict = defaultdict(lambda: 1) # type: Dict[cirq.Qid, int]\n for op in result:\n for level, q in zip(qid_shape_protocol.qid_shape(op), op.qubits):\n qubit_set.add(q)\n qid_shape_dict[q] = max(qid_shape_dict[q], level)\n qubits = sorted(qubit_set)\n return result, qubits, tuple(qid_shape_dict[q] for q in qubits)\n\n return None, (), ()\n" ]
[ [ "numpy.empty_like" ] ]
pavanteja295/few-shot
[ "96a1377c846674fb92ef35f4e4daedc7cea18b10" ]
[ "experiments/infer.py" ]
[ "\"\"\"\nScript for domain transfer experiments\n\n\"\"\"\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nimport argparse\n\nfrom few_shot.models import FewShotClassifier\nfrom few_shot.datasets import OmniglotDataset, MiniImageNet, FashionDataset\nfrom few_shot.models import get_few_shot_encoder\n#from experiments.maml import prepare_meta_batch\nfrom few_shot.core import NShotTaskSampler, EvaluateFewShot, prepare_nshot_task, create_nshot_task_label\nfrom few_shot.proto import proto_net_episode\nfrom few_shot.matching import matching_net_episode\nfrom few_shot.maml import meta_gradient_step\n\nfrom few_shot.train import fit\nfrom few_shot.callbacks import *\nfrom few_shot.utils import setup_dirs\nfrom config import PATH\n\nsetup_dirs()\nassert torch.cuda.is_available()\ndevice = torch.device('cuda')\ntorch.backends.cudnn.benchmark = True\n\n#def prepare_meta_batch(n, k, q, meta_batch_size):\n# def prepare_meta_batch_(batch):\n# x, y = batch\n # Reshape to `meta_batch_size` number of tasks. Each task contains\n # n*k support samples to train the fast model on and q*k query samples to\n # evaluate the fast model on and generate meta-gradients\n# x = x.reshape(meta_batch_size, n*k + q*k, num_input_channels, x.shape[-2], x.shape[-1])\n # Move to device\n# x = x.double().to(device)\n # Create label\n# y = create_nshot_task_label(k, q).cuda().repeat(meta_batch_size)\n# return x, y\n\n#return prepare_meta_batch_\ndef prepare_meta_batch(n, k, q, meta_batch_size):\n def prepare_meta_batch_(batch):\n x, y = batch\n # Reshape to `meta_batch_size` number of tasks. Each task contains\n # n*k support samples to train the fast model on and q*k query samples to\n # evaluate the fast model on and generate meta-gradients\n x = x.reshape(meta_batch_size, n*k + q*k, num_input_channels, x.shape[-2], x.shape[-1])\n # Move to device\n x = x.double().to(device)\n # Create label\n y = create_nshot_task_label(k, q).cuda().repeat(meta_batch_size)\n return x, y\n return prepare_meta_batch_\n\ndef infer(callbacks, model, loss_fn, optimiser):\n# import pdb; pdb.set_trace()\n num_batches = 1\n# batch_size = dataloader.batch_size\n\n # default call back averages the bach accuracy and loss\n callbacks = CallbackList((callbacks or []))\n # model and all other information has been passed to call back nothing else ot be done during function calls\n callbacks.set_model(model)\n callbacks.set_params({\n 'loss_fn': loss_fn,\n 'optimiser': optimiser\n })\n\n # creates a csv logger file\n callbacks.on_train_begin()\n callbacks.on_epoch_begin(1)\n epoch_logs = {}\n callbacks.on_epoch_end(1, epoch_logs)\n# import pdb; pdb.set_trace()\n\n\n##############\n# Parameters #\n##############\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='fashion-dataset')\nparser.add_argument('--distance', default='l2')\nparser.add_argument('--n-train', default=5, type=int)\nparser.add_argument('--n-test', default=5, type=int)\nparser.add_argument('--k-train', default=5, type=int)\nparser.add_argument('--k-test', default=5, type=int)\nparser.add_argument('--q-train', default=5, type=int)\nparser.add_argument('--q-test', default=5, type=int)\nparser.add_argument('--exp_name', default='test', type=str)\nparser.add_argument('--model_path', default='test', type=str)\nparser.add_argument('--eval_classes', default='None' , type=str)\nparser.add_argument('--network', default='proto', type=str)\nargs = parser.parse_args()\n\nparam_str = f'{args.network}_network_{args.exp_name}_experiment_name_{args.dataset}_nt={args.n_test}_kt={args.k_test}_qt={args.q_test}_' \\\n f'nv={args.n_test}_kv={args.k_test}_qv={args.q_test}'\n\nprint(param_str)\nif args.dataset == 'omniglot':\n n_epochs = 40\n dataset_class = OmniglotDataset\n num_input_channels = 1\n drop_lr_every = 20\nelif args.dataset == 'miniImageNet':\n n_epochs = 80\n dataset_class = MiniImageNet\n num_input_channels = 3\n drop_lr_every = 40\nelif args.dataset == 'fashion-dataset':\n n_epochs = 80\n dataset_class = FashionDataset\n num_input_channels = 3\n drop_lr_every = 40\nelse:\n # add for fashion here\n raise(ValueError, 'Unsupported dataset')\n\n##############\n# Create datasets #\n###################\nbackground = dataset_class('background')\n# no batch size for proto nets\n\nevaluation = dataset_class('evaluation')\n\nif args.eval_classes == 'low_class':\n eval_classes = ['Shirts','Kurta Sets', 'Sweaters' , 'Sweatshirts', 'Night suits' ]\nelif args.eval_classes == 'high_class':\n import random\n eval_classes = ['Bracelet', 'Tracksuits', 'Mask and Peel', 'Scarves', 'Sports Shoes']\nelse:\n eval_classes = None\n\nevaluation_taskloader = DataLoader(\n evaluation,\n num_workers=4\n)\n\n#########\n# Model #\n#########\nevaluation_episodes = 1000\nepisodes_per_epoch = 100\n\nif args.network == 'proto':\n optimiser = Adam(model.parameters(), lr=1e-3)\n loss_fn = torch.nn.NLLLoss().cuda()\n\n n_epochs = 80\n dataset_class = FashionDataset\n num_input_channels = 3\n drop_lr_every = 40\n model = get_few_shot_encoder(num_input_channels)\n eval_fn = proto_net_episode\n evaluation_taskloader = DataLoader(evaluation, batch_sampler=NShotTaskSampler(evaluation, episodes_per_epoch, args.n_test, args.k_test, args.q_test, eval_classes=None), # why is qtest needed for protonet i think its not rquired for protonet check it\n num_workers=4\n)\n callbacks = [\n EvaluateFewShot(\n eval_fn=eval_fn,\n num_tasks=evaluation_episodes,\n n_shot=args.n_test,\n k_way=args.k_test,\n q_queries=args.q_test,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_nshot_task(args.n_test, args.k_test, args.q_test), # n shot task is a simple function that maps classes to [0-k]\n distance=args.distance\n ),\n]\nelif args.network == 'matching':\n from few_shot.models import MatchingNetwork\n n_epochs = 200\n dataset_class = FashionDataset\n num_input_channels = 3\n lstm_input_size = 1600\n\n evaluation_taskloader = DataLoader(evaluation, batch_sampler=NShotTaskSampler(evaluation, episodes_per_epoch, args.n_test, args.k_test, args.q_test, eval_classes=eval_classes), # why is qtest needed for protonet i think its not rquired for protonet check it\n num_workers=4\n)\n model = MatchingNetwork(args.n_train, args.k_train, args.q_train, True, num_input_channels,\n lstm_layers=1,\n lstm_input_size=lstm_input_size,\n unrolling_steps=2,\n device=device)\n optimiser = Adam(model.parameters(), lr=1e-3)\n loss_fn = torch.nn.NLLLoss().cuda()\n\n\n eval_fn = matching_net_episode\n\n callbacks = [ EvaluateFewShot(\n eval_fn=matching_net_episode,\n num_tasks=evaluation_episodes,\n n_shot=args.n_test,\n k_way=args.k_test,\n q_queries=args.q_test,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_nshot_task(args.n_test, args.k_test, args.q_test),\n fce= True,\n distance=args.distance)]\nelif args.network == 'maml':\n# import pdb;pdb.set_trace()\n dataset_class = FashionDataset\n fc_layer_size = 1600\n num_input_channels = 3\n meta_lr = 0.001\n model = FewShotClassifier(num_input_channels, args.k_train, fc_layer_size).to(device, dtype=torch.double)\n optimiser = torch.optim.Adam(model.parameters(), lr=meta_lr)\n loss_fn = torch.nn.CrossEntropyLoss().to(device)\n optimiser = Adam(model.parameters(), lr=1e-3)\n eval_batches=20\n meta_batch_size = 32\n batch_sampler=NShotTaskSampler(evaluation, eval_batches, n=args.n_test, k=args.k_test, q=args.q_test,\n num_tasks=meta_batch_size, eval_classes=eval_classes)\n evaluation_taskloader = DataLoader(evaluation, batch_sampler= batch_sampler, num_workers=8 )\n prepare_batch = prepare_meta_batch(args.n_test, args.k_test, args.q_test, meta_batch_size)\n callbacks = [\n EvaluateFewShot(\n eval_fn=meta_gradient_step,\n num_tasks=eval_batches,\n n_shot=args.n_test,\n k_way=args.k_test,\n q_queries=args.q_test,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_meta_batch(args.n_test, args.k_test, args.q_test, meta_batch_size),\n # MAML kwargs\n inner_train_steps=1,\n inner_lr=0.4,\n device=device,\n order=1,\n )\n ]\n\nmodel.load_state_dict(torch.load(args.model_path))\nmodel.to(device, dtype=torch.double)\n\n############\n# Training #\n############\n#print(f'Training Prototypical network on {args.dataset}...')\n\ndef lr_schedule(epoch, lr):\n # Drop lr every 2000 episodes\n if epoch % drop_lr_every == 0:\n return lr / 2\n else:\n return lr\ninfer(callbacks, model, loss_fn, optimiser)\n\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
adehad/plotly.py
[ "bca292530c400c61e8b7f8a6571262a9dde43ee3" ]
[ "packages/python/plotly/plotly/tests/test_optional/test_px/test_px.py" ]
[ "import plotly.express as px\nimport numpy as np\nimport pytest\nfrom itertools import permutations\n\n\ndef test_scatter():\n iris = px.data.iris()\n fig = px.scatter(iris, x=\"sepal_width\", y=\"sepal_length\")\n assert fig.data[0].type == \"scatter\"\n assert np.all(fig.data[0].x == iris.sepal_width)\n assert np.all(fig.data[0].y == iris.sepal_length)\n # test defaults\n assert fig.data[0].mode == \"markers\"\n\n\ndef test_custom_data_scatter():\n iris = px.data.iris()\n # No hover, no custom data\n fig = px.scatter(iris, x=\"sepal_width\", y=\"sepal_length\", color=\"species\")\n assert fig.data[0].customdata is None\n # Hover, no custom data\n fig = px.scatter(\n iris,\n x=\"sepal_width\",\n y=\"sepal_length\",\n color=\"species\",\n hover_data=[\"petal_length\", \"petal_width\"],\n )\n for data in fig.data:\n assert np.all(np.in1d(data.customdata[:, 1], iris.petal_width))\n # Hover and custom data, no repeated arguments\n fig = px.scatter(\n iris,\n x=\"sepal_width\",\n y=\"sepal_length\",\n hover_data=[\"petal_length\", \"petal_width\"],\n custom_data=[\"species_id\", \"species\"],\n )\n assert [e[0] for e in fig.data[0].customdata] == iris.species_id.to_list()\n assert len(fig.data[0].customdata[0]) == 4\n # Hover and custom data, with repeated arguments\n fig = px.scatter(\n iris,\n x=\"sepal_width\",\n y=\"sepal_length\",\n hover_data=[\"petal_length\", \"petal_width\", \"species_id\"],\n custom_data=[\"species_id\", \"species\"],\n )\n assert [e[0] for e in fig.data[0].customdata] == iris.species_id.tolist()\n assert len(fig.data[0].customdata[0]) == 4\n assert (\n fig.data[0].hovertemplate\n == \"sepal_width=%{x}<br>sepal_length=%{y}<br>petal_length=%{customdata[2]}<br>petal_width=%{customdata[3]}<br>species_id=%{customdata[0]}<extra></extra>\"\n )\n\n\ndef test_labels():\n tips = px.data.tips()\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n facet_row=\"time\",\n facet_col=\"day\",\n color=\"size\",\n symbol=\"sex\",\n labels={c: c.upper() for c in tips.columns},\n )\n assert \"SEX\" in fig.data[0].hovertemplate\n assert \"TOTAL_BILL\" in fig.data[0].hovertemplate\n assert \"SIZE\" in fig.data[0].hovertemplate\n assert \"DAY\" in fig.data[0].hovertemplate\n assert \"TIME\" in fig.data[0].hovertemplate\n assert fig.layout.legend.title.text.startswith(\"SEX\")\n assert fig.layout.xaxis.title.text == \"TOTAL_BILL\"\n assert fig.layout.coloraxis.colorbar.title.text == \"SIZE\"\n assert fig.layout.annotations[0].text.startswith(\"DAY\")\n assert fig.layout.annotations[4].text.startswith(\"TIME\")\n\n\ndef test_px_templates():\n try:\n import plotly.io as pio\n import plotly.graph_objects as go\n\n tips = px.data.tips()\n\n # use the normal defaults\n fig = px.scatter()\n assert fig.layout.template == pio.templates[pio.templates.default]\n\n # respect changes to defaults\n pio.templates.default = \"seaborn\"\n fig = px.scatter()\n assert fig.layout.template == pio.templates[\"seaborn\"]\n\n # special px-level defaults over pio defaults\n pio.templates.default = \"seaborn\"\n px.defaults.template = \"ggplot2\"\n fig = px.scatter()\n assert fig.layout.template == pio.templates[\"ggplot2\"]\n\n # accept names in args over pio and px defaults\n fig = px.scatter(template=\"seaborn\")\n assert fig.layout.template == pio.templates[\"seaborn\"]\n\n # accept objects in args\n fig = px.scatter(template={})\n assert fig.layout.template == go.layout.Template(data_scatter=[{}])\n\n # read colorway from the template\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n color=\"sex\",\n template=dict(layout_colorway=[\"red\", \"blue\"]),\n )\n assert fig.data[0].marker.color == \"red\"\n assert fig.data[1].marker.color == \"blue\"\n\n # default colorway fallback\n fig = px.scatter(tips, x=\"total_bill\", y=\"tip\", color=\"sex\", template=dict())\n assert fig.data[0].marker.color == px.colors.qualitative.D3[0]\n assert fig.data[1].marker.color == px.colors.qualitative.D3[1]\n\n # pio default template colorway fallback\n pio.templates.default = \"seaborn\"\n px.defaults.template = None\n fig = px.scatter(tips, x=\"total_bill\", y=\"tip\", color=\"sex\")\n assert fig.data[0].marker.color == pio.templates[\"seaborn\"].layout.colorway[0]\n assert fig.data[1].marker.color == pio.templates[\"seaborn\"].layout.colorway[1]\n\n # pio default template colorway fallback\n pio.templates.default = \"seaborn\"\n px.defaults.template = \"ggplot2\"\n fig = px.scatter(tips, x=\"total_bill\", y=\"tip\", color=\"sex\")\n assert fig.data[0].marker.color == pio.templates[\"ggplot2\"].layout.colorway[0]\n assert fig.data[1].marker.color == pio.templates[\"ggplot2\"].layout.colorway[1]\n\n # don't overwrite top margin when set in template\n fig = px.scatter(title=\"yo\")\n assert fig.layout.margin.t is None\n\n fig = px.scatter()\n assert fig.layout.margin.t == 60\n\n fig = px.scatter(template=dict(layout_margin_t=2))\n assert fig.layout.margin.t is None\n\n # don't force histogram gridlines when set in template\n pio.templates.default = \"none\"\n px.defaults.template = None\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n marginal_x=\"histogram\",\n marginal_y=\"histogram\",\n )\n assert fig.layout.xaxis2.showgrid\n assert fig.layout.xaxis3.showgrid\n assert fig.layout.yaxis2.showgrid\n assert fig.layout.yaxis3.showgrid\n\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n marginal_x=\"histogram\",\n marginal_y=\"histogram\",\n template=dict(layout_yaxis_showgrid=False),\n )\n assert fig.layout.xaxis2.showgrid\n assert fig.layout.xaxis3.showgrid\n assert fig.layout.yaxis2.showgrid is None\n assert fig.layout.yaxis3.showgrid is None\n\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n marginal_x=\"histogram\",\n marginal_y=\"histogram\",\n template=dict(layout_xaxis_showgrid=False),\n )\n assert fig.layout.xaxis2.showgrid is None\n assert fig.layout.xaxis3.showgrid is None\n assert fig.layout.yaxis2.showgrid\n assert fig.layout.yaxis3.showgrid\n finally:\n # reset defaults to prevent all other tests from failing if this one does\n px.defaults.reset()\n\n\ndef test_px_defaults():\n px.defaults.labels = dict(x=\"hey x\")\n px.defaults.category_orders = dict(color=[\"b\", \"a\"])\n px.defaults.color_discrete_map = dict(b=\"red\")\n fig = px.scatter(x=[1, 2], y=[1, 2], color=[\"a\", \"b\"])\n try:\n assert fig.data[0].name == \"b\"\n assert fig.data[0].marker.color == \"red\"\n assert fig.layout.xaxis.title.text == \"hey x\"\n finally:\n # reset defaults to prevent all other tests from failing if this one does\n px.defaults.reset()\n\n\ndef assert_orderings(days_order, days_check, times_order, times_check):\n symbol_sequence = [\"circle\", \"diamond\", \"square\", \"cross\"]\n color_sequence = [\"red\", \"blue\"]\n fig = px.scatter(\n px.data.tips(),\n x=\"total_bill\",\n y=\"tip\",\n facet_row=\"time\",\n facet_col=\"day\",\n color=\"time\",\n symbol=\"day\",\n symbol_sequence=symbol_sequence,\n color_discrete_sequence=color_sequence,\n category_orders=dict(day=days_order, time=times_order),\n )\n\n for col in range(len(days_check)):\n for trace in fig.select_traces(col=col + 1):\n assert days_check[col] in trace.hovertemplate\n\n for row in range(len(times_check)):\n for trace in fig.select_traces(row=2 - row):\n assert times_check[row] in trace.hovertemplate\n\n for trace in fig.data:\n for i, day in enumerate(days_check):\n if day in trace.name:\n assert trace.marker.symbol == symbol_sequence[i]\n for i, time in enumerate(times_check):\n if time in trace.name:\n assert trace.marker.color == color_sequence[i]\n\n\ndef test_noisy_orthogonal_orderings():\n assert_orderings(\n [\"x\", \"Sun\", \"Sat\", \"y\", \"Fri\", \"z\"], # add extra noise, missing Thur\n [\"Sun\", \"Sat\", \"Fri\", \"Thur\"], # Thur is at the back\n [\"a\", \"Lunch\", \"b\"], # add extra noise, missing Dinner\n [\"Lunch\", \"Dinner\"], # Dinner is at the back\n )\n\n\[email protected](\"days\", permutations([\"Sun\", \"Sat\", \"Fri\", \"Thur\"]))\[email protected](\"times\", permutations([\"Lunch\", \"Dinner\"]))\ndef test_orthogonal_orderings(days, times):\n assert_orderings(days, days, times, times)\n\n\ndef test_permissive_defaults():\n msg = \"'PxDefaults' object has no attribute 'should_not_work'\"\n with pytest.raises(AttributeError, match=msg):\n px.defaults.should_not_work = \"test\"\n\n\ndef test_marginal_ranges():\n df = px.data.tips()\n fig = px.scatter(\n df,\n x=\"total_bill\",\n y=\"tip\",\n marginal_x=\"histogram\",\n marginal_y=\"histogram\",\n range_x=[5, 10],\n range_y=[5, 10],\n )\n assert fig.layout.xaxis2.range is None\n assert fig.layout.yaxis3.range is None\n\n\ndef test_render_mode():\n df = px.data.gapminder()\n df2007 = df.query(\"year == 2007\")\n fig = px.scatter(df2007, x=\"gdpPercap\", y=\"lifeExp\", trendline=\"ols\")\n assert fig.data[0].type == \"scatter\"\n assert fig.data[1].type == \"scatter\"\n fig = px.scatter(\n df2007, x=\"gdpPercap\", y=\"lifeExp\", trendline=\"ols\", render_mode=\"webgl\"\n )\n assert fig.data[0].type == \"scattergl\"\n assert fig.data[1].type == \"scattergl\"\n fig = px.scatter(df, x=\"gdpPercap\", y=\"lifeExp\", trendline=\"ols\")\n assert fig.data[0].type == \"scattergl\"\n assert fig.data[1].type == \"scattergl\"\n fig = px.scatter(df, x=\"gdpPercap\", y=\"lifeExp\", trendline=\"ols\", render_mode=\"svg\")\n assert fig.data[0].type == \"scatter\"\n assert fig.data[1].type == \"scatter\"\n fig = px.density_contour(df, x=\"gdpPercap\", y=\"lifeExp\", trendline=\"ols\")\n assert fig.data[0].type == \"histogram2dcontour\"\n assert fig.data[1].type == \"scatter\"\n" ]
[ [ "numpy.all", "numpy.in1d" ] ]
mlitre/Deep-Cure-Learning
[ "c3fe00d0917ea2cec2fcefc491d29ef706d99839" ]
[ "deep_cure_learning/stable.py" ]
[ "import gym\r\nimport numpy as np\r\nfrom envs.deep_cure_env import DeepCure, random_base_infect_rate, random_lifetime, ForeignCountry\r\nimport matplotlib.pyplot as plt\r\nfrom plotting import plot\r\nfrom stable_baselines3 import DQN, A2C\r\nimport torch as th \r\nfrom stable_baselines3.common.callbacks import EvalCallback\r\n\r\ndef lr(progress):\r\n return 0.001*np.sqrt(progress/100)\r\n\r\nenv = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], use_discrete = True, save_history=True)\r\neval_callback = EvalCallback(env, best_model_save_path='./',\r\n log_path='./', eval_freq=500,\r\n deterministic=True, render=False)\r\n\r\npolicy_kwargs = dict(activation_fn=th.nn.Sigmoid, net_arch=[5])\r\nmodel = DQN(\"MlpPolicy\", env, batch_size=2, learning_rate=lr, policy_kwargs=policy_kwargs, verbose=1)\r\nmodel.learn(total_timesteps=100000, n_eval_episodes=100000, callback=eval_callback)\r\nmodel.save(\"dqn_stable\")\r\n\r\nmodel = DQN.load(\"dqn_stable\")\r\n\r\n\r\nobs = env.reset(rate=2.7)\r\nwhile True:\r\n action, _states = model.predict(obs, deterministic=True)\r\n obs, reward, done, info = env.step(action)\r\n\r\n if done:\r\n break\r\n\r\nplot(env)\r\n" ]
[ [ "numpy.sqrt" ] ]
BynaryCobweb/joliGAN
[ "a712b540b61f09691bb99406a49646dc8746cb7f" ]
[ "models/modules/cut_networks.py" ]
[ "import torch.nn as nn\nfrom .utils import init_net\nimport torch\n\nclass PatchSampleF(nn.Module):\n def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):\n # potential issues: currently, we use the same patch_ids for multiple images in the batch\n super(PatchSampleF, self).__init__()\n self.l2norm = Normalize(2)\n self.use_mlp = use_mlp\n self.nc = nc # hard-coded\n self.mlp_init = False\n self.init_type = init_type\n self.init_gain = init_gain\n self.gpu_ids = gpu_ids\n\n def create_mlp(self, feats):\n for mlp_id, feat in enumerate(feats):\n input_nc = feat.shape[1]\n mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])\n if len(self.gpu_ids) > 0:\n mlp.cuda()\n setattr(self, 'mlp_%d' % mlp_id, mlp)\n init_net(self, self.init_type, self.init_gain, self.gpu_ids)\n self.mlp_init = True\n\n def forward(self, feats, num_patches=64, patch_ids=None):\n return_ids = []\n return_feats = []\n if self.use_mlp and not self.mlp_init:\n self.create_mlp(feats)\n for feat_id, feat in enumerate(feats):\n B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]\n feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)\n if num_patches > 0:\n if patch_ids is not None:\n patch_id = patch_ids[feat_id].squeeze()\n else:\n patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)\n patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)\n x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])\n else:\n x_sample = feat_reshape\n patch_id = []\n if self.use_mlp:\n mlp = getattr(self, 'mlp_%d' % feat_id)\n x_sample = mlp(x_sample)\n return_ids.append(patch_id.unsqueeze(0))\n x_sample = self.l2norm(x_sample)\n\n if num_patches == 0:\n x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])\n return_feats.append(x_sample)\n return return_feats, return_ids\n\nclass Normalize(nn.Module):\n\n def __init__(self, power=2):\n super(Normalize, self).__init__()\n self.power = power\n\n def forward(self, x):\n norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)\n out = x.div(norm + 1e-7)\n return out\n" ]
[ [ "torch.nn.Linear", "torch.randperm", "torch.nn.ReLU" ] ]
jiangwenfan/pythonScripts
[ "c9004944f162af575e111522f98d4de4f59885e6" ]
[ "learnEnglish/wordsWallpaper/randomSetWords.py" ]
[ "import cv2\r\nfrom PIL import ImageFont, ImageDraw, Image\r\nimport numpy as np\r\nimport random\r\n\r\n\"\"\"\r\n读取words文件,将单词数据打乱,生成随机含有单词的背景图。\r\n\"\"\"\r\n\r\nwordsFile = \"wordsInput.txt\" # set word source file\r\ninputImage = \"imageInput.png\" # set picture output location\r\n\r\n# font color pond\r\nfontColor = [(248, 248, 255), (46, 139, 87), (255, 69, 0)]\r\n# words list\r\nwordsList = []\r\n\r\n\r\n# create wordslist info\r\ndef createWordsList(wordsFile):\r\n with open(wordsFile, 'r', encoding=\"utf-8\") as f:\r\n # get line number of words file\r\n count = 0\r\n # read words file and storage as list data structure\r\n for word in f:\r\n # storage every word info\r\n wordInfo = {}\r\n wordInfo[\"text\"] = word\r\n wordInfo['fill'] = (255, 69, 0)\r\n # word location of random create\r\n x = random.randint(50, 1800)\r\n y = random.randint(50, 1800)\r\n wordInfo['xy'] = (x, y)\r\n wordsList.append(wordInfo)\r\n\r\n\r\ndef writeImage(backgroundImage, wordsList):\r\n \"\"\"\r\n textList = [{\"xy\":(100,300),\"text\":\"hello\",\"fill\"=(255,255,255)},{......}]\r\n \"\"\"\r\n bk_img = cv2.imread(backgroundImage)\r\n # set font style\r\n fontPath = \"font/simsun.ttc\"\r\n # set font size\r\n font = ImageFont.truetype(fontPath, 12)\r\n # convert picture object to picture memory\r\n img_pil = Image.fromarray(bk_img)\r\n # draw picture memory\r\n draw = ImageDraw.Draw(img_pil)\r\n wordsWriteReal = wordsList[:100] # set words in picture of real\r\n for wordInfo in wordsWriteReal:\r\n draw.text(wordInfo['xy'], wordInfo['text'], font=font, fill=wordInfo['fill'])\r\n bk_img = np.array(img_pil)\r\n # set title of show picture\r\n cv2.imshow(\"text6\", bk_img)\r\n cv2.waitKey()\r\n cv2.imwrite(\"imageOutputRandom.png\", bk_img) # set output picture name\r\n\r\n\r\nif __name__ == '__main__':\r\n createWordsList(wordsFile)\r\n # shuffle words list\r\n random.shuffle(wordsList)\r\n writeImage(inputImage,wordsList)\r\n" ]
[ [ "numpy.array" ] ]
ruanjiandong/tensorflow
[ "8284401718fedc0c5ae1cdf19ea399efcca6c03f" ]
[ "tensorflow/python/eager/function.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=unidiomatic-typecheck\n\"\"\"Defun decorator for defining graph-mode functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport sys\nimport threading\n\nimport numpy as np\nimport six\n\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes as dtypes_module\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import cond_v2_impl\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import distribution_strategy_context\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\n# This is to avoid a circular dependency with cond_v2_impl\n# (function -> gradients_impl -> control_flow_ops -> cond_v2_impl).\ncond_v2_impl._function = sys.modules[__name__] # pylint: disable=protected-access\n\n# This is to avoid a circular dependency with gradients_impl\ngradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access\n\n\ndef _create_substitute_placeholder(value, name, dtype=None):\n \"\"\"Creates a placeholder for `value` and propagates shape info to it.\"\"\"\n # Note: setting ops.control_dependencies(None) ensures we always put\n # capturing placeholders outside of any control flow context.\n with ops.control_dependencies(None):\n placeholder = graph_placeholder(\n dtype=dtype or value.dtype, shape=value.shape, name=name)\n if placeholder.dtype == dtypes_module.resource:\n if isinstance(value, ops.EagerTensor):\n handle_data = value._handle_data # pylint: disable=protected-access\n else:\n handle_data = resource_variable_ops.get_resource_handle_data(value)\n if handle_data is not None and handle_data.is_set:\n # pylint: disable=protected-access\n pywrap_tensorflow.SetResourceHandleShapeAndType(\n placeholder.graph._c_graph, placeholder._as_tf_output(),\n handle_data.SerializeToString())\n # pylint: enable=protected-access\n # Ensure that shapes and dtypes are propagated.\n shapes, types = zip(*[(pair.shape, pair.dtype)\n for pair in handle_data.shape_and_type])\n ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]\n shapes = [[d.size for d in s.dim]\n if not s.unknown_rank else None for s in shapes]\n pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(\n placeholder._op._graph._c_graph, # pylint: disable=protected-access\n placeholder._as_tf_output(), # pylint: disable=protected-access\n shapes, ranks, types)\n\n return placeholder\n\n\ndef _get_device_functions(ctx, graph):\n \"\"\"Returns a tuple of device functions representing the device stack.\"\"\"\n if ctx.executing_eagerly():\n return (pydev.merge_device(ctx.device_name),)\n else:\n return tuple(graph._device_functions_outer_to_inner) # pylint: disable=protected-access\n\n\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. input placeholder).\n The entries are in the order they were captured.\n seed: The graph-level random seed.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, device stack, and\n distribution strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n \"\"\"\n super(FuncGraph, self).__init__()\n\n self.name = name\n self.inputs = []\n self.outputs = []\n self.structured_outputs = None\n self.variables = []\n self.outer_graph = ops.get_default_graph()\n self.captures = collections.OrderedDict()\n\n self._building_function = True\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n graph = self.outer_graph\n\n if context.executing_eagerly():\n self.seed = context.global_seed()\n self._xla_compile = (context.context().device_spec.device_type == \"TPU\")\n self._add_device_to_stack(context.context().device_name)\n else:\n self.seed = graph.seed\n self._xla_compile = getattr(graph, \"_xla_compile\", False)\n self._device_function_stack = graph._device_function_stack.copy() # pylint: disable=protected-access\n self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access\n\n # TODO(b/112165328, b/112906995): summaries depend on inheriting collections\n # from the default graph even in eager mode. It'd be nice to not have a\n # default graph with eager execution, so hopefully this will go away when we\n # remove collections.\n # pylint: disable=protected-access\n self._collections = graph._collections\n # TODO(b/112906995): distribution strategy depends on inheriting this stack\n # from the default graph even in eager mode. Maybe it should be part of the\n # eager context?\n self._distribution_strategy_stack = graph._distribution_strategy_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n self._graph_key = graph._graph_key\n # pylint: enable=protected-access\n\n def create_op(\n self,\n op_type,\n inputs,\n dtypes,\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: A list of `DType` objects that will be the types of the tensors\n that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of\n the tensors that the operation consumes. By default, uses the base\n `DType` of each input in `inputs`. Operations that expect\n reference-typed inputs must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed\n to compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n for i, inp in enumerate(inputs):\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n inputs[i] = inp\n return super(FuncGraph, self).create_op(\n op_type, inputs, dtypes, input_types, name, attrs, op_def,\n compute_device=compute_device)\n\n def capture(self, tensor, name=None):\n \"\"\"Captures `tensor` if it's external to this graph.\n\n If `tensor` is from a different graph, returns a placeholder for it.\n `tensor` and the placeholder will appear in self.captures, and the\n placeholder will appear in self.inputs. Multiple calls to this method with\n the same `tensor` argument will return the same placeholder. If `tensor` is\n from this graph, returns `tensor`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n\n Returns:\n Tensor from this FuncGraph.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n if name is None:\n name = str(ops.uid())\n return self._capture_helper(tensor, name)\n if tensor.graph is not self:\n if name is None:\n name = tensor.op.name\n return self._capture_helper(tensor, name)\n return tensor\n\n def _capture_helper(self, tensor, name):\n captured_tensor = self.captures.get(tensor, None)\n if captured_tensor is None:\n captured_tensor = _create_substitute_placeholder(tensor, name=name,\n dtype=tensor.dtype)\n self.captures[tensor] = captured_tensor\n self.inputs.append(captured_tensor)\n tape.record_operation(\"captured_value\", [captured_tensor], [tensor],\n lambda x: [x])\n return captured_tensor\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return list(self.captures.keys())\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return list(self.captures.values())\n\n\ndef _forward_name(n):\n \"\"\"The name of a generated forward defun named n.\"\"\"\n return \"__forward_%s_%s\" % (n, ops.uid())\n\n\ndef _backward_name(n):\n \"\"\"The name of a generated backward defun named n.\"\"\"\n return \"__backward_%s_%s\" % (n, ops.uid())\n\n\ndef _inference_name(n):\n \"\"\"The name of a forward-but-no-gradient defun named n.\"\"\"\n return \"__inference_%s_%s\" % (n, ops.uid())\n\n\ndef _register(fn):\n \"\"\"Registers the function `fn`.\"\"\"\n context.context().add_function(fn)\n\n\n# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction\n# so it doesn't have the definition-generating logic and is just a container for\n# an already-defined function.\nclass _EagerDefinedFunction(object):\n \"\"\"Callable with the interface of `framework.function._DefinedFunction.`\n\n `_EagerDefinedFunction` encapsulates a function definition and its properties,\n and it provides a method for calling the encapsulated function. Some Ops\n take functions as attributes, which have type `func`; an instance of this\n class may be provided as the value of these `func` attributes.\n \"\"\"\n\n def __init__(self, name, graph, inputs, outputs, attrs):\n \"\"\"Initializes an eager defined function.\n\n Args:\n name: str, the name for the created function.\n graph: Graph, the graph containing the operations in the function\n inputs: the tensors in the graph to be used as inputs to the function\n outputs: the tensors in the graph which will be outputs to the function\n attrs: dict mapping names of attributes to their AttrValue values\n \"\"\"\n operations = [\n op for op in graph.get_operations()\n if op not in set(arg.op for arg in inputs)\n ]\n fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(\n graph._c_graph, # pylint: disable=protected-access\n compat.as_str(name),\n False,\n [o._c_op for o in operations], # pylint: disable=protected-access\n [t._as_tf_output() for t in inputs], # pylint: disable=protected-access\n [t._as_tf_output() for t in outputs], # pylint: disable=protected-access\n [],\n None,\n compat.as_str(\"\"))\n\n for name, attr_value in attrs.items():\n serialized = attr_value.SerializeToString()\n # TODO(iga): this creates and deletes a new TF_Status for every attr.\n # It might be worth creating a convenient way to re-use status.\n pywrap_tensorflow.TF_FunctionSetAttrValueProto(\n fn, compat.as_str(name), serialized)\n\n # TODO(apassos) avoid creating a FunctionDef (specially to grab the\n # signature, but also in general it's nice not to depend on it.\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)\n proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)\n function_def = function_pb2.FunctionDef()\n function_def.ParseFromString(compat.as_bytes(proto_data))\n if context.executing_eagerly():\n _register(fn)\n self.definition = function_def\n self.name = compat.as_bytes(function_def.signature.name)\n self.signature = function_def.signature\n self._num_outputs = len(self.signature.output_arg)\n self._output_types = [o.type for o in self.signature.output_arg]\n self._output_shapes = [o.shape for o in outputs]\n self.grad_func_name = None\n self.python_grad_func = None\n self._c_func = c_api_util.ScopedTFFunction(fn)\n self._grad_func = None\n self._graph = graph\n self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)\n\n def add_to_graph(self, g):\n # pylint: disable=protected-access\n if self.name not in g._functions:\n g._add_function(self)\n for f in self._graph._functions.values():\n if f.name not in g._functions:\n g._add_function(f)\n # pylint: enable=protected-access\n\n @property\n def stateful_ops(self):\n return self._stateful_ops\n\n def call(self, ctx, args):\n \"\"\"Calls this function with `args` as inputs.\n\n Function execution respects device annotations only if the function won't\n be compiled with xla.\n\n Args:\n ctx: a Context object\n args: a list of arguments to supply this function with.\n\n Returns:\n The outputs of the function call.\n \"\"\"\n\n executing_eagerly = ctx.executing_eagerly()\n\n if self._graph._xla_compile: # pylint: disable=protected-access\n # XLA compilation relies upon a custom kernel creator to run functions.\n signature = self.signature\n if executing_eagerly:\n outputs = execute.execute(\n str(signature.name),\n num_outputs=self._num_outputs,\n inputs=args,\n attrs=None,\n ctx=ctx)\n else:\n g = ops.get_default_graph()\n self.add_to_graph(g)\n op = g.create_op(\n signature.name,\n [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],\n tuple(dtypes_module.DType(x.type) for x in signature.output_arg),\n op_def=signature,\n name=\"FunctionCall\",\n compute_shapes=False)\n outputs = op.outputs\n if not outputs:\n return op\n outputs = [outputs] if isinstance(\n outputs, (ops.Tensor, type(None))) else list(outputs)\n else:\n # TODO(akshayka): Either remove this if the FunctionLibraryRuntime\n # creates `PartitionedCallOp` kernels by default, or remove the previous\n # branch if a TPU kernel is registered for `PartitionedCall`.\n outputs = functional_ops.partitioned_call(\n args=args,\n f=self,\n tout=self._output_types,\n executing_eagerly=executing_eagerly)\n\n if executing_eagerly:\n return outputs\n else:\n for i, shape in enumerate(self._output_shapes):\n outputs[i].set_shape(shape)\n return outputs\n\n\ndef _flatten(sequence):\n \"\"\"A wrapper around `nest.flatten` that also unpacks `IndexedSlices`.\"\"\"\n # TODO(akshayka): Support `SparseTensor` in a similar fashion.\n flat_sequence = nest.flatten(sequence)\n outputs = []\n for item in flat_sequence:\n if isinstance(item, ops.IndexedSlices):\n if item.dense_shape is not None:\n outputs.extend([item.values, item.indices, item.dense_shape])\n else:\n outputs.extend([item.values, item.indices])\n else:\n outputs.append(item)\n return outputs\n\n\nclass Function(object):\n \"\"\"Callable object encapsulating a function definition and its gradient.\n\n `Function` is a callable that encapsulates a function definition and\n is differentiable under `tf.GradientTape` objects.\n \"\"\"\n\n def __init__(self, func_graph, attrs=None):\n \"\"\"Initialize a Function.\n\n Args:\n func_graph: An instance of FuncGraph: the function body to wrap.\n attrs: (optional) dict mapping names of attributes to their AttrValue\n values. Attributes in `attrs` will be included in this function's\n definition.\n\n Raises:\n ValueError: If number of input_placeholders is not equal to the number\n of function inputs.\n \"\"\"\n self._func_graph = func_graph\n self._captured_inputs = list(self._func_graph.captures.keys())\n self._num_outputs = len(self._func_graph.outputs)\n self._output_shapes = tuple(\n output.shape for output in self._func_graph.outputs)\n self._attrs = attrs or {}\n self._device_functions = tuple(\n self._func_graph._device_functions_outer_to_inner) # pylint: disable=protected-access\n\n self._inference_function = _EagerDefinedFunction(\n _inference_name(self._func_graph.name), self._func_graph,\n self._func_graph.inputs, self._func_graph.outputs, self._attrs)\n self._backward_graph_function = None\n\n # Map holding distributed variables, keyed by resource handle tensors.\n self._distributed_variables = {}\n strategy = distribution_strategy_context.get_distribution_strategy()\n for variable in self._func_graph.variables:\n # If variable is not distributed, unwrap returns [variable].\n component_variables = strategy.unwrap(variable)\n # Only update the dictionary when the variable is actually distributed.\n if (len(component_variables) > 1 or component_variables[0] != variable):\n for component_variable in component_variables:\n self._distributed_variables[component_variable.handle] = variable\n\n def __call__(self, *args):\n \"\"\"Executes the wrapped function.\"\"\"\n ctx = context.context()\n device_functions = _get_device_functions(ctx, ops.get_default_graph())\n if device_functions != self._device_functions:\n raise ValueError(\n \"The current device stack does not match the device stack under \"\n \"which the TensorFlow function '%s' was created.\\n\"\n \"Current device stack: %s\\n%s device stack: %s\" %\n (self._inference_function.name, device_functions,\n self._inference_function.name, self._device_functions))\n\n for v in self._func_graph.variables:\n if v.trainable:\n tape.variable_accessed(v)\n\n captures = self._resolve_captured_inputs()\n tensor_inputs = [x for x in nest.flatten(args) if isinstance(x, ops.Tensor)]\n args = tensor_inputs + captures\n\n if tape.should_record(tensor_inputs) or tape.should_record(captures):\n return self._backprop_call(args)\n\n outputs = self._inference_function.call(ctx, args)\n return self._build_call_outputs(outputs)\n\n @property\n def graph(self):\n \"\"\"Returns the graph from which this function was constructed.\"\"\"\n return self._func_graph\n\n @property\n def variables(self):\n \"\"\"Returns all variables touched by this function.\"\"\"\n return self._func_graph.variables\n\n @property\n def inputs(self):\n \"\"\"Returns tensors in `self.graph` corresponding to arguments.\"\"\"\n return self._func_graph.inputs\n\n @property\n def outputs(self):\n \"\"\"Returns tensors in `self.graph` corresponding to return values.\"\"\"\n return self._func_graph.outputs\n\n @property\n def captured_inputs(self):\n \"\"\"Returns external Tensors captured by this function.\n\n self.__call__(*args) passes `args + self.captured_inputs` to the function.\n \"\"\"\n return self._captured_inputs\n\n @property\n def function_def(self):\n \"\"\"Returns a `FunctionDef` object representing this function.\"\"\"\n return self._inference_function.definition\n\n @property\n def output_shapes(self):\n \"\"\"The function's output shapes.\"\"\"\n # TODO(ebrevdo): Should we only keep the output shapes associated\n # with len(self._python_returns) outputs?\n # TODO(akshayka): Consider removing this.\n outputs_list = nest.flatten(self._func_graph.structured_outputs)\n j = 0\n for i, o in enumerate(outputs_list):\n if o is not None:\n if isinstance(o, ops.IndexedSlices):\n # Extract the shape of the `IndexedSlices` object's `values` field.\n outputs_list[i] = self._output_shapes[j] # the `values` shape\n if o.dense_shape is not None:\n j += 3 # skip over shapes for `values`, `indices`, `dense_shape`\n else:\n j += 2 # skip over shapes for `values`, `indices`\n else:\n outputs_list[i] = self._output_shapes[j]\n j += 1\n return nest.pack_sequence_as(self._func_graph.structured_outputs,\n outputs_list)\n\n @property\n def output_dtypes(self):\n # TODO(akshayka): Consider removing this.\n return nest.map_structure(lambda x: x.dtype if x is not None else None,\n self._func_graph.structured_outputs)\n\n def _construct_backprop_function(self):\n \"\"\"Constructs the backprop function object for this function.\"\"\"\n backwards_graph = FuncGraph(_backward_name(self._func_graph.name))\n with backwards_graph.as_default():\n gradients_wrt_outputs = [\n graph_placeholder(x.dtype, x.shape) for x in self._func_graph.outputs\n ]\n gradients_wrt_inputs = gradients_impl._GradientsHelper( # pylint: disable=protected-access\n self._func_graph.outputs,\n self._func_graph.inputs,\n grad_ys=gradients_wrt_outputs,\n src_graph=self._func_graph)\n\n self._forward_function = _EagerDefinedFunction(\n _forward_name(\n self._func_graph.name), self._func_graph, self._func_graph.inputs,\n self._func_graph.outputs + list(backwards_graph.captures.keys()),\n self._attrs)\n\n # The ordering of `backwards_graph.inputs` is important: inputs of\n # `self._backward_graph_function` correspond to outputs of\n # `self._forward_function`.\n backwards_graph.inputs = gradients_wrt_outputs + list(\n backwards_graph.captures.values())\n # Clear captures, since we pass them in as inputs.\n backwards_graph.captures = {}\n backwards_graph.outputs.extend(\n grad for grad in _flatten(gradients_wrt_inputs) if grad is not None)\n backwards_graph.structured_outputs = gradients_wrt_inputs\n self._backward_graph_function = Function(\n backwards_graph, attrs=self._attrs)\n\n def _backprop_call(self, args):\n \"\"\"Calls the forward function and records the result on a tape.\n\n (Only records results on a tape if the function has outputs)\n\n Args:\n args: All inputs to the function, including resolved captured inputs\n\n Returns:\n The call output.\n \"\"\"\n if self._backward_graph_function is None:\n self._construct_backprop_function()\n\n ctx = context.context()\n outputs = self._forward_function.call(ctx, args)\n if isinstance(outputs, ops.Operation) or outputs is None:\n return outputs\n\n # `real_outputs` are the actual outputs of the inference graph function;\n # `side_outputs` are the intermediate Tensors that were added as outputs to\n # the forward graph function so that we can compute its gradient.\n real_outputs = outputs[:self._num_outputs]\n side_outputs = outputs[self._num_outputs:]\n\n def backward_function(*args):\n return self._backward_graph_function(*(list(args) + side_outputs)) # pylint: disable=not-callable\n\n tape.record_operation(self._forward_function.signature.name, real_outputs,\n args, backward_function)\n return self._build_call_outputs(real_outputs)\n\n def _resolve_captured_inputs(self):\n \"\"\"Resolve captured distributed variables to their current values.\n\n Some inputs can be distributed variables. Such variables yield a different\n component (i.e. actual tf.Variable) variables depending on the context of\n execution.\n\n Returns:\n a list of resolved captured input tensors.\n \"\"\"\n if self._distributed_variables:\n # Loop over each captured input and check if it corresponds to something\n # distributed. If so, get its _distributed_container and fetch the\n # component appropriate for the current execution context.\n resolved_captured_inputs = self._captured_inputs[:]\n for i, captured_input in enumerate(self._captured_inputs):\n distributed_var = self._distributed_variables.get(captured_input, None)\n if distributed_var is not None:\n # distributed variables override __getattr__ and substitute the\n # right component variable. In here, `distributed_var.handle`\n # actually does the equivalent of\n # distributed_var.get_current_component_var().handle.\n resolved_captured_inputs[i] = distributed_var.handle\n return resolved_captured_inputs\n return self._captured_inputs\n\n def _build_call_outputs(self, result):\n \"\"\"Maps the fdef output list to actual output structure.\n\n Args:\n result: Output lists defined by FunctionDef.\n Returns:\n The actual call output.\n \"\"\"\n if self._func_graph.structured_outputs is None:\n return result\n\n # Use `nest.flatten` instead of `_flatten` in order to preserve any\n # IndexedSlices in `self._func_graph.structured_outputs`.\n outputs_list = nest.flatten(self._func_graph.structured_outputs)\n j = 0\n for i, o in enumerate(outputs_list):\n if o is not None:\n if isinstance(o, ops.IndexedSlices):\n # Repack Tensors for IndexedSlices.\n if o.dense_shape is not None:\n outputs_list[i] = ops.IndexedSlices(\n values=result[j],\n indices=result[j + 1],\n dense_shape=result[j + 2])\n j += 3\n else:\n outputs_list[i] = ops.IndexedSlices(\n values=result[j], indices=result[j + 1])\n j += 2\n else:\n outputs_list[i] = result[j]\n j += 1\n ret = nest.pack_sequence_as(self._func_graph.structured_outputs,\n outputs_list)\n return ret\n\n\ndef _get_defun_inputs_from_signature(signature):\n \"\"\"Maps a signature to graph-construction inputs.\"\"\"\n function_inputs = [\n graph_placeholder(spec.dtype, spec.shape)\n for spec in nest.flatten(signature)\n ]\n return nest.pack_sequence_as(signature, function_inputs)\n\n\ndef _get_defun_inputs_from_args(args):\n \"\"\"Maps python function args to graph-construction inputs.\"\"\"\n function_inputs = [\n graph_placeholder(arg.dtype, arg.shape)\n if isinstance(arg, ops.Tensor) else arg for arg in nest.flatten(args)\n ]\n return nest.pack_sequence_as(args, function_inputs)\n\n\ndef func_graph_from_py_func(name, python_func, args, kwds, signature=None):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwds: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. When a signature is provided, `args` and\n `kwds` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None` nor a\n `Tensor`.\n \"\"\"\n func_graph = FuncGraph(name)\n with func_graph.as_default(), AutomaticControlDependencies() as a:\n variable_scope.get_variable_scope().set_use_resource(True)\n\n if signature is None:\n func_args = _get_defun_inputs_from_args(args)\n func_kwds = _get_defun_inputs_from_args(kwds)\n else:\n func_args = _get_defun_inputs_from_signature(signature)\n func_kwds = {}\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n func_graph.inputs.extend(\n x for x in nest.flatten(func_args) + nest.flatten(func_kwds)\n if isinstance(x, ops.Tensor))\n\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))\n func_kwds_before = nest.pack_sequence_as(func_kwds, nest.flatten(func_kwds))\n\n def convert(x):\n \"\"\"Converts an argument to a Tensor.\"\"\"\n if x is None:\n return None\n try:\n x = ops.convert_to_tensor_or_indexed_slices(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.contrib.eager.defun, Python functions \"\n \"must return zero or more Tensors; in compilation of %s, found \"\n \"return value of type %s, which is not a Tensor.\" %\n (str(python_func), type(x)))\n x = a.mark_as_return(x)\n return x\n\n this_tape = tape.push_new_tape()\n try:\n func_outputs = python_func(*func_args, **func_kwds)\n # invariant: `func_outputs` contains only Tensors and `None`s.\n func_outputs = nest.map_structure(convert, func_outputs)\n\n def check_mutation(n1, n2):\n \"\"\"Check if two list of arguments are exactly the same.\"\"\"\n errmsg = (\"Function to be traced should not modify structure of input \"\n \"arguments. Check if your function has list and dictionary \"\n \"operations that alter input arguments, \"\n \"such as `list.pop`, `list.append`\")\n try:\n nest.assert_same_structure(n1, n2)\n except ValueError:\n raise ValueError(errmsg)\n\n for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):\n if arg1 is not arg2:\n raise ValueError(errmsg)\n\n check_mutation(func_args_before, func_args)\n check_mutation(func_kwds_before, func_kwds)\n finally:\n tape.pop_tape(this_tape)\n\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in _flatten(func_graph.structured_outputs)\n if x is not None)\n\n # Some captured variables might be components of DistributedValues.\n # Instead of storing non-distributed component variables, we\n # store their distributed containers so we can retrieve the correct\n # component variables at call-time.\n variables = list(this_tape.watched_variables())\n strategy = distribution_strategy_context.get_distribution_strategy()\n for i, variable in enumerate(variables):\n # If variable is not distributed value_container returns itself.\n variables[i] = strategy.value_container(variable)\n func_graph.variables = variables\n\n # Register any other functions defined in the graph.\n if context.executing_eagerly():\n for f in func_graph._functions.values(): # pylint: disable=protected-access\n # TODO(ashankar): What about the gradient registry?\n _register(f._c_func.func) # pylint: disable=protected-access\n\n return func_graph\n\n\n_TensorType = collections.namedtuple(\"_TensorType\", [\"dtype\", \"shape\"])\n\n\ndef _encode_arg(arg):\n \"\"\"A canonical representation for this argument, for use in a cache key.\"\"\"\n\n # `defun` uses dtypes and shapes instead of `Tensors` as cache keys. Dtypes\n # are used because TensorFlow graphs are not parametric w.r.t. dtypes. Shapes\n # are used for both performance reasons, as much TensorFlow code specializes\n # on known shapes to produce slimmer graphs, and correctness, as some\n # high-level APIs require shapes to be fully-known.\n #\n # TODO(akshayka): Add support for sparse tensors.\n #\n # pylint: disable=protected-access\n if isinstance(arg, ops.Tensor):\n return _TensorType(arg.dtype, arg._shape_tuple())\n elif isinstance(arg, ops.IndexedSlices):\n if arg.dense_shape is not None:\n return tuple([\n _TensorType(arg.values.dtype, arg.values._shape_tuple()),\n _TensorType(arg.indices.dtype, arg.indices._shape_tuple()),\n _TensorType(arg.dense_shape.dtype, arg.dense_shape._shape_tuple()),\n ])\n else:\n return tuple([\n _TensorType(arg.values.dtype, arg.values._shape_tuple()),\n _TensorType(arg.indices.dtype, arg.indices._shape_tuple()),\n ])\n elif isinstance(arg, np.ndarray):\n tensor = ops.convert_to_tensor(arg)\n return _TensorType(tensor.dtype, tensor._shape_tuple())\n # pylint: enable=protected-access\n elif isinstance(arg, (list, tuple)):\n return tuple([_encode_arg(elem) for elem in arg])\n elif isinstance(arg, dict):\n return tuple(\n (_encode_arg(key), _encode_arg(arg[key])) for key in sorted(arg))\n else:\n return arg\n\n\ndef _deterministic_dict_values(dictionary):\n return tuple(dictionary[key] for key in sorted(dictionary))\n\n\nclass PolymorphicFunction(object):\n \"\"\"Wrapper class for the graph functions defined for a Python function.\n\n See the documentation for `defun` for more information on the semantics of\n defined functions.\n\n PolymorphicFunction class is thread-compatible meaning that minimal\n usage of defuns (defining and calling) is thread-safe, but if users call other\n methods or invoke the base `python_function` themselves, external\n synchronization is necessary.\n \"\"\"\n\n def __init__(self,\n python_function,\n name,\n input_signature=None):\n \"\"\"Initializes a polymorphic function.\n\n Args:\n python_function: the function to be wrapped.\n name: the name given to it.\n input_signature: a possibly nested sequence of `TensorSpec` objects\n specifying the input signature of this function. If `None`, a separate\n function is instantiated for each inferred input signature.\n\n Raises:\n ValueError: if `input_signature` is not None and the `python_function`'s\n argspec has keyword arguments.\n \"\"\"\n\n if isinstance(python_function, functools.partial):\n self._python_function = python_function.func\n self._args_to_prepend = python_function.args or tuple()\n self._kwds_to_include = python_function.keywords or {}\n else:\n self._python_function = python_function\n self._args_to_prepend = tuple()\n self._kwds_to_include = {}\n self._name = name\n self._function_cache = collections.OrderedDict()\n self._variables = []\n\n self._lock = threading.Lock()\n\n fullargspec = tf_inspect.getfullargspec(self._python_function)\n if tf_inspect.ismethod(self._python_function):\n # Remove `self`: default arguments shouldn't be matched to it.\n args = fullargspec.args[1:]\n else:\n args = fullargspec.args\n\n # A cache mapping from argument name to index, for canonicalizing\n # arguments that are called in a keyword-like fashion.\n self._args_to_indices = {arg: i for i, arg in enumerate(args)}\n # A cache mapping from arg index to default value, for canonicalization.\n offset = len(args) - len(fullargspec.defaults or [])\n self._arg_indices_to_default_values = {\n offset + index: default\n for index, default in enumerate(fullargspec.defaults or [])\n }\n if input_signature is None:\n self._input_signature = None\n else:\n if fullargspec.varkw is not None or fullargspec.kwonlyargs:\n raise ValueError(\"Cannot define a TensorFlow function from a Python \"\n \"function with keyword arguments when \"\n \"input_signature is provided.\")\n\n if not isinstance(input_signature, (tuple, list)):\n raise TypeError(\"input_signature must be either a tuple or a \"\n \"list, received \" + str(type(input_signature)))\n\n self._input_signature = tuple(input_signature)\n self._flat_input_signature = tuple(nest.flatten(input_signature))\n\n def __call__(self, *args, **kwds):\n \"\"\"Calls a graph function specialized to the inputs.\"\"\"\n graph_function, inputs = self._maybe_define_function(*args, **kwds)\n return graph_function(*inputs)\n\n @property\n def python_function(self):\n \"\"\"Returns the wrapped Python function.\"\"\"\n return self._python_function\n\n # TODO(akshayka): Remove this property.\n @property\n def variables(self):\n \"\"\"Returns the union of all variables referenced by cached `Function`s`.\"\"\"\n return self._variables\n\n def get_concrete_function(self, *args, **kwargs):\n \"\"\"Returns a `Function` object specialized to inputs and execution context.\n\n `args` and `kwargs` are ignored if this `PolymorphicFunction` was created\n with an `input_signature`.\n\n Args:\n *args: inputs to specialize on.\n **kwargs: inputs to specialize on.\n \"\"\"\n graph_function, _ = self._maybe_define_function(*args, **kwargs)\n return graph_function\n\n def __get__(self, instance, owner):\n \"\"\"Makes it possible to defun instance methods.\"\"\"\n del owner\n # `instance` here is the instance that this `PolymorphicFunction` was\n # accessed through; e.g., for\n #\n # class Foo(object):\n #\n # @function.defun\n # def bar(self):\n # ...\n #\n # foo = Foo()\n # foo.bar() # `foo.bar` is a `PolymorphicFunction` instance\n #\n # then `instance` will be `foo` (and `owner` will be `Foo`).\n return functools.partial(self.__call__, instance)\n\n def _cache_key(self, args, kwds, ctx, graph):\n \"\"\"Computes the cache key given inputs and execution context.\"\"\"\n if self._input_signature is None:\n inputs = (args, kwds) if kwds else args\n cache_key = tuple(_encode_arg(arg) for arg in inputs)\n else:\n del args, kwds\n cache_key = self._flat_input_signature\n\n # The graph, or whether we're executing eagerly, should be a part of the\n # cache key so we don't improperly capture tensors such as variables.\n executing_eagerly = ctx.executing_eagerly()\n execution_context = executing_eagerly or graph\n\n # Putting the device in the cache key ensures that call-site device\n # annotations are respected.\n device_functions = _get_device_functions(ctx, graph)\n\n # `ops.colocate_with` directives translate into `ops.device` directives when\n # eager execution is enabled.\n colocation_stack = (None if executing_eagerly else\n tuple(graph._colocation_stack.peek_objs())) # pylint: disable=protected-access\n\n return cache_key + (execution_context, device_functions, colocation_stack)\n\n def _canonicalize_function_inputs(self, *args, **kwds):\n \"\"\"Canonicalizes `args` and `kwds`.\n\n Canonicalize the inputs to the Python function using its fullargspec. In\n particular, we parse the varags and kwargs that this\n `PolymorphicFunction` was called with into a tuple corresponding to the\n Python function's positional (named) arguments and a dictionary\n corresponding to its kwargs.\n\n Args:\n *args: The varargs this object was called with.\n **kwds: The keyword args this function was called with.\n\n Returns:\n A canonicalized ordering of the inputs.\n\n Raises:\n ValueError: If a keyword in `kwds` cannot be matched with a positional\n argument when an input signature is specified, or when the inputs\n do not conform to the input signature.\n \"\"\"\n args = self._args_to_prepend + args\n kwds = dict(kwds, **self._kwds_to_include)\n # Maps from index of arg to its corresponding value, according to `args`\n # and `kwds`; seeded with the default values for the named args that aren't\n # in `args`.\n arg_indices_to_values = {\n index: default\n for index, default in six.iteritems(self._arg_indices_to_default_values)\n if index >= len(args)\n }\n consumed_args = []\n for arg, value in six.iteritems(kwds):\n index = self._args_to_indices.get(arg, None)\n if index is not None:\n arg_indices_to_values[index] = value\n consumed_args.append(arg)\n elif self._input_signature is not None:\n raise ValueError(\"Cannot define a TensorFlow function from a Python \"\n \"function with keyword arguments when \"\n \"input_signature is provided.\")\n for arg in consumed_args:\n # After this loop, `kwds` will only contain true keyword arguments, as\n # opposed to named arguments called in a keyword-like fashion.\n kwds.pop(arg)\n inputs = args + _deterministic_dict_values(arg_indices_to_values)\n if self._input_signature is None:\n return inputs, kwds\n else:\n assert not kwds\n try:\n nest.assert_same_structure(self._input_signature, inputs)\n except (ValueError, TypeError):\n raise ValueError(\"Structure of Python function inputs does not match \"\n \"input_signature.\")\n flat_inputs = nest.flatten(inputs)\n if any(not isinstance(arg, ops.Tensor) for arg in flat_inputs):\n raise ValueError(\"When input_signature is provided, all inputs to \"\n \"the Python function must be Tensors.\")\n tensor_specs = [\n tensor_spec.TensorSpec.from_tensor(tensor) for tensor in flat_inputs\n ]\n if any(not spec.is_compatible_with(other)\n for spec, other in zip(self._flat_input_signature, tensor_specs)):\n raise ValueError(\"Python inputs incompatible with input_signature: \"\n \"inputs (%s), input_signature (%s)\" %\n (str(inputs), str(self._input_signature)))\n return inputs, {}\n\n def _maybe_define_function(self, *args, **kwds):\n \"\"\"Gets a function for these inputs, defining it if necessary.\n\n Args:\n *args: args for the Python function.\n **kwds: keywords for the Python function.\n\n Returns:\n A graph function corresponding to the input signature implied by args and\n kwds, as well as the inputs that the object should be called with.\n\n Raises:\n ValueError: If inputs are incompatible with the input signature.\n TypeError: If the function inputs include non-hashable objects\n \"\"\"\n\n args, kwds = self._canonicalize_function_inputs(*args, **kwds)\n cache_key = self._cache_key(args, kwds, context.context(),\n ops.get_default_graph())\n with self._lock:\n try:\n graph_function = self._function_cache.get(cache_key, None)\n except TypeError:\n raise TypeError(\"Arguments supplied to `defun`-generated functions \"\n \"must be hashable.\")\n\n if graph_function is None:\n graph_function = Function(\n func_graph_from_py_func(self._name, self._python_function, args,\n kwds, self._input_signature))\n self._variables.extend(\n [v for v in graph_function.variables if v not in self._variables])\n self._function_cache[cache_key] = graph_function\n return graph_function, (args, kwds)\n\n\ndef _validate_signature(signature):\n if any(not isinstance(arg, tensor_spec.TensorSpec)\n for arg in nest.flatten(signature)):\n raise TypeError(\"Invalid input_signature %s; input_signature must be \"\n \"a possibly nested sequence of TensorSpec objects.\")\n\n\ndef defun(func=None, input_signature=None):\n \"\"\"Compiles a Python function into a callable TensorFlow graph.\n\n `defun` (short for \"define function\") trace-compiles a Python function\n composed of TensorFlow operations into a callable that executes a `tf.Graph`\n containing those operations. The callable produced by `defun` contains only\n the subgraph of TensorFlow operations that were executed when the Python\n function was called with a particular input signature, defined as a list\n of the shapes and dtypes of the Python function's Tensor-valued arguments and\n the values of its non-Tensor Python objects. In particular, `defun` is _not_ a\n compiler for arbitrary Python code.\n\n When eager execution is enabled, the ability to create graphs from Python\n functions makes it possible to incrementally trade off debugability and\n interactivity for performance. Functions compiled with `defun` cannot be\n inspected with `pdb` and `print` statements; however, executing a graph\n generated by `defun` sometimes takes less time and memory than eagerly\n executing the corresponding Python function, since specifying computations as\n graphs allows for optimizations like automatic buffer reuse and\n parallelization among ops. Note that executing a `defun`-compiled function\n incurs a small constant overhead, so eagerly executing sufficiently small\n Python functions might take less time than executing their corresponding\n `defun`-generated graphs.\n\n For a Python function to be compatible with `defun`, all of its arguments must\n be hashable Python objects or lists thereof. The function itself may not\n modify the list/map structure of its arguments. Additionally, it must return\n zero or more `tf.Tensor` objects. If the Python function returns\n a `tf.Variable`, its compiled version will return the value of that variable\n as a `tf.Tensor`.\n\n Executing a graph generated by `defun` respects device annotations (i.e.,\n all `with tf.device` directives present in a Python function will also be\n present in its corresponding graph), but it is not yet possible to execute the\n generated graphs across multiple machines.\n\n _Example Usage_\n\n ```python\n import tensorflow as tf\n\n tf.enable_eager_execution()\n\n # A simple example.\n def f(x, y):\n return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)\n\n g = tf.contrib.eager.defun(f)\n\n x = tf.constant([[2.0, 3.0]])\n y = tf.constant([[3.0, -2.0]])\n\n # `f` and `g` will return the same value, but `g` will be executed as a\n # TensorFlow graph.\n assert f(x, y).numpy() == g(x, y).numpy()\n\n # `defun` is capable of compiling Python functions that close over Python\n # objects, including Tensors and Variables.\n @tf.contrib.eager.defun\n def h():\n return f(x, y)\n\n assert (h().numpy() == f(x, y).numpy()).all()\n\n # `defun` automatically lifts variables out of the graphs it creates,\n # allowing you to compile the `call` methods of `tf.keras.layers.Layer` and\n # `tf.keras.Model` objects.\n class MyModel(tf.keras.Model):\n\n def __init__(self, keep_probability=0.2):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n self.keep_probability = keep_probability\n\n @tf.contrib.eager.defun\n def call(self, inputs, training=True):\n x = self.dense2(self.dense1(inputs))\n if training:\n return tf.nn.dropout(x, self.keep_probability)\n else:\n return x\n\n model = MyModel()\n model(x, training=True) # executes a graph, with dropout\n model(x, training=False) # executes a graph, without dropout\n\n # `defun`-compiled functions are differentiable.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n with tf.GradientTape() as tape:\n outputs = model(x)\n gradient = tape.gradient(outputs, model.trainable_variables)\n optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,\n model.trainable_variables))\n ```\n\n When using `defun`, there are subtleties regarding inputs, Python control\n flow, and variable creation that one should be aware of. For concreteness, let\n `f` be a Python function that returns zero or more `tf.Tensor` objects and\n let `F = defun(f)`. `F` builds a graph for each unique input signature it\n sees, Python control flow is baked into graphs, and operations related to\n variable initialization are automatically lifted out of the graphs that `F`\n generates and placed in the eager context if executing eagerly or into an\n outer graph otherwise.\n\n _Input Signatures_\n By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph\n for every unique sequence of the shapes and dtypes of Tensor arguments and\n the values of Python objects it is invoked with. For example, calling\n `F(tf.random_uniform([2])` will execute a different graph than\n `F(tf.random_uniform([3])` because the two inputs have different shapes.\n The first time that `F(*args, **kwargs)` is called with a particular sequence\n of Tensor shapes and dtypes and Python values, it constructs a graph by\n tracing the execution of `f(*args, **kwargs)`; this graph is bound to an\n input signature inferred from `(*args, **kwargs)` and cached for future reuse.\n\n `tf.contrib.eager.defun` caches graphs for your convenience, letting you\n define TensorFlow functions without explicitly specifying their signatures.\n However, this policy is conservative and potentially expensive; for example,\n when different invocations of your function have differently-shaped Tensor\n inputs, this policy might generate more graph functions than necessary. To\n eliminate such costs, `tf.contrib.eager.defun` allows you to supply an\n optional `input_signature` argument specifying the shapes and dtypes of the\n inputs. In particular, the shapes may be partially unspecified, with `None`s\n in the unknown dimensions. When an input signature is provided,\n `tf.contrib.eager.defun` will only instantiate a single graph for the\n decorated Python function. The following is an example:\n\n ```python\n import tensorflow as tf\n\n # The first `TensorSpec` below describes the shape and dtype of `words`,\n # and the second describes the shape and dtype of `another_tensor`. Note that\n # the last dimension of the `words` `TensorSpec` is left unspecified.\n @tf.contrib.eager.defun(input_signature=[\n tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),\n tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)\n ])\n def my_sequence_model(words, another_tensor):\n ...\n\n # Note how the third dimension of the first input can vary freely.\n words = tf.random_uniform(([50, 300, 10])\n second_input = tf.random_uniform([300, 100])\n my_sequence_model(words, second_input)\n\n words = tf.random_uniform(([50, 300, 20])\n my_sequence_model(words, second_input)\n\n # Passing an input with an incompatible shape will raise an error.\n words = tf.random_uniform(([50, 100, 20])\n my_sequence_model(words, second_input) # <---- This will raise an error.\n\n ```\n\n Python functions that are compiled with an `input_signature` must only accept\n Tensors as arguments and must not take unnamed keyword arguments (**kwargs).\n\n _Tracing_\n Be aware that because `F` only logs TensorFlow operations, all the other\n Python code that `f` executes will only shape the _construction_ of the graphs\n that `F` executes: the Python code won't be executed when the graphs\n themselves are executed, though it will be executed every time the Python\n function is traced (and a given Python function might be traced multiple\n times, once for each input signature it is invoked with). For example, whereas\n the Python function\n\n ```python\n import tensorflow as tf\n import numpy as np\n\n tf.enable_eager_execution()\n\n def add_noise():\n return tf.eye(5) + np.random.randn(5, 5)\n ```\n\n will return a different output everytime it is invoked, the compiled function\n `compiled = tf.contrib.eager.defun(add_noise)` will return the same value\n every time it is called, since a particular random offset generated by NumPy\n will be inserted into the graph as a TensorFlow constant. The solution is to\n replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.\n\n _Python Side-Effects_\n A corollary of the previous discussion on tracing is the following: If a\n Python function `f` has Python side-effects, then executing `f` multiple times\n will not necessarily be semantically equivalent to executing `F =\n tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact\n that `defun` only captures the subgraph of TensorFlow operations that is\n constructed when `f` is called in a graph-building context.\n\n _Python Control Flow_.\n The structure of many machine learning computations depend upon whether one is\n training or validating, and it is common to nest specialized logic under `if\n training:` blocks. By mapping each input signature to a unique graph, `defun`\n lets users transparently compile such code, as the following code snippet\n demonstrates:\n\n ```python\n import tensorflow as tf\n\n tf.enable_eager_execution()\n\n @tf.contrib.eager.defun\n def lossy_matmul(W, x, training=True):\n outputs = tf.matmul(W, x)\n if training:\n outputs = tf.nn.dropout(outputs, keep_probability=0.2)\n return outputs\n\n W = tf.random_normal((3, 5))\n x = tf.random_normal((5, 1))\n\n # Executes a graph that applies dropout.\n lossy_outputs = lossy_matmul(W, x, training=True)\n\n # Executes a graph that does not apply dropout.\n exact_outputs = lossy_matmul(W, x, training=False)\n ```\n\n On the other hand, because `defun` generates graphs by tracing and not by\n source code analysis, it fully unrolls Python `for` and `while` loops,\n potentially creating large graphs. If your Python function has native loops\n that run for many iterations, consider replacing them with `tf.while_loop`\n operations.\n\n When constructing graphs, `tf.Tensor` objects cannot be used as Python\n `bool` objects. This means, for example, that you should replace code in `f`\n resembling\n\n ```python\n\n if tensor < 10:\n true_fn()\n else:\n false_fn()\n ```\n\n with `tf.cond(tensor < 10, true_fn, false_fn)`.\n\n _Variables_\n TensorFlow operations related to variable creation and initialization are\n automatically lifted out of the graphs generated by `defun`. In practice, this\n implies that variable creation and initialization only happen the first time\n `F` is called, and that variables are reused every time thereafter. Many\n TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the\n first time they are called and reuse them thereafter. Automatic variable\n lifting makes it possible to compile these APIs without extra effort, at the\n cost of introducing a discrepancy between the semantics of executing Python\n functions and their corresponding compiled functions. For example:\n\n ```python\n import tensorflow as tf\n\n tf.enable_eager_execution()\n\n def fn():\n x = tf.Variable(0.0)\n x.assign_add(1.0)\n return x.read_value()\n\n # `fn` is a Python function, so x is created, initialized, and destroyed upon\n # every invocation\n assert fn().numpy() == fn().numpy() == 1.0\n\n compiled = tf.contrib.eager.defun(fn)\n\n # Compiling `fn` with `defun` hoists all variables outside of the generated\n # graph, so initialization happens exactly once.\n assert compiled().numpy() == 1.0\n assert compiled().numpy() == 2.0\n ```\n\n Finally, because each input signature is bound to a unique graph, if your\n Python function constructs `tf.Variable` objects, then each graph constructed\n for that Python function will reference a unique set of variables. To\n circumvent this problem, we recommend against compiling Python functions that\n create `tf.Variable` objects. Instead, Python functions should either\n lexically close over `tf.Variable` objects or accept them as arguments,\n preferably encapsulated in an object-oriented container. If you must create\n variables inside your Python function and you want each graph generated for it\n to reference the same set of variables, add logic to your Python function that\n ensures that variables are only created the first time it is called and are\n reused for every subsequent invocation; note that this is precisely what\n `tf.keras.layers.Layer` objects do, so we recommend using them to represent\n variable-bearing computations whenever possible.\n\n Args:\n func: function to be compiled. If `func` is None, returns a\n decorator that can be invoked with a single argument - `func`. The\n end result is equivalent to providing all the arguments up front.\n In other words, defun(input_signature=...)(func) is equivalent to\n defun(func, input_signature=...). The former allows\n the following use case:\n @tf.contrib.eager.defun(input_signature=...)\n def foo(...):\n ...\n\n input_signature: A possibly nested sequence of\n `tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of\n the Tensors that will be supplied to this function. If `None`, a separate\n function is instantiated for each inferred input signature. If a\n signature is specified, every input to `func` must be a `Tensor`, and\n `func` cannot accept `**kwargs`.\n\n Returns:\n If `func` is not None, returns a callable that will execute the compiled\n function (and return zero or more `tf.Tensor` objects).\n If `func` is None, returns a decorator that, when invoked with a single\n `func` argument, returns a callable equivalent to the case above.\n\n Raises:\n TypeError: If `input_signature` is neither `None` nor a sequence of\n `tf.contrib.eager.TensorSpec` objects.\n \"\"\"\n\n if input_signature is not None:\n _validate_signature(input_signature)\n\n # TODO(apassos): deal with captured global state. Deal with control flow.\n def decorated(function):\n try:\n name = function.__name__\n except AttributeError:\n name = \"function\"\n return tf_decorator.make_decorator(\n function,\n PolymorphicFunction(function, name, input_signature=input_signature))\n\n # This code path is for the `foo = tfe.defun(foo, ...)` use case\n if func is not None:\n return decorated(func)\n\n # This code path is for the\n #\n # @tfe.defun(...)\n # def foo(...):\n # ...\n #\n # use case, which is equivalent to `foo = tfe.defun(...)(foo)`\n return decorated\n\n\nclass AutomaticControlDependencies(object):\n \"\"\"Context manager to automatically add control dependencies.\n\n Code under this context manager will act as if a sensible set of control\n dependencies were present. More specifically:\n 1. All stateful ops in the scope will execute\n 2. Stateful ops which modify the same resource will execute in program order\n\n Note: creating variables in an automatic control dependencies context is not\n supported (the value of the variables will never change as they will keep\n getting reinitialized).\n\n NOT THREAD SAFE\n \"\"\"\n\n def __init__(self):\n self._returned_tensors = set()\n\n def mark_as_return(self, tensor):\n \"\"\"Acts like identity but marks the `Tensor` as a return value.\n\n This will possibly return a copy of the `Tensor`. Usage:\n\n ```\n with AutomaticControlDependencies() as a:\n ...\n t = a.mark_as_return(t)\n _ = ...(t...) # i.e. it's safe to use t here\n ```\n\n Args:\n tensor: the `Tensor` to be marked\n\n Returns:\n a copy of the `Tensor`.\n \"\"\"\n if isinstance(tensor, ops.IndexedSlices):\n values = array_ops.identity(tensor.values)\n indices = array_ops.identity(tensor.indices)\n self._returned_tensors.add(indices)\n self._returned_tensors.add(values)\n return ops.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)\n # We want to make the return values depend on the stateful operations, but\n # we don't want to introduce a cycle, so we make the return value the result\n # of a new identity operation that the stateful operations definitely don't\n # depend on.\n tensor = array_ops.identity(tensor)\n self._returned_tensors.add(tensor)\n return tensor\n\n def __enter__(self):\n if context.executing_eagerly():\n return self\n # This code assumes no other thread is adding ops to the graph while\n # we're adding ops to the graph.\n # TODO(apassos): Fix this by locking the graph or using a temporary\n # graph (but that would mess up devices and collections at least,\n # probably other things as well).\n self._graph = ops.get_default_graph()\n self._n_operations = len(self._graph.get_operations())\n return self\n\n def _process_switch(self, switch_op, ops_which_must_run,\n last_op_using_resource_tensor, merge_for_resource):\n \"\"\"Processes a switch node for a resource input.\n\n When tensorflow creates a cond, it creates a control flow context for each\n branch of the cond. Each external tensor accessed by that branch is routed\n through a switch op, which gets created in the graph _after_ the op which\n uses that tensor get created.\n\n If the resource comes from another switch op we process that one first.\n\n _process_switch creates a corresponding merge node for the switch node. This\n merge node is added to the outer control flow context of the switch\n node. We also ensure that:\n\n 1. The switch node executes after the previous op which used the resource\n tensor\n\n 2. Any op which uses a resource output of the switch node executes before\n the merge for the switch node.\n\n 3. The next op which uses the input resource to the switch node (which\n might be another switch node for the other branch of the conditional)\n will execute after the merge node is done.\n\n 4. The merge node is marked as must_run so it will run even if no\n subsequent operation uses the resource.\n\n Args:\n switch_op: the switch op to be processed\n ops_which_must_run: the set of ops which must run\n last_op_using_resource_tensor: map from resource tensor to last op using\n it\n merge_for_resource: map from resource tensor to merge which must follow\n all usages of it.\n \"\"\"\n inp = switch_op.inputs[0]\n if inp.dtype == dtypes_module.resource and inp.op.type == \"Switch\":\n self._process_switch(inp.op, ops_which_must_run,\n last_op_using_resource_tensor, merge_for_resource)\n if switch_op.outputs[0] in merge_for_resource:\n return\n new_merge = control_flow_ops.merge(switch_op.outputs,\n name=\"artificial_merge\")\n new_merge[0].op._control_flow_context = ( # pylint: disable=protected-access\n switch_op._control_flow_context.outer_context) # pylint: disable=protected-access\n # Ensures the merge always runs\n ops_which_must_run.add(new_merge[0].op)\n if inp in last_op_using_resource_tensor:\n # Ensures the switch executes after the previous op using the resource.\n switch_op._add_control_input(last_op_using_resource_tensor[inp]) # pylint: disable=protected-access\n # Ensure the next op outside the cond happens after the merge.\n last_op_using_resource_tensor[inp] = new_merge[0].op\n if inp in merge_for_resource:\n merge_for_resource[inp]._add_control_input(new_merge[0].op) # pylint: disable=protected-access\n for o in switch_op.outputs:\n # Ensures the merge will execute after all ops inside the cond\n merge_for_resource[o] = new_merge[0].op\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n if context.executing_eagerly():\n return\n\n if self._graph is not ops.get_default_graph():\n raise RuntimeError(\n \"Graph changed while trying to add control dependencies.\")\n\n # map from resource tensor to the last op which used it\n last_op_using_resource_tensor = {}\n # set of conditional and loop exits\n ops_which_must_run = set()\n # merge which must depend on ops which use this resource\n merge_for_resource = {}\n\n new_operations = self._graph.get_operations()[self._n_operations:]\n\n # Ensures that uses of resource tensors get serialized properly and all\n # execute. This is done by keeping a map from resource tensor to the last op\n # in graph-construction order which used it (last_op_using_resource_tensor).\n #\n # Conditionals are written in TensorFlow such that every external tensor\n # accessed in the conditional goes through a switch op and every return\n # tensor (it's guaranteed that there will be at least one) goes through a\n # merge op.\n #\n # To handle conditionals, switches are handled in a special way (see\n # comments for _process_switch). Merge nodes created by TF's conditional\n # logic (as opposed to by _process_switch) are forced to run and also get a\n # control dependency added to them to ensure all stateful ops inside their\n # control flow context run.\n #\n # We also ensure that if an op is using a resource output by a switch node\n # (that is, a resource tensor for which there's a value in\n # merge_for_resource) this op will run before the merge for that resource.\n #\n # We try to add control inputs to nodes respecting their control flow\n # contexts to avoid dead nodes propagating everywhere and leading to\n # \"retval[0] doesn't have value\" errors. If a node gets a control dependency\n # on a dead node (i.e. a note from an untaken control flow branch) that node\n # will be marked as dead unless it's a merge node.\n #\n # TODO(apassos): serialize non-resource-taking stateful ops as well, and\n # test that it works. Support while loops. Support init_scope escaping from\n # this.\n for op in new_operations:\n # TODO(apassos) make this code safely support while loops.\n if isinstance(op._control_flow_context, control_flow_ops.WhileContext): # pylint: disable=protected-access\n continue\n control_inputs = set()\n # Ensure stateful ops run\n if (op.type not in self._graph._registered_ops # pylint: disable=protected-access\n or self._graph._registered_ops[op.type].is_stateful): # pylint: disable=protected-access\n ops_which_must_run.add(op)\n # Ignore switches (they're handled separately)\n if op.type == \"Switch\" and op.inputs[0].dtype == dtypes_module.resource:\n continue\n # Make merges trigger all other computation which must run\n if op.type == \"Merge\":\n for o in ops_which_must_run:\n op._add_control_input(o) # pylint: disable=protected-access\n for inp in o.inputs:\n if inp in last_op_using_resource_tensor:\n last_op_using_resource_tensor[inp] = op\n ops_which_must_run = set([op])\n continue\n for inp in op.inputs:\n if inp.dtype == dtypes_module.resource:\n # Deal with switches, finally.\n if inp.op.type == \"Switch\":\n self._process_switch(inp.op, ops_which_must_run,\n last_op_using_resource_tensor,\n merge_for_resource)\n # Ensure uses of resources are serialized\n if inp in last_op_using_resource_tensor:\n if (last_op_using_resource_tensor[inp]._control_flow_context # pylint: disable=protected-access\n is op._control_flow_context): # pylint: disable=protected-access\n control_inputs.add(last_op_using_resource_tensor[inp])\n # Ensure merges happen after the closing of a cond block\n if inp in merge_for_resource:\n merge_for_resource[inp]._add_control_input(op) # pylint: disable=protected-access\n last_op_using_resource_tensor[inp] = op\n control_inputs = [c for c in control_inputs\n if c._control_flow_context is op._control_flow_context] # pylint: disable=protected-access\n op._add_control_inputs(control_inputs) # pylint: disable=protected-access\n\n # Ensure all ops which must run do run\n for r in self._returned_tensors:\n if ops_which_must_run:\n r.op._add_control_inputs( # pylint: disable=protected-access\n [o for o in ops_which_must_run\n if o._control_flow_context is r.op._control_flow_context]) # pylint: disable=protected-access\n\n\ndef automatic_control_dependencies(f):\n \"\"\"Wraps f to automatically insert control dependencies.\n\n The inserted dependencies ensure that:\n 1. All stateful ops in f run when the result of f runs\n 2. Updates to the same resources happen in order.\n\n Args:\n f: the function to be wrapped.\n\n Returns:\n The wrapped function.\n \"\"\"\n\n def wrapper(*args, **kwds):\n with AutomaticControlDependencies() as a:\n result = f(*args, **kwds)\n result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]\n return nest.pack_sequence_as(result, result_flat)\n\n return tf_decorator.make_decorator(f, wrapper)\n" ]
[ [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.eager.context.context", "tensorflow.python.ops.functional_ops.partitioned_call", "tensorflow.python.util.nest.flatten", "tensorflow.python.pywrap_tensorflow.TF_FunctionToFunctionDef", "tensorflow.python.pywrap_tensorflow.TF_GetBuffer", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.core.framework.function_pb2.FunctionDef", "tensorflow.python.eager.context.global_seed", "tensorflow.python.util.tf_inspect.ismethod", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.eager.tape.pop_tape", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.c_api_util.ScopedTFFunction", "tensorflow.python.framework.ops.internal_convert_to_tensor", "tensorflow.python.framework.c_api_util.tf_buffer", "tensorflow.python.training.distribution_strategy_context.get_distribution_strategy", "tensorflow.python.framework.device.merge_device", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.framework.tensor_spec.TensorSpec.from_tensor", "tensorflow.python.util.compat.as_str", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.framework.dtypes.DType", "tensorflow.python.ops.resource_variable_ops.get_resource_handle_data", "tensorflow.python.eager.tape.variable_accessed", "tensorflow.python.ops.gradients_impl._GradientsHelper", "tensorflow.python.eager.tape.record_operation", "tensorflow.python.eager.tape.push_new_tape", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.eager.graph_only_ops.graph_placeholder", "tensorflow.python.framework.ops.uid", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.eager.tape.should_record", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.ops.control_flow_ops.merge" ] ]
Knowledge-Precipitation-Tribe/Decision-tree-and-Random-forest
[ "1b7331af6e91ba4f377beba74ea88e53e7e95016" ]
[ "code/decisionTreeRegressor/decisionTreeRegressor.py" ]
[ "# -*- coding: utf-8 -*-#\n'''\n# Name: decisionTreeRegressor\n# Description: \n# Author: super\n# Date: 2020/3/17\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\n\ndef load_data():\n N = 100\n x = np.random.rand(N) * 6 - 3 # [-3,3)\n x.sort()\n y = np.sin(x) + np.random.randn(N) * 0.05\n x = x.reshape(-1, 1)\n return x, y\n\nif __name__ == \"__main__\":\n x, y = load_data()\n x_test = np.linspace(-3, 3, 50).reshape(-1, 1)\n depth = [2,4,6,8,10]\n clr = 'rgbmy'\n\n plt.plot(x, y, 'k^', linewidth=2, label='Actual')\n for i, d in enumerate(depth):\n reg = DecisionTreeRegressor(criterion='mse', max_depth=d)\n reg.fit(x,y)\n y_hat = reg.predict(x_test)\n plt.plot(x_test, y_hat, '-', color=clr[i], linewidth=2, label='Depth=%d' % d)\n plt.legend()\n plt.savefig('DecisionTreeRegressor.png')\n plt.show()" ]
[ [ "numpy.sin", "numpy.random.rand", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.random.randn", "sklearn.tree.DecisionTreeRegressor", "matplotlib.pyplot.show", "numpy.linspace" ] ]
shiminhu/jittor
[ "8aa5974fef8106d3ddea8209fb44b33cec120a09" ]
[ "python/jittor/dataset/dataset.py" ]
[ "# ***************************************************************\n# Copyright (c) 2020 Jittor. Authors: \n# Meng-Hao Guo <[email protected]>\n# Dun Liang <[email protected]>. \n# All Rights Reserved.\n# This file is subject to the terms and conditions defined in\n# file 'LICENSE.txt', which is part of this source code package.\n# ***************************************************************\nimport numpy as np\nfrom urllib import request\nimport gzip\nimport pickle\nimport os\nfrom jittor.dataset.utils import get_random_list, get_order_list, collate_batch\nfrom collections.abc import Sequence, Mapping\nimport pathlib\nfrom PIL import Image\nfrom jittor_utils.ring_buffer import RingBuffer\nimport multiprocessing as mp\nimport signal\nfrom jittor_utils import LOG\nimport jittor as jt\n\ndataset_root = os.path.join(pathlib.Path.home(), \".cache\", \"jittor\", \"dataset\")\nmp_log_v = os.environ.get(\"mp_log_v\", 0) \nmpi = jt.mpi\n\nclass Worker:\n def __init__(self, target, args, buffer_size):\n buffer = mp.Array('c', buffer_size, lock=False)\n self.buffer = RingBuffer(buffer)\n self.p = mp.Process(target=target, args=args+(self.buffer,))\n self.p.daemon = True\n self.p.start()\n\nclass Dataset(object):\n '''\n base class for reading data\n \n Example::\n\n class YourDataset(Dataset):\n def __init__(self):\n super().__init__()\n self.set_attrs(total_len=1024)\n\n def __getitem__(self, k):\n return k, k*k\n\n dataset = YourDataset().set_attrs(batch_size=256, shuffle=True)\n for x, y in dataset:\n ......\n '''\n def __init__(self,\n batch_size = 16,\n shuffle = False,\n drop_last = False,\n num_workers = 0,\n buffer_size = 512*1024*1024):\n super().__init__()\n self.total_len = None\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.drop_last = drop_last\n self.num_workers = num_workers\n self.buffer_size = buffer_size\n\n def __getitem__(self, index):\n raise NotImplementedError\n\n def __len__(self):\n assert self.total_len >= 0\n assert self.batch_size > 0\n if self.drop_last:\n return self.total_len // self.batch_size\n return (self.total_len-1) // self.batch_size + 1\n\n def set_attrs(self, **kw):\n '''set attributes of dataset, equivalent to set_attr\n \n Attrs:\n\n * batch_size(int): batch size, default 16.\n * totol_len(int): totol lenght.\n * shuffle(bool): shuffle at each epoch, default False.\n * drop_last(bool): if true, the last batch of dataset might smaller than batch_size, default True.\n * num_workers: number of workers for loading data\n * buffer_size: buffer size for each worker in bytes, default(512MB).\n '''\n for k,v in kw.items():\n assert hasattr(self, k), k\n setattr(self, k, v)\n return self\n\n def to_jittor(self, batch):\n if isinstance(batch, np.ndarray):\n return jt.array(batch)\n assert isinstance(batch, Sequence)\n new_batch = []\n for a in batch:\n if isinstance(a, np.ndarray) or \\\n isinstance(a, int) or \\\n isinstance(a, float):\n new_batch.append(jt.array(a))\n else:\n new_batch.append(a)\n return new_batch\n\n def collate_batch(self, batch):\n return collate_batch(batch)\n\n def terminate(self):\n if hasattr(self, \"workers\"):\n for w in self.workers:\n w.p.terminate()\n \n def _worker_main(self, worker_id, buffer):\n try:\n gid_obj = self.gid.get_obj()\n gid_lock = self.gid.get_lock()\n while True:\n with gid_lock:\n while gid_obj.value >= self.batch_len:\n self.num_idle.value += 1\n self.num_idle_c.notify()\n self.gidc.wait()\n self.num_idle.value -= 1\n cid = gid_obj.value\n self.idmap[cid] = worker_id\n gid_obj.value += 1\n self.gidc.notify()\n batch = []\n if mp_log_v:\n print(f\"#{worker_id} {os.getpid()} load batch\", cid*self.real_batch_size, min(self.real_len, (cid+1)*self.real_batch_size))\n for i in range(cid*self.real_batch_size, min(self.real_len, (cid+1)*self.real_batch_size)):\n batch.append(self[self.index_list[i]])\n batch = self.collate_batch(batch)\n if mp_log_v:\n print(f\"#{worker_id} {os.getpid()} send\", type(batch).__name__, [ type(b).__name__ for b in batch ], buffer)\n buffer.send(batch)\n except:\n os.kill(os.getppid(), signal.SIGINT)\n raise\n\n def _stop_all_workers(self):\n # wait until all workers idle\n if self.num_idle.value < self.num_workers:\n with self.gid.get_lock():\n self.gid.get_obj().value = self.batch_len\n if mp_log_v:\n print(\"idle num\", self.num_idle.value)\n while self.num_idle.value < self.num_workers:\n self.num_idle_c.wait()\n if mp_log_v:\n print(\"idle num\", self.num_idle.value)\n # clean workers' buffer\n for w in self.workers:\n w.buffer.clear()\n \n def _init_workers(self):\n self.index_list = mp.Array('i', self.real_len, lock=False)\n workers = []\n # batch id to worker id\n self.idmap = mp.Array('i', self.batch_len, lock=False)\n # global token index\n self.gid = mp.Value('i', self.batch_len)\n # global token index condition\n self.gidc = mp.Condition(self.gid.get_lock())\n # number of idle workers\n self.num_idle = mp.Value('i', 0, lock=False)\n # number of idle workers condition\n self.num_idle_c = mp.Condition(self.gid.get_lock())\n for i in range(self.num_workers):\n w = Worker(target=self._worker_main, args=(i,), \n buffer_size=self.buffer_size)\n workers.append(w)\n self.workers = workers\n self.index_list_numpy = np.ndarray(dtype='int32', shape=self.real_len, buffer=self.index_list)\n\n def __del__(self):\n if mp_log_v:\n print(\"dataset deleted\")\n self.terminate()\n\n def __iter__(self):\n if self.shuffle == False:\n index_list = get_order_list(self.total_len)\n else:\n index_list = get_random_list(self.total_len)\n \n # scatter index_list for all mpi process\n # scatter rule:\n # batch 1 batch 2\n # [........] [........] ...\n # 00011122 00011122\n # if last batch is smaller than world_size\n # pad to world_size\n # last batch\n # [.] -> [012]\n if mpi:\n world_size = mpi.world_size()\n world_rank = mpi.world_rank()\n index_list = np.int32(index_list)\n mpi.broadcast(index_list, 0)\n\n assert self.batch_size >= world_size, \\\n f\"Batch size({self.batch_size}) is smaller than MPI world_size({world_size})\"\n real_batch_size = (self.batch_size-1) // world_size + 1\n if real_batch_size * world_size != self.batch_size:\n LOG.w(\"Batch size is not divisible by MPI world size, \"\n \"The distributed version may be different from \"\n \"the single-process version.\")\n fix_batch = self.total_len // self.batch_size\n last_batch = self.total_len - fix_batch * self.batch_size\n fix_batch_l = index_list[0:fix_batch*self.batch_size] \\\n .reshape(-1,self.batch_size)\n fix_batch_l = fix_batch_l[\n :,real_batch_size*world_rank:real_batch_size*(world_rank+1)]\n real_batch_size = fix_batch_l.shape[1]\n fix_batch_l = fix_batch_l.flatten()\n if not self.drop_last and last_batch > 0:\n last_batch_l = index_list[-last_batch:]\n real_last_batch = (last_batch-1)//world_size+1\n l = real_last_batch * world_rank\n r = l + real_last_batch\n if r > last_batch: r = last_batch\n if l >= r: l = r-1\n index_list = np.concatenate([fix_batch_l, last_batch_l[l:r]])\n else:\n index_list = fix_batch_l\n\n self.real_len = len(index_list)\n self.real_batch_size = real_batch_size\n assert self.total_len // self.batch_size == \\\n self.real_len // self.real_batch_size\n else:\n self.real_len = self.total_len\n self.real_batch_size = self.batch_size\n \n self.batch_len = len(self)\n \n if not hasattr(self, \"workers\") and self.num_workers:\n self._init_workers()\n \n if self.num_workers:\n self._stop_all_workers()\n self.index_list_numpy[:] = index_list\n gid_obj = self.gid.get_obj()\n gid_lock = self.gid.get_lock()\n with gid_lock:\n gid_obj.value = 0\n self.gidc.notify_all()\n for i in range(self.batch_len):\n # try not get lock first\n if gid_obj.value <= i:\n with gid_lock:\n if gid_obj.value <= i:\n if mp_log_v:\n print(\"wait\")\n self.gidc.wait()\n worker_id = self.idmap[i]\n w = self.workers[worker_id]\n if mp_log_v:\n print(f\"#{worker_id} {os.getpid()} recv buffer\", w.buffer)\n batch = w.buffer.recv()\n if mp_log_v:\n print(f\"#{worker_id} {os.getpid()} recv\", type(batch).__name__, [ type(b).__name__ for b in batch ])\n batch = self.to_jittor(batch)\n yield batch\n else:\n batch_data = []\n for idx in index_list:\n batch_data.append(self[int(idx)])\n if len(batch_data) == self.real_batch_size:\n batch_data = self.collate_batch(batch_data)\n batch_data = self.to_jittor(batch_data)\n yield batch_data\n batch_data = []\n\n # depend on drop_last\n if not self.drop_last and len(batch_data) > 0:\n batch_data = self.collate_batch(batch_data)\n batch_data = self.to_jittor(batch_data)\n yield batch_data\n\n\nclass ImageFolder(Dataset):\n \"\"\"A image classify dataset, load image and label from directory:\n \n * root/label1/img1.png\n * root/label1/img2.png\n * ...\n * root/label2/img1.png\n * root/label2/img2.png\n * ...\n\n Args:\n\n * root(string): Root directory path.\n\n Attributes:\n\n * classes(list): List of the class names.\n * class_to_idx(dict): map from class_name to class_index.\n * imgs(list): List of (image_path, class_index) tuples\n \"\"\"\n def __init__(self, root, transform=None):\n # import ipdb; ipdb.set_trace()\n super().__init__()\n self.root = root\n self.transform = transform\n self.classes = sorted([d.name for d in os.scandir(root) if d.is_dir()])\n self.class_to_idx = {v:k for k,v in enumerate(self.classes)}\n self.imgs = []\n image_exts = set(('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'))\n \n for i, class_name in enumerate(self.classes):\n class_dir = os.path.join(root, class_name)\n for dname, _, fnames in sorted(os.walk(class_dir, followlinks=True)):\n for fname in sorted(fnames):\n if os.path.splitext(fname)[-1].lower() in image_exts:\n path = os.path.join(class_dir, fname)\n self.imgs.append((path, i))\n LOG.i(f\"Found {len(self.classes)} classes and {len(self.imgs)} images.\")\n self.set_attrs(total_len=len(self.imgs))\n \n def __getitem__(self, k):\n with open(self.imgs[k][0], 'rb') as f:\n img = Image.open(f).convert('RGB')\n if self.transform:\n img = self.transform(img)\n return img, self.imgs[k][1]\n" ]
[ [ "numpy.concatenate", "numpy.ndarray", "numpy.int32" ] ]
Klettgau/CSC-332
[ "cf0563d1230cac124ed2146ab2e211a15f216c23" ]
[ "ciphers/Hill.py" ]
[ "import string\nimport numpy\nfrom flask import jsonify\nfrom flask_restful import Resource\n\n\ndef generate_key( dimension, test_flag):\n counter = 0\n if test_flag == 1:\n return numpy.array([[23, 2, 17, 17],\n [15, 25, 21, 18],\n [21, 19, 9, 13],\n [19, 6, 24, 21]]), numpy.array([[10, 13, 19, 21],\n [21, 14, 5, 10],\n [19, 8, 16, 5],\n [14, 11, 25, 22]])\n while True:\n counter += 1\n key = numpy.random.randint(0, 26, (dimension, dimension))\n inv_key, signal = modinv(key)\n if signal:\n break\n return key, inv_key\n\n\ndef modinv( key):\n det = int(round(numpy.linalg.det(key) % 26))\n dinv = numpy.round((numpy.linalg.det(key) * numpy.linalg.inv(key)))\n number = numpy.arange(1, 27)\n rez = numpy.mod(det * number, 26)\n zz = numpy.where(rez == 1)\n if numpy.size(zz) == 0:\n return (-1, False)\n zz = zz[0].item(0) + 1 # one off\n key_matrix = numpy.mod(zz * dinv, 26).astype(int) # the adjuate * d^-1\n # key inverse K-1\n return (key_matrix, True)\n\n\ndef pad_message( msg, key_row):\n msg_size = len(msg)\n while msg_size % key_row != 0:\n for i, ele in enumerate(msg):\n msg.append(ele)\n if len(msg) % key_row == 0:\n break\n if len(msg) % key_row == 0:\n break\n return msg\n\n\ndef resize_array( msg, dimension):\n msg_array = numpy.array(msg)\n msg_len = msg_array.shape[0]\n msg_array.resize((int(msg_len / dimension), dimension))\n return msg_array\n\n\ndef encode( msg, dimension, test_flag=0):\n lower_message = [string.ascii_lowercase.index(i) for i in msg]\n padded_message = pad_message(lower_message, dimension)\n key, inv_key = generate_key(dimension, test_flag)\n msg_array = resize_array(padded_message, dimension)\n print(msg_array)\n return numpy.mod(numpy.dot(msg_array, key), 26), inv_key\n\n\ndef decode( encoded_matrix, key):\n return{'decoded_message': numpy.mod(numpy.dot(encoded_matrix, key), 26),\n 'encode_message': encoded_matrix,\n 'key': key}\n\n\nmsg, inv_key = encode(\"retreat\", 4)\nprint(msg)\nprint(decode(msg, inv_key))\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.linalg.det", "numpy.where", "numpy.random.randint", "numpy.arange", "numpy.size", "numpy.linalg.inv", "numpy.mod" ] ]
jackw01/serial-grapher
[ "24d873e2274b8da14c2f0d688ab8a6d3599dc37d" ]
[ "serialgrapher/__init__.py" ]
[ "__version__ = \"0.1.0\"\n\nimport argparse\nimport time\nimport collections\nimport threading\nimport csv\n\nimport serial\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n#serialgrapher -p COM3 --y-max 100\n#serialgrapher -p COM3 -l 5000 --y-max 100 --rate-limit 0.1\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Utility for graphing CSV data received over a serial port'\n )\n parser.add_argument('-p', type=str, metavar='PORT',\n help='Serial port')\n parser.add_argument('-b', type=int, default=115200, metavar='BAUD_RATE',\n help='Baud rate')\n parser.add_argument('-l', type=int, default=1000, metavar='LENGTH',\n help='Number of data points to show on graph')\n parser.add_argument('--dont-save', action='store_true',\n help='Don\\'t save the data to a CSV file')\n parser.add_argument('--rate-limit', type=float, default=2, metavar='LIMIT',\n help='Maximum sample rate in samples/second')\n parser.add_argument('--auto-scale-y', action='store_true',\n help='Automatically scale the y axis')\n parser.add_argument('--y-min', type=float, default=0.0, metavar='MIN',\n help='Minimum y value')\n parser.add_argument('--y-max', type=float, default=1.0, metavar='MAX',\n help='Maximum y value')\n args = parser.parse_args()\n\n ser = serial.Serial(args.p, args.b, timeout=10)\n\n if not args.dont_save:\n csv_file = open(f'{time.strftime(\"%Y%m%dT%H%M%S\")}.csv', 'w', newline='')\n writer = csv.writer(csv_file)\n\n def get_line():\n return ser.readline().decode('ascii').strip()\n\n line = ''\n while line == '':\n line = get_line()\n headers = line.split(',')\n if not args.dont_save:\n writer.writerow(['Time'] + headers)\n\n DataPoint = collections.namedtuple('DataPoint', 'time values')\n\n data = collections.deque([DataPoint(0, [0] * len(headers))] * args.l, args.l)\n fig, axes = plt.subplots(len(headers))\n\n plots = []\n value_texts = []\n for i, header in enumerate(headers):\n ax = axes if len(headers) == 1 else axes[i]\n ax.set_xlim([0, args.l])\n ax.set_ylim([args.y_min, args.y_max])\n ax.set_xlabel('Time (s)')\n ax.set_ylabel(header)\n plots.append(ax.plot([], [], label=header)[0])\n value_texts.append(ax.text(0.02, 0.9, '', transform=ax.transAxes))\n\n run_serial_loop = True\n\n event = threading.Event()\n read_interval = 1.0 / args.rate_limit\n\n def read_serial():\n while run_serial_loop:\n values = [float(v) for v in get_line().split(',')]\n t = time.perf_counter()\n if t - data[-1].time > read_interval:\n data.append(DataPoint(t, values))\n if not args.dont_save:\n writer.writerow([t] + values)\n #event.wait(max(0, read_interval - (time.perf_counter() - t)))\n\n thread = threading.Thread(target=read_serial, daemon=True)\n thread.start()\n\n y_limit_margin = 0.05\n\n def animate(frame):\n for i, plot in enumerate(plots):\n ax = axes if len(headers) == 1 else axes[i]\n times = [dp.time for dp in data]\n time_max = max(times)\n prev_xlim = ax.get_xlim()\n ax.set_xlim([prev_xlim[0] + (time_max - prev_xlim[1]),\n max(time_max, min(times) + 0.001)])\n\n series = [dp.values[i] for dp in data]\n if args.auto_scale_y:\n series_min, series_max = min(series), max(series)\n margin = y_limit_margin * (series_max - series_min)\n ax.set_ylim([series_min - margin, series_max + margin])\n plot.set_data(times, series)\n value_texts[i].set_text(f'{headers[i]}: {series[-1]}')\n\n animation_interval = max(read_interval * 1000, 200)\n anim = animation.FuncAnimation(fig, animate, fargs=(), interval=animation_interval)\n\n plt.show()\n\n run_serial_loop = False\n event.set()\n thread.join()\n ser.close()\n if not args.dont_save:\n csv_file.close()\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.animation.FuncAnimation" ] ]
ivandebono/nnest
[ "490b0797312c22a1019f5f400db684b1be5e8fe5" ]
[ "examples/mcmc/gauss.py" ]
[ "import os\nimport sys\nimport argparse\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nsys.path.append(os.getcwd())\n\n\ndef main(args):\n\n from nnest import MCMCSampler\n\n def loglike(x):\n return multivariate_normal.logpdf(x, mean=np.zeros(args.x_dim), cov=np.eye(args.x_dim) + args.corr * (1 - np.eye(args.x_dim)))\n\n def transform(x):\n return 3. * x\n\n sampler = MCMCSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, hidden_dim=args.hidden_dim,\n num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,\n use_gpu=args.use_gpu)\n sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, single_thin=10)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--x_dim', type=int, default=2,\n help=\"Dimensionality\")\n parser.add_argument('--train_iters', type=int, default=2000,\n help=\"number of train iters\")\n parser.add_argument(\"--mcmc_steps\", type=int, default=10000)\n parser.add_argument('--hidden_dim', type=int, default=128)\n parser.add_argument('--num_layers', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=100)\n parser.add_argument('-use_gpu', action='store_true')\n parser.add_argument('--flow', type=str, default='nvp')\n parser.add_argument('--num_blocks', type=int, default=5)\n parser.add_argument('--noise', type=float, default=-1)\n parser.add_argument('--run_num', type=str, default='')\n parser.add_argument('--num_slow', type=int, default=0)\n parser.add_argument('--corr', type=float, default=0.99)\n parser.add_argument('--log_dir', type=str, default='logs/gauss_mcmc')\n\n args = parser.parse_args()\n main(args)\n" ]
[ [ "numpy.zeros", "numpy.eye" ] ]
vincnt/tcn-audio-fx
[ "c3ca38a7975ca99f7aebebf310a016e1cbcfdf0c" ]
[ "model_utilities.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass GatedActivation(nn.Module):\n def __init__(self, num_channels):\n super().__init__()\n self.num_channels = num_channels\n def forward(self, input):\n out_hidden_split = torch.split(input, self.num_channels, dim=1)\n out = torch.tanh(out_hidden_split[0]) * torch.sigmoid(out_hidden_split[1])\n return out\n\nclass FlowThroughActivation(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, input):\n return input\n\nclass CondGatedActivation(nn.Module):\n def __init__(self, num_channels, num_cond):\n super().__init__()\n self.num_channels = num_channels//2\n self.conv = nn.Conv1d(in_channels=num_cond, out_channels=num_channels*2, kernel_size=1, padding=0, groups=1, bias=False)\n\n def forward(self, input, cond):\n out_hidden_split = torch.split(input, self.num_channels, dim=1)\n\n cond = cond.permute(0,2,1)\n cond = cond.expand(-1, -1, input.shape[-1])\n cond_out = self.conv(cond)\n\n cond_out_split = torch.split(cond_out, self.num_channels, dim=1)\n\n out = torch.tanh(out_hidden_split[0] + cond_out_split[0]) * torch.sigmoid(out_hidden_split[1] + cond_out_split[1])\n return out\n\nclass DeepCondGatedActivation(nn.Module):\n def __init__(self, num_channels, num_cond):\n super().__init__()\n self.num_channels = num_channels\n self.conv = nn.Sequential(\n nn.Conv1d(in_channels=num_cond, out_channels=num_channels*2, kernel_size=1, padding=0, groups=1, bias=False),\n torch.nn.ReLU(),\n nn.Conv1d(in_channels=num_channels*2, out_channels=num_channels, kernel_size=1, padding=0, groups=1, bias=False),\n torch.nn.ReLU(),\n nn.Conv1d(in_channels=num_channels, out_channels=num_channels*2, kernel_size=1, padding=0, groups=1, bias=False),\n )\n\n def forward(self, input, cond):\n out_hidden_split = torch.split(input, self.num_channels, dim=1)\n\n cond = cond.permute(0,2,1)\n cond = cond.expand(-1, -1, input.shape[-1])\n cond_out = self.conv(cond)\n cond_out_split = torch.split(cond_out, self.num_channels, dim=1)\n\n out = torch.tanh(out_hidden_split[0] + cond_out_split[0]) * torch.sigmoid(out_hidden_split[1] + cond_out_split[1])\n return out\n\nclass ApplyFiLM(torch.nn.Module):\n def __init__(self):\n super(ApplyFiLM, self).__init__()\n\n def forward(self, x, cond):\n g, b = torch.chunk(cond, 2, dim=-1) # split into g and b for linear function\n g = g.permute(0,2,1) # rearranges the dimensions\n b = b.permute(0,2,1)\n x = (x * g) + b\n return x\n\nclass BasicFiLM(torch.nn.Module):\n def __init__(self, embed_dim, num_cond_params):\n super(BasicFiLM, self).__init__()\n self.num_cond_params = num_cond_params\n self.embed_dim = embed_dim # number of latent features to project to\n self.net = torch.nn.Linear(num_cond_params, embed_dim * 2)\n\n def forward(self, x, cond):\n assert cond.shape[-1] == self.num_cond_params # for weird cuda broadcasting error\n cond = self.net(cond)\n g, b = torch.chunk(cond, 2, dim=-1) # split into g and b for linear function\n g = g.permute(0,2,1) # rearranges the dimensions\n b = b.permute(0,2,1)\n x = (x * g) + b\n return x\n\nclass ShallowFilm(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(ShallowFilm, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim, embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim, embed_dim*2),\n )\n\nclass DeepFiLM(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(DeepFiLM, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim * 4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*2),\n )\n\nclass DeeperFiLM(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(DeeperFiLM, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim * 10),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*10, embed_dim*5),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*5, embed_dim*10),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*10, embed_dim*2),\n )\n\nclass SiLUFiLM(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(SiLUFiLM, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim * 4),\n torch.nn.SiLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*4),\n torch.nn.SiLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*2),\n )\n\nclass BottleneckFiLM(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(BottleneckFiLM, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim * 4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*2),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*2, embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim, embed_dim*2),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*2, embed_dim*4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*2),\n )\n\nclass DeepBottleneckFiLM(BasicFiLM):\n def __init__(self, embed_dim, num_cond_params):\n super(DeepBottleneckFiLM, self).__init__(embed_dim, num_cond_params)\n self.net = torch.nn.Sequential(\n torch.nn.Linear(num_cond_params, embed_dim * 4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*8),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*8, embed_dim*4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*2),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*2, embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim, embed_dim*2),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*2, embed_dim*4),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*4, embed_dim*8),\n torch.nn.ReLU(),\n torch.nn.Linear(embed_dim*8, embed_dim*2),\n )\n \nclass DeepSequential(nn.Module):\n def __init__(self, input, mid, output):\n super().__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Linear(input, mid * 4),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*4, mid*4),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*4, output*2),\n )\n def forward(self, input):\n return self.net(input)\n\nclass DeeperSequential(nn.Module):\n def __init__(self, input, mid, output):\n super().__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Linear(input, mid * 8),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*8, mid*4),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*4, mid), \n torch.nn.ReLU(),\n torch.nn.Linear(mid, output*2),\n )\n def forward(self, input):\n return self.net(input)\n\nclass SimpleSequential(nn.Module):\n def __init__(self, input, mid, output):\n super().__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Linear(input, mid * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*2, mid*2),\n torch.nn.ReLU(),\n torch.nn.Linear(mid*2, output*2),\n )\n def forward(self, input):\n return self.net(input)" ]
[ [ "torch.nn.Linear", "torch.sigmoid", "torch.nn.Conv1d", "torch.nn.SiLU", "torch.split", "torch.nn.ReLU", "torch.tanh", "torch.chunk" ] ]
ITBOX-ITBOY/cvat-centos7-pg
[ "fd0c8fbc62713ef0ff619ab67c351e93cbc7dd7c" ]
[ "datumaro/tests/test_widerface_format.py" ]
[ "import os.path as osp\nfrom unittest import TestCase\n\nimport numpy as np\nfrom datumaro.components.extractor import (AnnotationType, Bbox, DatasetItem,\n Label, LabelCategories)\nfrom datumaro.components.dataset import Dataset\nfrom datumaro.plugins.widerface_format import WiderFaceConverter, WiderFaceImporter\nfrom datumaro.util.test_utils import TestDir, compare_datasets\n\n\nclass WiderFaceFormatTest(TestCase):\n def test_can_save_and_load(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 2, 4, 2),\n Bbox(0, 1, 2, 3, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '2', 'invalid': '0'}),\n Label(0),\n ]\n ),\n DatasetItem(id='2', subset='train', image=np.ones((10, 10, 3)),\n annotations=[\n Bbox(0, 2, 4, 2, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '1',\n 'occluded': '0', 'pose': '1', 'invalid': '0'}),\n Bbox(3, 3, 2, 3, attributes = {\n 'blur': '0', 'expression': '1', 'illumination': '0',\n 'occluded': '0', 'pose': '2', 'invalid': '0'}),\n Bbox(2, 1, 2, 3, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '0', 'invalid': '1'}),\n Label(1),\n ]\n ),\n\n DatasetItem(id='3', subset='val', image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 1.1, 5.3, 2.1, attributes = {\n 'blur': '2', 'expression': '1', 'illumination': '0',\n 'occluded': '0', 'pose': '1', 'invalid': '0'}),\n Bbox(0, 2, 3, 2, attributes = {\n 'occluded': 'False'}),\n Bbox(0, 2, 4, 2),\n Bbox(0, 7, 3, 2, attributes = {\n 'blur': '2', 'expression': '1', 'illumination': '0',\n 'occluded': '0', 'pose': '1', 'invalid': '0'}),\n ]\n ),\n\n DatasetItem(id='4', subset='val', image=np.ones((8, 8, 3))),\n ], categories={\n AnnotationType.label: LabelCategories.from_iterable(\n 'label_' + str(i) for i in range(3)),\n })\n\n with TestDir() as test_dir:\n WiderFaceConverter.convert(source_dataset, test_dir, save_images=True)\n parsed_dataset = Dataset.import_from(test_dir, 'wider_face')\n\n compare_datasets(self, source_dataset, parsed_dataset)\n\n def test_can_save_dataset_with_no_subsets(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 2, 4, 2, label=2),\n Bbox(0, 1, 2, 3, label=1, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '2', 'invalid': '0'}),\n ]\n ),\n ], categories={\n AnnotationType.label: LabelCategories.from_iterable(\n 'label_' + str(i) for i in range(3)),\n })\n\n with TestDir() as test_dir:\n WiderFaceConverter.convert(source_dataset, test_dir, save_images=True)\n parsed_dataset = Dataset.import_from(test_dir, 'wider_face')\n\n compare_datasets(self, source_dataset, parsed_dataset)\n\n def test_can_save_dataset_with_non_widerface_attributes(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 2, 4, 2),\n Bbox(0, 1, 2, 3, attributes = {\n 'non-widerface attribute': '0',\n 'blur': 1, 'invalid': '1'}),\n Bbox(1, 1, 2, 2, attributes = {\n 'non-widerface attribute': '0'}),\n ]\n ),\n ], categories=[])\n\n target_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 2, 4, 2),\n Bbox(0, 1, 2, 3, attributes = {\n 'blur': '1', 'invalid': '1'}),\n Bbox(1, 1, 2, 2),\n ]\n ),\n ], categories=[])\n\n with TestDir() as test_dir:\n WiderFaceConverter.convert(source_dataset, test_dir, save_images=True)\n parsed_dataset = Dataset.import_from(test_dir, 'wider_face')\n\n compare_datasets(self, target_dataset, parsed_dataset)\n\nDUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'widerface_dataset')\n\nclass WiderFaceImporterTest(TestCase):\n def test_can_detect(self):\n self.assertTrue(WiderFaceImporter.detect(DUMMY_DATASET_DIR))\n\n def test_can_import(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='0_Parade_image_01', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Bbox(1, 2, 2, 2, attributes = {\n 'blur': '0', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '0', 'invalid': '0'}),\n Label(0),\n ]\n ),\n DatasetItem(id='1_Handshaking_image_02', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Bbox(1, 1, 2, 2, attributes = {\n 'blur': '0', 'expression': '0', 'illumination': '1',\n 'occluded': '0', 'pose': '0', 'invalid': '0'}),\n Bbox(5, 1, 2, 2, attributes = {\n 'blur': '0', 'expression': '0', 'illumination': '1',\n 'occluded': '0', 'pose': '0', 'invalid': '0'}),\n Label(1),\n ]\n ),\n DatasetItem(id='0_Parade_image_03', subset='val',\n image=np.ones((10, 15, 3)),\n annotations=[\n Bbox(0, 0, 1, 1, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '2', 'invalid': '0'}),\n Bbox(3, 2, 1, 2, attributes = {\n 'blur': '0', 'expression': '0', 'illumination': '0',\n 'occluded': '1', 'pose': '0', 'invalid': '0'}),\n Bbox(5, 6, 1, 1, attributes = {\n 'blur': '2', 'expression': '0', 'illumination': '0',\n 'occluded': '0', 'pose': '2', 'invalid': '0'}),\n Label(0),\n ]\n ),\n ], categories= ['Parade', 'Handshaking'])\n\n dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'wider_face')\n\n compare_datasets(self, expected_dataset, dataset)\n" ]
[ [ "numpy.ones" ] ]
joeyzhou85/python
[ "9c0cbe33076a570a3c02825b7c6d9866a760e777" ]
[ "machine_learning/linear_regression.py" ]
[ "\"\"\"\nLinear regression is the most basic type of regression commonly used for\npredictive analysis. The idea is pretty simple, we have a dataset and we have\na feature's associated with it. The Features should be choose very cautiously\nas they determine, how much our model will be able to make future predictions.\nWe try to set these Feature weights, over many iterations, so that they best\nfits our dataset. In this particular code, i had used a CSGO dataset (ADR vs\nRating). We try to best fit a line through dataset and estimate the parameters.\n\"\"\"\nfrom __future__ import print_function\n\nimport requests\nimport numpy as np\n\n\ndef collect_dataset():\n \"\"\" Collect dataset of CSGO\n The dataset contains ADR vs Rating of a Player\n :return : dataset obtained from the link, as matrix\n \"\"\"\n response = requests.get('https://raw.githubusercontent.com/yashLadha/' +\n 'The_Math_of_Intelligence/master/Week1/ADRvs' +\n 'Rating.csv')\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(',')\n data.append(item)\n data.pop(0) # This is for removing the labels from the list\n dataset = np.matrix(data)\n return dataset\n\n\ndef run_steep_gradient_descent(data_x, data_y,\n len_data, alpha, theta):\n \"\"\" Run steep gradient descent and updates the Feature vector accordingly_\n :param data_x : contains the dataset\n :param data_y : contains the output associated with each data-entry\n :param len_data : length of the data_\n :param alpha : Learning rate of the model\n :param theta : Feature vector (weight's for our model)\n ;param return : Updated Feature's, using\n curr_features - alpha_ * gradient(w.r.t. feature)\n \"\"\"\n n = len_data\n\n prod = np.dot(theta, data_x.transpose())\n prod -= data_y.transpose()\n sum_grad = np.dot(prod, data_x)\n theta = theta - (alpha / n) * sum_grad\n return theta\n\n\ndef sum_of_square_error(data_x, data_y, len_data, theta):\n \"\"\" Return sum of square error for error calculation\n :param data_x : contains our dataset\n :param data_y : contains the output (result vector)\n :param len_data : len of the dataset\n :param theta : contains the feature vector\n :return : sum of square error computed from given feature's\n \"\"\"\n prod = np.dot(theta, data_x.transpose())\n prod -= data_y.transpose()\n sum_elem = np.sum(np.square(prod))\n error = sum_elem / (2 * len_data)\n return error\n\n\ndef run_linear_regression(data_x, data_y):\n \"\"\" Implement Linear regression over the dataset\n :param data_x : contains our dataset\n :param data_y : contains the output (result vector)\n :return : feature for line of best fit (Feature vector)\n \"\"\"\n iterations = 100000\n alpha = 0.0001550\n\n no_features = data_x.shape[1]\n len_data = data_x.shape[0] - 1\n\n theta = np.zeros((1, no_features))\n\n for i in range(0, iterations):\n theta = run_steep_gradient_descent(data_x, data_y,\n len_data, alpha, theta)\n error = sum_of_square_error(data_x, data_y, len_data, theta)\n print('At Iteration %d - Error is %.5f ' % (i + 1, error))\n\n return theta\n\n\ndef main():\n \"\"\" Driver function \"\"\"\n data = collect_dataset()\n\n len_data = data.shape[0]\n data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)\n data_y = data[:, -1].astype(float)\n\n theta = run_linear_regression(data_x, data_y)\n len_result = theta.shape[1]\n print('Resultant Feature vector : ')\n for i in range(0, len_result):\n print('%.5f' % (theta[0, i]))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.square", "numpy.matrix", "numpy.dot", "numpy.zeros", "numpy.ones" ] ]
Golbstein/BayesianOptimization
[ "ad76e37ab21db6abea052c087e5efd34a5fbfd98" ]
[ "bayes_opt/bayesian_optimization.py" ]
[ "import warnings\nimport numpy as np\n\nfrom .target_space import TargetSpace\nfrom .event import Events, DEFAULT_EVENTS\nfrom .logger import _get_default_logger\nfrom .util import UtilityFunction, acq_max, ensure_rng\n\nfrom sklearn.gaussian_process.kernels import Matern\nfrom sklearn.gaussian_process import GaussianProcessRegressor\n\n\nclass Queue:\n def __init__(self):\n self._queue = []\n\n @property\n def empty(self):\n return len(self) == 0\n\n def __len__(self):\n return len(self._queue)\n\n def __next__(self):\n if self.empty:\n raise StopIteration(\"Queue is empty, no more objects to retrieve.\")\n obj = self._queue[0]\n self._queue = self._queue[1:]\n return obj\n\n def next(self):\n return self.__next__()\n\n def add(self, obj):\n \"\"\"Add object to end of queue.\"\"\"\n self._queue.append(obj)\n\n\nclass Observable(object):\n \"\"\"\n\n Inspired/Taken from\n https://www.protechtraining.com/blog/post/879#simple-observer\n \"\"\"\n def __init__(self, events):\n # maps event names to subscribers\n # str -> dict\n self._events = {event: dict() for event in events}\n\n def get_subscribers(self, event):\n return self._events[event]\n\n def subscribe(self, event, subscriber, callback=None):\n if callback == None:\n callback = getattr(subscriber, 'update')\n self.get_subscribers(event)[subscriber] = callback\n\n def unsubscribe(self, event, subscriber):\n del self.get_subscribers(event)[subscriber]\n\n def dispatch(self, event):\n for _, callback in self.get_subscribers(event).items():\n callback(event, self)\n\n\nclass BayesianOptimization(Observable):\n def __init__(self, f, pbounds, random_state=None, verbose=2, n_warmup=50000, n_iter=100, do_opt=True):\n \"\"\"\"\"\"\n self._random_state = ensure_rng(random_state)\n self._n_warmup = n_warmup\n self._n_iter = n_iter\n self._do_opt = do_opt\n\n # Data structure containing the function to be optimized, the bounds of\n # its domain, and a record of the evaluations we have done so far\n self._space = TargetSpace(f, pbounds, random_state)\n\n # queue\n self._queue = Queue()\n\n # Internal GP regressor\n self._gp = GaussianProcessRegressor(\n kernel=Matern(nu=2.5),\n alpha=1e-6,\n normalize_y=True,\n n_restarts_optimizer=25,\n random_state=self._random_state,\n )\n\n self._verbose = verbose\n super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)\n\n @property\n def space(self):\n return self._space\n\n @property\n def max(self):\n return self._space.max()\n\n @property\n def res(self):\n return self._space.res()\n\n def register(self, params, target):\n \"\"\"Expect observation with known target\"\"\"\n self._space.register(params, target)\n self.dispatch(Events.OPTMIZATION_STEP)\n\n def probe(self, params, lazy=True):\n \"\"\"Probe target of x\"\"\"\n if lazy:\n self._queue.add(params)\n else:\n self._space.probe(params)\n self.dispatch(Events.OPTMIZATION_STEP)\n\n def suggest(self, utility_function):\n \n \"\"\"Most promissing point to probe next\"\"\"\n if len(self._space) == 0:\n return self._space.array_to_params(self._space.random_sample())\n\n # Sklearn's GP throws a large number of warnings at times, but\n # we don't really need to see them here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self._gp.fit(self._space.params, self._space.target)\n\n # Finding argmax of the acquisition function.\n suggestion = acq_max(\n ac=utility_function.utility,\n gp=self._gp,\n y_max=self._space.target.max(),\n bounds=self._space.bounds,\n random_state=self._random_state,\n n_warmup=self._n_warmup,\n n_iter=self._n_iter,\n do_opt=self._do_opt\n )\n\n return self._space.array_to_params(suggestion)\n\n def _prime_queue(self, init_points):\n \"\"\"Make sure there's something in the queue at the very beginning.\"\"\"\n if self._queue.empty and self._space.empty:\n init_points = max(init_points, 1)\n\n for _ in range(init_points):\n self._queue.add(self._space.random_sample())\n\n def _prime_subscriptions(self):\n if not any([len(subs) for subs in self._events.values()]):\n _logger = _get_default_logger(self._verbose)\n self.subscribe(Events.OPTMIZATION_START, _logger)\n self.subscribe(Events.OPTMIZATION_STEP, _logger)\n self.subscribe(Events.OPTMIZATION_END, _logger)\n\n def maximize(self,\n init_points=5,\n n_iter=25,\n acq='ucb',\n kappa=2.576,\n xi=0.0,\n **gp_params):\n \"\"\"Mazimize your function\"\"\"\n self._prime_subscriptions()\n self.dispatch(Events.OPTMIZATION_START)\n self._prime_queue(init_points)\n self.set_gp_params(**gp_params)\n\n util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)\n iteration = 0\n while not self._queue.empty or iteration < n_iter:\n try:\n x_probe = next(self._queue)\n except StopIteration:\n x_probe = self.suggest(util)\n iteration += 1\n\n self.probe(x_probe, lazy=False)\n\n self.dispatch(Events.OPTMIZATION_END)\n\n def set_bounds(self, new_bounds):\n \"\"\"\n A method that allows changing the lower and upper searching bounds\n\n Parameters\n ----------\n new_bounds : dict\n A dictionary with the parameter name and its new bounds\n \"\"\"\n self._space.set_bounds(new_bounds)\n\n def set_gp_params(self, **params):\n self._gp.set_params(**params)\n" ]
[ [ "sklearn.gaussian_process.kernels.Matern" ] ]
wmkirby1/CS-VQE
[ "9a0a7634dcb77f064957c772cf229b7103cce3a8" ]
[ "misc/legacy/fermions/yaferp/circuits/circuit.py" ]
[ "'''\nCreated on 17 Dec 2014\n\n@author: andrew\n\nprobably an inefficient strategy for all this but what the hell.\n'''\n\n'''define a general struct for gates'''\nimport string\nimport numpy\nimport scipy.sparse\nimport copy\nimport math\nfrom yaferp.general import directFermions\nimport cirq\n\n#import cPickle\ntry:\n import pyquil.gates\nexcept:\n pass\n\n\n\nGATETYPES = [['cnot',2],['rz',1],['h',1],['hy',1],['hyinv',1],['i',1],['cz',2]]\n\nimport pickle as cPickle\n\nclass Gate:\n def __init__(self,gateType,qubits,angle=0):\n '''SQRs are represented by an axis (x,y,z) specified in the type.\n angle is the angle of rotation as a fraction of 2 pi - so for instance the angle stored here is the theta in:\n i.e Rx(theta) = e^(-i*theta*2pi*X/2)\n '''\n self.type = gateType.lower() #string - cnot, rx, ry, rz, h, hy, hyinv, i, cz\n self.qubits = qubits #list of zero-indexed ints, control is first\n self.angle = angle #rotations and \"identity\" need an angle specified - see function description\n def readable(self):\n if self.type == 'rx' or self.type == 'ry' or self.type == 'rz' or self.type == 'i': \n thing = self.type.upper() + ' on qubit ' + str(self.qubits) + ' angle ' + str(self.angle)\n elif self.type == 'h':\n thing = 'H on qubit ' + str(self.qubits)\n elif self.type == 'hy':\n thing = 'Hy on qubit ' + str(self.qubits)\n elif self.type == 'hyinv':\n thing = 'Inverse Hy on qubit ' + str(self.qubits)\n elif self.type == 'cnot':\n thing = 'CNOT controlled on qubit ' + str(self.qubits[0])+' acting on qubit ' + str(self.qubits[1])\n elif self.type == 'cz':\n thing = 'Cz controlled on qubit ' + str(self.qubits[0])+' acting on qubit ' + str(self.qubits[1])\n print(thing)\n return thing\n def qasm(self):\n if self.type == 'cnot':\n return ' cnot ' + str(self.qubits[0]) + ',' + str(self.qubits[1])\n elif self.type == 'cz':\n return ' c-z ' + str(self.qubits[0]) + ',' + str(self.qubits[1])\n elif self.type == 'h':\n return ' h ' + str(self.qubits)\n elif self.type == 'i':\n return \" def thing,0,'I(\" +str(abs(self.angle)) +\")' \\n thing \" + str(self.qubits)\n elif self.type == 'hy':\n return \" def thing,0,'Y' \\n thing \" + str(self.qubits)\n elif self.type == 'hyinv':\n return \" def thing,0,'Y^\\\\dagger' \\n thing \" + str(self.qubits)\n else:\n if self.type == 'rx':\n subscript = 'x'\n elif self.type == 'ry':\n subscript = 'y'\n elif self.type == 'rz':\n subscript = 'z'\n return \" def thing,0,'R_\" + subscript + \"(\" + str(abs(self.angle)) + \")' \\n thing \" + str(self.qubits)\n\n\n\n def getInverse(self):\n if self.type == 'h' or self.type == 'cnot':\n return self\n else:\n inverse = Gate(self.type, self.qubits,(self.angle * -1.))\n return inverse\n def act(self,state,debugOn=0):\n '''act the gate on a state vector, return the state vector.\n\n args:\n state -- a dense or sparse state vector to be acted upon by the gate\n debugOn -- print debug info?\n\n TODO: majorly refactor this whole thing\n '''\n #print(state)\n if debugOn:\n initialNorm = state.conjugate().transpose() * state\n state = scipy.sparse.csr_matrix(state,dtype=numpy.complex128)\n numQubits = int(math.log(state.shape[0],2))\n if self.type == 'i':\n newState = state * numpy.exp(self.angle * -2j * numpy.pi)\n \n elif self.type == 'cnot':\n controlQubit = self.qubits[0]\n controlQubitDec = 2**controlQubit\n targetQubit = self.qubits[1]\n targetQubitDec = 2**targetQubit\n wtf = [x for x in range(2**numQubits) if (controlQubitDec & x != 0)]\n unchangedIndices = [x for x in range(2**numQubits) if (x not in wtf)]\n newState2 = [state[x,0] if x in unchangedIndices else state[x^targetQubitDec, 0] for x in range(2**numQubits)]\n newState = scipy.sparse.csr_matrix(newState2,dtype=numpy.complex128).transpose()\n \n \n elif self.type in ['rz','rx','ry']:\n realAngle = self.angle*2*numpy.pi\n identityCoefficient = numpy.cos(realAngle/2)\n pauliCoefficient = -1j * numpy.sin(realAngle/2)\n stateCopy = copy.deepcopy(state)\n \n firstComponent = identityCoefficient * stateCopy\n if self.type == 'rz':\n secondComponent = pauliCoefficient * directFermions.oneZOnState(state, self.qubits)\n newState = firstComponent + secondComponent\n \n elif self.type == 'cz':\n firstQubitMask = 2**self.qubits[0]\n secondQubitMask = 2**self.qubits[1]\n mask = firstQubitMask + secondQubitMask\n indicesToPhase = [x for x in range(2**numQubits) if x & mask == mask]\n for index in indicesToPhase:\n state[index,0] = state[index,0] * -1\n newState = state\n\n elif self.type == 'h':\n #really needs optimisation\n state2 = copy.deepcopy(state)\n xComponent = directFermions.oneXOnState(state, self.qubits)\n zComponent = directFermions.oneZOnState(state2, self.qubits)\n newState = (2.**-(1./2.)) * (xComponent + zComponent)\n \n elif self.type == 'hy':\n '''hy = 1/sqrt(2) I + iX'''\n firstTerm = copy.deepcopy(state)\n zComponent = 1j * directFermions.oneXOnState(state, self.qubits)\n newState = (2.**-(1./2.)) * (firstTerm + zComponent)\n \n elif self.type == 'hyinv':\n '''hy = 1/sqrt(\") I - iX'''\n firstTerm = copy.deepcopy(state)\n zComponent = -1j * directFermions.oneXOnState(state, self.qubits)\n newState = (2.**-(1./2.)) * (firstTerm + zComponent)\n \n else:\n print('error, no action for gate ' + self.type + ' found')\n \n if debugOn:\n newNorm = newState.conjugate().transpose() * newState\n if newNorm - initialNorm >= 0.01:\n print ('Error - gate ' + self.type + ' on qubit ' + str(self.qubits) + ' increasing norm')\n print ('norm ' + str(newNorm - initialNorm))\n if initialNorm - newNorm >= 0.01:\n print ('Error - gate ' + self.type + ' on qubit ' + str(self.qubits) + ' decreasing norm')\n print ('norm ' + str(newNorm - initialNorm))\n\n return newState\n\n def sparseMatrixForm(self):\n '''return the matrix form of a SQG, return matrix form of CNOT tensor identity for qubits in between control and target'''\n if self.type == 'i':\n term = numpy.exp(-1j * 2 * numpy.pi * self.angle)\n return scipy.sparse.csc_matrix([[term,0],[0,term]])\n elif self.type == 'h':\n return scipy.sparse.csc_matrix([[1,1],[1,-1]])\n elif self.type == 'cnot':\n '''do cnot'''\n pass\n elif self.type == 'rx':\n angle = numpy.pi * self.angle\n diagonal = numpy.cos(angle)\n offDiagonal = -1j * numpy.sin(angle)\n return scipy.sparse.csc_matrix([[diagonal,offDiagonal],[offDiagonal,diagonal]])\n elif self.type == 'ry':\n angle = numpy.pi * self.angle\n diagonal = numpy.cos(angle)\n sinTerms = numpy.sin(angle)\n return scipy.sparse.csc_matrix([[diagonal,-1 * sinTerms],[sinTerms,diagonal]])\n elif self.type == 'rz':\n iTheta = self.angle * numpy.pi* 1j\n firstPhase = numpy.exp(-1* iTheta)\n secondPhase = numpy.exp(iTheta)\n return scipy.sparse.csc_matrix([[firstPhase,0],[0,secondPhase]])\n return\n\n def toSparseUnitary(self,circuitDepth):\n '''int circuitDepth = total number of qubits in circuit'''\n if not self.type == 'cnot':\n activeMatrix = self.sparseMatrixForm()\n if self.qubits > 0:\n pregroup = scipy.sparse.identity(self.qubits,format='csc')\n unitary = scipy.sparse.kron(pregroup,activeMatrix,format='csc')\n else:\n unitary = activeMatrix\n if self.qubits < circuitDepth:\n postgroup = scipy.sparse.identity(circuitDepth - self.qubits,format='csc')\n unitary = scipy.sparse.kron(unitary,postgroup,format='csc')\n return unitary\n \n def checkCommute(self,otherGate):\n if self.type == 'i' or otherGate.type == 'i':\n return 1\n elif self.type in ['rx', 'ry', 'rz', 'h', 'hy','hyinv']:\n if otherGate.type == self.type:\n return 1 #always commute if same gate type\n elif otherGate.type == 'cnot' and self.type == 'rz':\n if self.qubits == otherGate.qubits[1]:\n return 0\n else:\n return 1\n elif otherGate.type == 'cnot' and self.type in ['hy','hyinv'] and self.qubits == otherGate.qubits[1]:\n return 1\n elif otherGate.type == 'cnot':\n if self.qubits in otherGate.qubits:\n return 0\n else:\n return 1\n \n elif otherGate.type == 'cz':\n if self.type == 'rz':\n return 1\n elif self.qubits in otherGate.qubits:\n return 0\n else:\n return 1 \n \n elif self.qubits != otherGate.qubits:\n return 1 #always commute if SQG on different qubits\n else:\n return 0 #else is different gate on same qubit, hence probably don't commute\n elif self.type == 'cnot':\n if otherGate.type in ['rx', 'ry', 'h']:\n if otherGate.qubits in self.qubits:\n return 0 #TODO: again this should be 1 if the other qubit is a simple X rotation but w/e\n else:\n return 1\n elif otherGate.type in ['hy','hyinv']:\n if not otherGate.qubits in self.qubits :\n return 1\n elif self.qubits[1] == otherGate.qubits:\n return 1\n else:\n return 0\n elif otherGate.type == 'rz':\n if otherGate.qubits == self.qubits[1]:\n return 0\n else:\n return 1\n elif otherGate.type == 'cnot':\n if (self.qubits[0] == otherGate.qubits[1]) or (self.qubits[1] == otherGate.qubits[0]):\n return 0 #CNOTs commute unless one's target is the other's control\n else:\n return 1 \n elif otherGate.type == 'cz':\n if (self.qubits[1] in otherGate.qubits):\n return 0\n else:\n return 1\n else:\n return -1\n elif self.type == 'cz':\n if otherGate.type in ['rx','ry','h','hy','hyinv']:\n if otherGate.qubits in self.qubits:\n return 0\n else:\n return 1\n elif otherGate.type == 'rz' or otherGate.type == 'cz':\n return 1\n elif otherGate.type == 'cnot':\n if (otherGate.qubits[1] in self.qubits):\n return 0\n else:\n return 1\n \n return -1\n \n def canCancel(self,otherGate):\n if (self.type == 'hy' and otherGate.type == 'hyinv') or (self.type == 'hyinv' and otherGate.type == 'hy'):\n if self.qubits == otherGate.qubits:\n return 1\n else:\n return 0\n elif self.type == 'cz' and otherGate.type == 'cz':\n if (self.qubits == otherGate.qubits) or ((self.qubits[0] == otherGate.qubits[1]) and self.qubits[1] == otherGate.qubits[0]):\n return 1\n else:\n return 0\n elif self.type==otherGate.type and self.qubits==otherGate.qubits:\n return 1 \n else:\n return 0\n\n\n# def _getPyquillerisedAngle_(self,pyQuilMemoryHolder):\n# if isinstance(self.angle,sympy.Expr):\n# return self.angle.subs(parametersToPyquilMap)\n\n def pyquil(self,pyQuilMemoryHolder=None,pyQuilMemoryIndex=None):\n if self.type == 'cnot':\n return pyquil.gates.CNOT(self.qubits[0],self.qubits[1])\n elif self.type == 'rz':\n assert not pyQuilMemoryHolder is None\n assert not pyQuilMemoryIndex is None\n # pyquilAngle = self._getPyquillerisedAngle_(parametersToPyquilMap)\n clive = pyQuilMemoryHolder[pyQuilMemoryIndex]\n return (pyquil.gates.RZ(clive, self.qubits),self.angle*2.*numpy.pi)\n # return (pyquil.gates.RZ(pyQuilMemoryHolder[pyQuilMemoryIndex],self.qubits), self.angle)\n elif self.type == 'h':\n return pyquil.gates.H(self.qubits)\n elif self.type == 'hy':\n return pyquil.gates.RX((-1. * numpy.pi)/2.,self.qubits)\n elif self.type == 'hyinv':\n return pyquil.gates.RX(numpy.pi/2.,self.qubits)\n else:\n raise ValueError(\"gate not supported!\")\n\n\n return\n\n def toCirq(self,qubit,qubit2=None,set_to_true_if_you_are_panicking=False):\n\n CIRQ_GATE_DICT = {'h':cirq.H,\n 'hy':cirq.rx,\n 'hyinv':cirq.rx,\n 'rz':cirq.rz,\n 'cnot':cirq.CNOT}\n cirqGateMethod = CIRQ_GATE_DICT[self.type]\n\n # if self.type in ['hy','hyinv','rz']:\n # if self.type == 'hy':\n # cirqGateMethod1 =\n # return cirqGateMethod(self.angle)(qubit)\n if self.type == 'hy':\n return cirqGateMethod((-1. * numpy.pi)/2.)(qubit)\n elif self.type == 'hyinv':\n return cirqGateMethod((1. * numpy.pi)/2.)(qubit)\n elif self.type == 'rz':\n if set_to_true_if_you_are_panicking:\n return cirqGateMethod(self.angle * 2. * numpy.pi)(qubit)\n else:\n return cirqGateMethod(self.angle * 2. * numpy.pi /-1j)(qubit)\n\n\n if type(self.qubits) != int:\n assert qubit2 is not None, \"2 qubit gates need a second qubit.\"\n return cirqGateMethod(qubit,qubit2)\n\n else:\n return cirqGateMethod(qubit)\n\n\nclass Circuit:\n def __init__(self,listGates,numQubits=-1,useSubcircuits=False):\n self.listGates = listGates\n if numQubits==-1:\n maxQubitNumber = 0\n for gate in listGates:\n if isinstance(gate.qubits, list):\n maxQubitNumberInGate = max(gate.qubits)\n else:\n maxQubitNumberInGate = gate.qubits\n if maxQubitNumberInGate > maxQubitNumber:\n maxQubitNumber = maxQubitNumberInGate\n self.numQubits = maxQubitNumber\n else:\n self.numQubits = numQubits\n if useSubcircuits:\n self.subcircuits = [] #list containing indices of subcircuits\n else:\n self.subcircuits = None\n def update(self):\n maxQubitNumber = 0\n for gate in self.listGates:\n if isinstance(gate.qubits, list):\n maxQubitNumberInGate = max(gate.qubits)\n else:\n maxQubitNumberInGate = gate.qubits\n if maxQubitNumberInGate > maxQubitNumber:\n maxQubitNumber = maxQubitNumberInGate\n self.numQubits = maxQubitNumber \n return\n def numRotations(self):\n counter = 0\n for x in self.listGates:\n if x.type in ['rx','ry','rz']:\n counter += 1\n return counter\n\n def pyquil(self,pyQuilMemoryHolder,pyQuilProgram):\n pyQuilIndex = 0\n parametersExprs = []\n for gate in self.listGates:\n thing = gate.pyquil(pyQuilMemoryHolder,pyQuilIndex)\n if isinstance(thing,tuple):\n pyQuilIndex += 1\n parametersExprs.append(thing[1])\n pyQuilProgram += thing[0]\n else:\n pyQuilProgram += thing\n return pyQuilProgram,parametersExprs\n\n def toCirq(self,qubits,circuitToAppendTo=None,insertStrategy=cirq.InsertStrategy.EARLIEST,set_to_true_if_you_are_panicking=False):\n #qubits = list(reversed(qubits2))\n if circuitToAppendTo is None:\n cirquit = cirq.Circuit()\n else:\n cirquit = circuitToAppendTo\n\n for gate in self.listGates:\n qubitIndices = gate.qubits\n if type(qubitIndices) is not int:\n thisCirqGate = gate.toCirq(*[qubits[i] for i in qubitIndices],set_to_true_if_you_are_panicking=set_to_true_if_you_are_panicking)\n else:\n thisCirqGate = gate.toCirq(qubits[qubitIndices],set_to_true_if_you_are_panicking=set_to_true_if_you_are_panicking)\n\n cirquit.append(thisCirqGate,strategy=insertStrategy)\n\n return cirquit\n\n def addRight(self,target):\n try:\n self.listGates.extend(target.listGates)\n except:\n self.listGates.append(target)\n return self\n def addLeft(self,target):\n try:\n newList = target.listGates.extend(self.listGates)\n self.listGates = newList\n except:\n self.listGates.prepend(target)\n return self\n def getReverse(self):\n reversedGateList = reversed(self.listGates)\n return Circuit(reversedGateList)\n def getInverse(self):\n reversedGateList = reversed(self.listGates)\n inverseGateList = []\n for gate in reversedGateList:\n inverseGateList.append(gate.getInverse())\n inverse = Circuit(inverseGateList)\n return inverse\n \n def readable(self):\n for gate in self.listGates:\n gate.readable()\n return\n def act(self,state,debugOn=0):\n for gate in self.listGates:\n state = gate.act(state,debugOn)\n return state\n def expectationValue(self,state,debugOn=0):\n # print(state)\n state = scipy.sparse.csr_matrix(state, dtype=numpy.complex128)\n originalState = copy.deepcopy(state)\n state = self.act(state,debugOn)\n firstBit = originalState.conjugate().transpose()\n #print(originalState.todense())\n #print(state.todense()\n expectation = firstBit * state\n return expectation[0,0]\n def angle(self,state,debugOn=0):\n expVal = self.expectationValue(state,debugOn)\n ang = numpy.angle(expVal)\n return ang\n \n def getQubitListGates(self):\n '''return a list of N lists where N is num. qubits. \n each sublist contains the indices of self.listGates where the corresponding qubit is involved.\n TODO: store this shit'''\n qubitListGates = []\n for qubitIndex in range(self.numQubits+1):\n listOfGatesThatThisQubitIsInvolvedIn = [] #cba\n for (gateIndex,gate) in enumerate(self.listGates):\n if not (isinstance(gate.qubits, list)):\n if qubitIndex==gate.qubits:\n listOfGatesThatThisQubitIsInvolvedIn.append(gateIndex)\n else:\n if qubitIndex in gate.qubits:\n listOfGatesThatThisQubitIsInvolvedIn.append(gateIndex)\n qubitListGates.append(listOfGatesThatThisQubitIsInvolvedIn)\n return qubitListGates\n \n def parallelisedInternal(self): \n '''good luck!'''\n numUnplacedQubitListGates = copy.deepcopy(self.getQubitListGates())\n depletedQubitsList = []\n self.listTimesteps = []\n #for j in [1,2,3,4,5,6]:\n while len(depletedQubitsList) != self.numQubits+1:\n timestep = numpy.full(self.numQubits+1,-2,dtype=int)\n for i in range(self.numQubits+1):\n if i in depletedQubitsList:\n timestep[i] = -1\n continue\n if timestep[i] != -2:\n continue\n if numUnplacedQubitListGates[i] == []:\n depletedQubitsList.append(i)\n timestep[i] = -1\n continue\n firstGateIndex = numUnplacedQubitListGates[i].pop(0)\n #print(firstGateIndex)\n if isinstance(self.listGates[firstGateIndex].qubits, int):\n timestep[i] = firstGateIndex\n else: #next gate for this qubit is entangling gate\n otherQubitIndex = [x for x in self.listGates[firstGateIndex].qubits if x != i][0]\n if timestep[otherQubitIndex] != -2:\n numUnplacedQubitListGates[i].insert(0,firstGateIndex)\n timestep[i] = -1\n else:\n # print(otherQubitIndex)\n #print(firstGateIndex)\n # print(numUnplacedQubitListGates)\n # print(i)\n #print(otherQubitIndex)\n # self.listGates[firstGateIndex].readable()\n #print(numUnplacedQubitListGates[otherQubitIndex])\n otherQubitNextGateIndex = numUnplacedQubitListGates[otherQubitIndex][0]\n if otherQubitNextGateIndex >= firstGateIndex:\n timestep[i] = firstGateIndex\n timestep[otherQubitIndex] = firstGateIndex\n numUnplacedQubitListGates[otherQubitIndex].pop(0)\n elif otherQubitNextGateIndex < firstGateIndex:\n numUnplacedQubitListGates[i].insert(0,firstGateIndex)\n timestep[i] = -1\n #print(timestep)\n self.listTimesteps.append(timestep)\n return self.listTimesteps\n \n def timestepIndices(self,whichStep=None):\n try:\n timesteps = self.listTimesteps\n except AttributeError:\n timesteps = self.parallelisedInternal()\n \n if whichStep == None:\n return timesteps\n else:\n return timesteps(whichStep)\n \n def timestep(self,whichStep=None):\n try:\n timesteps = self.listTimesteps\n except AttributeError:\n timesteps = self.parallelisedInternal()\n \n if whichStep == None:\n listTimeCircs = []\n for listGatesInStep in timesteps:\n seenGates= {}\n fixedGates = []\n for thisGate in listGatesInStep:\n if thisGate in seenGates: continue\n seenGates[thisGate] = 1\n fixedGates.append(thisGate)\n gatesInStep = [self.listGates[i] for i in fixedGates if i != -1]\n stepCircuit = Circuit(gatesInStep)\n listTimeCircs.append(stepCircuit)\n return listTimeCircs\n else:\n listGatesInStep = timesteps[whichStep]\n seenGates= {}\n fixedGates = []\n for thisGate in listGatesInStep:\n if thisGate in seenGates: continue\n seenGates[thisGate] = 1\n fixedGates.append(thisGate)\n gatesInStep = [self.listGates[i] for i in fixedGates if i != -1]\n stepCircuit = Circuit(gatesInStep)\n return stepCircuit\n \n \n \n \n \n \n \n \n \n def parallelDepth(self):\n '''count the depth of the circuit'''\n \n \n def qasm(self):\n text = ''\n for i in range(self.numQubits+1):\n text = text + \" qubit \" + str(i) + \",q_\" + str(i) + \"\\n\"\n text += \"\\n\"\n numDefs = 0\n for gate in self.listGates:\n thisString = gate.qasm()\n includesThing = string.find(thisString,'thing')\n if includesThing != -1:\n newString = string.replace(thisString,'thing', 'thing'+str(numDefs))\n numDefs += 1\n else:\n newString = thisString\n newString = newString + '\\n'\n text += newString\n return text\n \n def removeGate(self,gateIndex):\n self.listGates.pop(gateIndex)\n if self.subcircuits != None:\n self.removeGateUpdateSubcircuit(gateIndex)\n return self\n def involvedQubits(self):\n qubits = []\n for gate in self.listGates:\n if isinstance(gate.qubits,int):\n qubits.append(gate.qubits)\n else:\n for thisQubit in gate.qubits:\n if thisQubit not in qubits:\n qubits.append(thisQubit)\n return qubits\n \n def markSubcircuit(self):\n if self.subcircuits != None:\n self.subcircuits.append(self.numGates()-1)\n return\n \n def numGates(self):\n return len(self.listGates)\n \n def getSubcircuitBoundsByIndex(self,index):\n subCircuitEnd = self.subcircuits[index]\n if index == 0:\n subCircuitStart = 0\n else:\n subCircuitStart = self.subcircuits[index-1]+1\n return (subCircuitStart,subCircuitEnd) \n \n def getSubcircuitByIndex(self,index):\n subCircuitStart,subCircuitEnd = self.getSubcircuitBoundsByIndex(index)\n subListGates = self.listGates[subCircuitStart:subCircuitEnd+1]\n return Circuit(subListGates)\n \n def removeSubcircuitDuplicates(self):\n seen = set()\n seen_add = seen.add\n return [x for x in self.subcircuits if x not in seen and not seen_add(x)]\n '''noDuplicates = []\n [noDuplicates.append(x) for x in self.subcircuits if not noDuplicates.count(x)]\n self.subcircuits = noDuplicates\n return self'''\n \n \n def removeGateUpdateSubcircuit(self,removedIndex):\n # self.removeSubcircuitDuplicates()\n '''newSubcircuits = []\n for x in self.subcircuits:\n if x<removedIndex:\n newSubcircuits.append(x)\n else:\n newSubcircuits.append(x-1)'''\n #newSubcircuits = [self.subcircuits[i] if self.subcircuits[i]<removedIndex else self.subcircuits[i]-1 if self.subcircuits[i]!=self.subcircuits[i+1] for i in range(len(self.subcircuits))]\n if removedIndex in self.subcircuits and removedIndex-1 in self.subcircuits: #if the removed gate is the entirety of a subcircuit\n self.subcircuits.remove(removedIndex)\n\n newSubcircuits = [x if x<removedIndex else x-1 for x in self.subcircuits]\n self.subcircuits = newSubcircuits\n ''' for x,i in enumerate(self.subcircuits):\n if \n \n newSubcircuits = [x if x<removedIndex else x-1 for x in self.subcircuits]\n self.subcircuits = newSubcircuits\n self.removeSubcircuitDuplicates()\n lastseen = -1\n newSubcircuits = [x if x<removedIndex and x!= lastseen and not else for x in self.subcircuits]\n \n return [x for x in self.subcircuits if x not in seen and not seen_add(x)] \n \n ''' \n return self\n \n def cancelDuplicates(self,showWarnings=1):\n '''NOTE: THIS PROCEDURE DESTROYS SUBCIRCUIT STRUCTURE'''\n thisGate = self.listGates[0]\n thisGateIndex = 0\n while thisGate != None:\n numGates = self.numGates()\n if thisGateIndex+1 >= numGates:\n thisGate = None\n break\n nextGate = self.listGates[thisGateIndex+1]\n if not thisGate.canCancel(nextGate):\n doesCommuteNext = thisGate.checkCommute(nextGate)\n increment = 2\n hasCancelledAfterCommutation = False\n while doesCommuteNext:\n if thisGateIndex+increment >= self.numGates():\n break\n followingGate = self.listGates[thisGateIndex+increment]\n #if not (thisGate.type == followingGate.type and thisGate.qubits == followingGate.qubits):\n if not thisGate.canCancel(followingGate):\n doesCommuteNext = thisGate.checkCommute(followingGate)\n increment += 1\n else:\n if thisGate.type == 'rx' or thisGate.type == 'ry' or thisGate.type == 'rz' or thisGate.type == 'i':\n thisGate.angle = thisGate.angle + followingGate.angle\n self.listGates[thisGateIndex] = thisGate\n self.removeGate(thisGateIndex+increment)\n doesCommuteNext = 0\n hasCancelledAfterCommutation = True\n continue\n else:\n self.removeGate(thisGateIndex+increment)\n self.removeGate(thisGateIndex)\n thisGate = self.listGates[thisGateIndex]\n doesCommuteNext = 0\n hasCancelledAfterCommutation = True\n continue\n \n if not hasCancelledAfterCommutation:\n thisGateIndex += 1\n thisGate = self.listGates[thisGateIndex]\n continue\n else:\n if thisGate.type == 'rx' or thisGate.type == 'ry' or thisGate.type == 'rz' or thisGate.type == 'i':\n thisGate.angle = thisGate.angle + nextGate.angle\n self.listGates[thisGateIndex] = thisGate\n self.removeGate(thisGateIndex+1)\n continue\n else:\n self.removeGate(thisGateIndex)\n self.removeGate(thisGateIndex)\n if not thisGateIndex >= self.numGates():\n thisGate = self.listGates[thisGateIndex]\n continue\n return self\n \n def fullCancelDuplicates(self):\n '''NOTE: THIS PROCEDURE DESTROYS SUBCIRCUIT STRUCTURE'''\n while True:\n # oldCircuit = copy.deepcopy(self)\n oldNumGates = self.numGates()\n if oldNumGates == 0:\n return self\n self.cancelDuplicates()\n if self.numGates() == oldNumGates:\n return self\n return\n \n def find(self, gateType, reverseSearch=0):\n '''find first instance of a specific type of gate, return its index\n return -1 on fail'''\n if reverseSearch == 1:\n for index,gate in reversed(list(enumerate(self.listGates))):\n if gate.type == gateType.lower():\n return index\n else:\n for index,gate in enumerate(self.listGates):\n if gate.type == gateType.lower():\n return index\n return -1\n def flipHYDirection(self):\n for gate in self.listGates:\n if gate.type == 'hy':\n gate.type = 'hyinv'\n elif gate.type == 'hyinv':\n gate.type = 'hy'\n return self \n \n def toInteriorBasisChange(self):\n '''puts basis changes inside of the circuit. \n NOTE: this assumes basis changes are currently at front/end\n also returns signed int giving change in gate count'''\n originalNumGates = self.numGates()\n listBasisChangedQubits = []\n \n def splitSubcircuit(self):\n '''splits a subcircuit even further, into inital basis changes, initial CNOTS, middle rotation, final CNOTs, final basis changes'''\n numGates = self.numGates()\n initialRotationCircuit = Circuit([])\n initialCNOTCircuit = Circuit([])\n middleGate = Circuit([])\n finalCNOTCircuit = Circuit([])\n finalRotationCircuit = Circuit([])\n if numGates == 0:\n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n \n for index,gate in enumerate(self.listGates):\n if gate.type not in ['h','hy','hyinv']:\n currentIndex = index\n if currentIndex == numGates:\n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n break\n else:\n initialRotationCircuit.addRight(gate)\n \n for index,gate in enumerate(self.listGates):\n if index < currentIndex:\n continue\n elif gate.type != 'cnot':\n currentIndex = index\n if currentIndex == numGates:\n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n break\n else:\n initialCNOTCircuit.addRight(gate)\n \n middleGate = copy.deepcopy(self.listGates[currentIndex])\n currentIndex += 1\n \n for index,gate in enumerate(self.listGates):\n if index < currentIndex:\n continue\n elif gate.type != 'cnot':\n currentIndex = index\n if currentIndex == numGates:\n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n break\n else:\n finalCNOTCircuit.addRight(gate)\n \n for index,gate in enumerate(self.listGates):\n if index < currentIndex:\n continue\n elif gate.type not in ['h','hy','hyinv']:\n currentIndex = index\n if currentIndex == numGates:\n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n break\n else:\n finalRotationCircuit.addRight(gate)\n \n return (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit)\n \n def swapHY(self):\n newcircuit = Circuit([])\n for gate in self.listGates:\n if gate.type == 'hy':\n gate.type = 'hyinv'\n newcircuit.addRight(gate)\n return newcircuit\n \n def dump(self,filepath,overwrite=1):\n if overwrite:\n mode = 'wb'\n else:\n mode = 'ab'\n with open(filepath,mode):\n cPickle.dump(self,filepath,-1)\n return\n \n \n \n def subcircuitToInterior(self):\n (initialRotationCircuit,initialCNOTCircuit,middleGate,finalCNOTCircuit,finalRotationCircuit) = self.splitSubcircuit()\n circuit = Circuit([])\n middleQubit = middleGate.qubits\n \n initialCNOTQubits = initialCNOTCircuit.involvedQubits()\n initialRotatedQubits = [x for x in initialRotationCircuit.involvedQubits() if x!= middleQubit]\n #initialRotatedQubitsNoMiddle = [x for x in initialRotatedQubits if x != middleQubit]\n initialZQubits = [x for x in initialCNOTQubits if not x in initialRotatedQubits and not x == middleQubit]\n if isinstance(initialZQubits,int):\n initialZQubits = [initialZQubits]\n if initialZQubits:\n initialLastZQubit = initialZQubits[-1]\n if initialRotatedQubits:\n initialLastRotatedQubit = initialRotatedQubits[-1]\n if len(initialRotationCircuit.listGates) == 0:\n initialTargetRotation = 'rz'\n else:\n for gate in initialRotationCircuit.listGates:\n if gate.qubits == middleQubit:\n initialTargetRotation = gate.type\n break\n initialTargetRotation = 'rz'\n \n \n \n \n \n \n # '''if initialRotatedQubits:\n # for gate in initialRotationCircuit.listGates:\n # if gate.qubits == middleQubit:\n # initialTargetRotation = gate.type\n # break\n # initialTargetRotation = 'rz'\n # else:\n # if initialRotationCircuit.numGates() != 0:\n \n \n # initialTargetRotation = 'rz'''\n \n finalCNOTQubits = finalCNOTCircuit.involvedQubits()\n finalRotatedQubits = [x for x in finalRotationCircuit.involvedQubits() if x!= middleQubit]\n finalZQubits = [x for x in finalCNOTQubits if not x in finalRotatedQubits and not x == middleQubit]\n if isinstance(finalZQubits,int):\n initialZQubits = [finalZQubits]\n if finalZQubits:\n finalFirstZQubit = finalZQubits[0] #-1?\n if finalRotatedQubits:\n finalFirstRotatedQubit = finalRotatedQubits[0]\n if finalRotationCircuit.numGates() == 0:\n finalTargetRotation = 'rz'\n else:\n for gate in finalRotationCircuit.listGates:\n if gate.qubits == middleQubit:\n finalTargetRotation = gate.type\n break\n finalTargetRotation = 'rz'\n \n \n \n if initialZQubits: \n circuit.addRight(cnotChainGates(initialZQubits))\n if initialTargetRotation == 'h':\n circuit.addRight(Gate('cz',[initialLastZQubit,middleQubit]))\n else:\n circuit.addRight(Gate('cnot',[initialLastZQubit,middleQubit]))\n if initialRotatedQubits:\n circuit.addRight(initialRotationCircuit)\n circuit.addRight(cnotChainGates(initialRotatedQubits))\n # if initialTargetRotation == 'rz':\n circuit.addRight(Gate('cnot',[initialLastRotatedQubit,middleQubit]))\n circuit.addRight(middleGate)\n if finalRotatedQubits:\n #if finalTargetRotation == 'rz':\n circuit.addRight(Gate('cnot',[finalFirstRotatedQubit,middleQubit]))\n circuit.addRight(cnotChainGates(finalRotatedQubits,1))\n circuit.addRight(finalRotationCircuit)\n if finalZQubits:\n if finalTargetRotation == 'h':\n circuit.addRight(Gate('cz',[finalFirstZQubit,middleQubit]))\n else:\n circuit.addRight(Gate('cnot',[finalFirstZQubit,middleQubit]))\n circuit.addRight(cnotChainGates(finalZQubits,1))\n \n return circuit #finally\n \n def circuitToInterior(self):\n newCircuit = Circuit([])\n numSubcircuits = len(self.subcircuits)\n newSubcircuits = []\n for i in range(numSubcircuits):\n thisSubcircuit = self.getSubcircuitByIndex(i)\n newSubcircuit = thisSubcircuit.subcircuitToInterior()\n newCircuit.addRight(newSubcircuit)\n newSubcircuits.append(newCircuit.numGates()-1)\n newCircuit.update()\n return newCircuit\n\n \n \ndef changeBasisGates(listBasisChanges):\n '''return a circuit which implements basis changes, assuming qubits are currently in standard computational (ie pauli Z) basis)\n listBasisChanges: list of tuples, each tuple is (index of qubit, desired basis 1=x, 2=y, 3=z).\n nb list should be in qubit index ascending order\n nb as ops are self-inverse this will also return qubits to computational basis'''\n \n \n \n circuit = Circuit([])\n ''' if len(listBasisChanges) == 1:\n qubitIndex = listBasisChanges[0][0]\n whichBasis = listBasisChanges[0][1]\n if whichBasis == 1:\n gate = Gate('h',qubitIndex)\n circuit.addRight(gate)\n elif whichBasis == 2:\n gate = Gate('rx',qubitIndex,-0.25)\n circuit.addRight(gate)\n return circuit\n '''\n for qubitIndex, whichBasis in listBasisChanges:\n if whichBasis == 1:\n gate = Gate('h',qubitIndex)\n circuit.addRight(gate)\n elif whichBasis == 2:\n #gate = Gate('rx',qubitIndex,-0.25)\n gate = Gate('hyinv',qubitIndex)\n circuit.addRight(gate)\n return circuit\n\ndef cnotChainGates(indices,doReverse=0):\n '''take a list of qubit indices, return a circuit applying sequential CNOTS between each pair\n i.e. [1,2,3] -> CNOT(1,2) CNOT(2,3)'''\n circuit = Circuit([])\n if len(indices) < 2:\n return circuit\n if not doReverse:\n for i in range(len(indices)-1):\n gate = Gate('cnot',[indices[i],indices[i+1]])\n circuit.addRight(gate)\n else:\n for i in range(len(indices)-1):\n gate = Gate('cnot',[indices[i+1],indices[i]])\n circuit.addRight(gate)\n \n \n return circuit\n\ndef cnotChainGatesAncilla(indices,ancillaIndex):\n '''take a list of qubit indices, return a circuit applying sequential CNOTS between each pair\n i.e. [1,2,3] -> CNOT(1,2) CNOT(2,3)'''\n \n circuit = Circuit([])\n if len(indices) < 2:\n return circuit\n for i in range(len(indices)):\n # print([indices[i],ancillaIndex])\n gate = Gate('cnot',[indices[i],ancillaIndex])\n circuit.addRight(gate)\n \n return circuit \n \n \ndef pauliStringToCircuit(op):\n '''take op term as [coefficient, [list of paulis]], return circuit'''\n coefficient = op[0]\n pauliString = op[1]\n numQubits = len(pauliString)\n circuit = Circuit([])\n try:\n if list(pauliString) == [0]*numQubits: #identity is a special case, apply \"global\" phase\n identity = Gate('i',0,coefficient/(2 * numpy.pi))\n circuit.addRight(identity)\n return circuit\n else:\n #get (qubit,pauli) pairs for identity paulis\n nonIdentityPaulis = [(index,value) for (index,value) in enumerate(reversed(pauliString)) if value!= 0]\n sortedNonIdentityPaulis = sorted(nonIdentityPaulis,key=lambda thing:thing[0]) #sort paulis in increasing qubit order\n \n basisChangeCircuit = changeBasisGates(sortedNonIdentityPaulis)\n involvedQubitIndices = [thing[0] for thing in sortedNonIdentityPaulis]\n cnotsCircuit = cnotChainGates(involvedQubitIndices)\n leftCircuit = basisChangeCircuit.addRight(cnotsCircuit)\n rightCircuit = leftCircuit.getInverse()\n rightCircuit.flipHYDirection()\n lastPauliIndex = sortedNonIdentityPaulis[-1][0]\n middleGate = Gate('rz',lastPauliIndex,(coefficient/numpy.pi))\n leftCircuit.addRight(middleGate)\n finalCircuit = leftCircuit.addRight(rightCircuit)\n except:\n print(op)\n \n # for thisIndex, thisPauli in enumerate(reversed(pauliString)):\n # if thisPauli == 1:\n # gate = Gate('rx',1/2)\n return finalCircuit\n\ndef oplistToCircuit(oplist):\n circuit = Circuit([])\n for op in oplist:\n termCircuit = pauliStringToCircuit(op)\n circuit.addRight(termCircuit)\n circuit.markSubcircuit()\n circuit.update()\n return circuit\n\ndef pauliStringToInteriorCircuit(op):\n '''take op term as [coefficient, [list of paulis]], return circuit'''\n coefficient = op[0]\n pauliString = op[1]\n numQubits = len(pauliString)\n leftCircuit = Circuit([])\n \n if list(pauliString) == [0]*numQubits: #identity is a special case, apply \"global\" phase\n identity = Gate('i',0,coefficient/(2 * numpy.pi))\n leftCircuit.addRight(identity)\n return leftCircuit\n else:\n nonIdentityPaulis = [(index,value) for (index,value) in enumerate(reversed(pauliString)) if value!= 0]\n sortedNonIdentityPaulis = sorted(nonIdentityPaulis,key=lambda thing:thing[0]) #sort paulis in increasing qubit order\n lastPauliIndex,lastPauliType = sortedNonIdentityPaulis[-1]\n #get list of X, Y and Z terms\n xQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 1]\n yQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 2]\n zQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 3]\n xAndYQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli in [1,2]]\n #build the circuit components\n zCNOTCircuit = cnotChainGates(zQubits)\n leftCircuit.addRight(zCNOTCircuit)\n if lastPauliType != 3 and zQubits != [] :\n controlledZ = Gate('cz',[max(zQubits),lastPauliIndex])\n leftCircuit.addRight(controlledZ)\n \n basisChangeCircuit = changeBasisGates(sortedNonIdentityPaulis)\n leftCircuit.addRight(basisChangeCircuit)\n xyCNOTCircuit = cnotChainGates(xAndYQubits)\n leftCircuit.addRight(xyCNOTCircuit)\n if lastPauliType == 3 and xAndYQubits != [] :\n lastCNOT = Gate('cnot', [max(xAndYQubits), lastPauliIndex])\n leftCircuit.addRight(lastCNOT)\n \n rightCircuit = leftCircuit.getInverse()\n rightCircuit.flipHYDirection()\n middleGate = Gate('rz',lastPauliIndex,(coefficient/numpy.pi))\n leftCircuit.addRight(middleGate)\n finalCircuit = leftCircuit.addRight(rightCircuit)\n \n return finalCircuit\n\ndef OBSOLETE_pauliStringToAncillaCircuit(op):\n '''take op term as [coefficient, [list of paulis]], return circuit'''\n coefficient = op[0]\n pauliString = op[1]\n numQubits = len(pauliString)\n ancillaIndex = numQubits\n leftCircuit = Circuit([])\n \n if pauliString == [0]*numQubits: #identity is a special case, apply \"global\" phase\n identity = Gate('i',0,coefficient/(2 * numpy.pi))\n leftCircuit.addRight(identity)\n return leftCircuit\n else:\n nonIdentityPaulis = [(index,value) for (index,value) in enumerate(reversed(pauliString)) if value!= 0]\n sortedNonIdentityPaulis = sorted(nonIdentityPaulis,key=lambda thing:thing[0]) #sort paulis in increasing qubit order\n lastPauliIndex,lastPauliType = sortedNonIdentityPaulis[-1]\n #get list of X, Y and Z terms\n xQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 1]\n yQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 2]\n zQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli== 3]\n xAndYQubits = [qubit for (qubit, pauli) in sortedNonIdentityPaulis if pauli in [1,2]]\n #build the circuit components\n if lastPauliType == 3 and zQubits != []:\n zQubitsReduced = [qubit for qubit in zQubits if qubit != lastPauliIndex]\n if zQubitsReduced != []:\n zCNOTCircuit = cnotChainGatesAncilla(zQubitsReduced,ancillaIndex)\n leftCircuit.addRight(zCNOTCircuit)\n lastCNOT = Gate('cnot',[ancillaIndex,lastPauliIndex])\n leftCircuit.addRight(lastCNOT)\n elif lastPauliType != 3 and zQubits != []:\n zCNOTCircuit = cnotChainGatesAncilla(zQubits,ancillaIndex)\n leftCircuit.addRight(zCNOTCircuit)\n controlledZ = Gate('cz',[max(zQubits),lastPauliIndex])\n leftCircuit.addRight(controlledZ)\n \n basisChangeCircuit = changeBasisGates(sortedNonIdentityPaulis)\n leftCircuit.addRight(basisChangeCircuit)\n xyCNOTCircuit = cnotChainGates(xAndYQubits)\n leftCircuit.addRight(xyCNOTCircuit)\n if lastPauliType == 3 and xAndYQubits != [] :\n lastCNOT = Gate('cnot', [max(xAndYQubits), lastPauliIndex])\n leftCircuit.addRight(lastCNOT)\n \n rightCircuit = leftCircuit.getInverse()\n middleGate = Gate('rz',lastPauliIndex,(coefficient/numpy.pi))\n leftCircuit.addRight(middleGate)\n finalCircuit = leftCircuit.addRight(rightCircuit)\n \n return leftCircuit\ndef oplistToInteriorCircuit(oplist):\n circuit = Circuit([])\n for op in oplist:\n termCircuit = pauliStringToInteriorCircuit(op)\n circuit.addRight(termCircuit)\n circuit.markSubcircuit()\n circuit.update()\n return circuit\n \ndef OBSOLETE_oplistToAncillaCircuit(oplist):\n circuit = Circuit([])\n for op in oplist:\n termCircuit = pauliStringToAncillaCircuit(op)\n circuit.addRight(termCircuit)\n circuit.markSubcircuit()\n circuit.update()\n return circuit\n\n\ndef cnotChainAncilla(indices,ancillaIndex):\n '''take a list of qubit indices, return a circuit applying sequential CNOTS between each pair\n i.e. [1,2,3] -> CNOT(1,2) CNOT(2,3)'''\n circuit = Circuit([])\n if len(indices) < 2:\n return circuit\n for i in range(len(indices)-1):\n gate = Gate('cnot',[indices[i],ancillaIndex])\n circuit.addRight(gate)\n \n \n return circuit\n\ndef pauliStringToAncillaCircuit(op,ancillaIndex=-1):\n '''take op term as [coefficient, [list of paulis]], return circuit'''\n coefficient = op[0]\n pauliString = op[1]\n numQubits = len(pauliString)\n circuit = Circuit([])\n if ancillaIndex == -1:\n ancillaIndex = numQubits\n if list(pauliString) == [0]*numQubits: #identity is a special case, apply \"global\" phase\n identity = Gate('i',0,coefficient/(2 * numpy.pi))\n circuit.addRight(identity)\n return circuit\n else:\n #get (qubit,pauli) pairs for identity paulis\n nonIdentityPaulis = [(index,value) for (index,value) in enumerate(reversed(pauliString)) if value!= 0]\n sortedNonIdentityPaulis = sorted(nonIdentityPaulis,key=lambda thing:thing[0]) #sort paulis in increasing qubit order\n lastPauliIndex = sortedNonIdentityPaulis[-1][0]\n leftCircuit = changeBasisGates(sortedNonIdentityPaulis)\n involvedQubitIndices = [thing[0] for thing in sortedNonIdentityPaulis]\n leftCircuit.addRight(cnotChainAncilla(involvedQubitIndices,ancillaIndex))\n if len(involvedQubitIndices) > 1:\n leftCircuit.addRight(Gate('cnot',[ancillaIndex,lastPauliIndex]))\n rightCircuit = leftCircuit.getInverse()\n rightCircuit.flipHYDirection()\n middleGate = Gate('rz',lastPauliIndex,(coefficient/numpy.pi))\n leftCircuit.addRight(middleGate)\n finalCircuit = leftCircuit.addRight(rightCircuit)\n \n \n # for thisIndex, thisPauli in enumerate(reversed(pauliString)):\n # if thisPauli == 1:\n # gate = Gate('rx',1/2)\n return finalCircuit\n \ndef oplistToAncillaCircuit(oplist,ancillaInd=-1):\n circuit = Circuit([])\n for op in oplist:\n termCircuit = pauliStringToAncillaCircuit(op,ancillaInd)\n circuit.addRight(termCircuit)\n circuit.markSubcircuit()\n circuit.update()\n return circuit\n\n\n\n \n \n \n " ]
[ [ "numpy.full", "numpy.sin", "numpy.angle", "numpy.exp", "numpy.cos" ] ]
gautamkmr/caffe2
[ "cde7f21d1e34ec714bc08dbfab945a1ad30e92ff" ]
[ "caffe2/python/test_util.py" ]
[ "## @package test_util\n# Module caffe2.python.test_util\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport numpy as np\nfrom caffe2.python import workspace\n\nimport unittest\n\n\ndef rand_array(*dims):\n # np.random.rand() returns float instead of 0-dim array, that's why need to\n # do some tricks\n return np.array(np.random.rand(*dims) - 0.5).astype(np.float32)\n\n\nclass TestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n workspace.GlobalInit([\n 'caffe2',\n '--caffe2_log_level=0',\n ])\n\n def setUp(self):\n self.ws = workspace.C.Workspace()\n workspace.ResetWorkspace()\n\n def tearDown(self):\n workspace.ResetWorkspace()\n" ]
[ [ "numpy.random.rand" ] ]
hotttao/gostock
[ "3aac0d9cd08d32329fde9d17109945299ed8c443" ]
[ "spider/db_tools/mysql_db.py" ]
[ "import os\nimport pandas\nfrom sqlalchemy import create_engine\n\nMYSQL_USER = os.getenv(\"MYSQL_USER\")\nMYSQL_PASSWORD = os.getenv(\"MYSQL_PASSWORD\")\nMYSQL_HOST = os.getenv(\"MYSQL_HOST\")\nMYSQL_PORT = os.getenv(\"MYSQL_PORT\")\nMYSQL_DEFAULT_DB = os.getenv(\"MYSQL_DEFAULT_DB\")\nMYSQL_DSN = os.getenv(\"MYSQL_DSN\")\n\nMYSQL_DSN_FORMAT = \"mysql+pymysql://{USER}:{PASSWORD}@{HOST}:{PORT}/{DEFAULT_DB}\"\nSQL_SCHEMA = 'ALTER TABLE {table} MODIFY id INT NOT NULL PRIMARY KEY;'\n\nclass MySQLDB:\n def __init__(self, user=MYSQL_USER, password=MYSQL_PASSWORD,\n host=MYSQL_HOST, port=MYSQL_PORT,\n default_db=MYSQL_DEFAULT_DB, dsn=MYSQL_DSN):\n self.user = user\n self.password = password\n self.host = host\n self.port = port\n self.dsn = dsn or \\\n MYSQL_DSN_FORMAT.format(USER=user, PASSWORD=password,\n HOST=host, PORT=port,\n DEFAULT_DB=default_db)\n print(dsn)\n self.engine = create_engine(self.dsn)\n\n def query(self, sql):\n df = pandas.read_sql(sql, self.engine)\n return df\n\n def insert(self, df, table):\n df.to_sql(table, self.engine, if_exists=\"append\",index=False)\n\n def alter_table(self, table):\n with self.engine.connect() as conn:\n sql_schema = SQL_SCHEMA.format(table=table)\n conn.execute(sql_schema)\n\n" ]
[ [ "pandas.read_sql" ] ]
YusukeNagasaka/Batched-SpMM
[ "bb7d1989bbf57fc3a22dfa1483749c4c6a1acad3" ]
[ "batched_call.py" ]
[ "import tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_sparse_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\n\nclass BatchedSpMM:\n\tdef __init__(self):\n\t\tself.b_module = tf.load_op_library('./batched.so')\n\n\tdef call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):\n\t\tsp_indices = [sp_m.indices for sp_m in sp_matrices]\n\t\tsp_values = [sp_m.values for sp_m in sp_matrices]\n\t\tsp_shape = [sp_m.dense_shape for sp_m in sp_matrices]\n\t\treturn self.b_module.bspmm(sp_ids = sp_indices, sp_values = sp_values, sp_shape = sp_shape, rhs = dense_matrices, adjoint_a = adjoint_a, adjoint_b = adjoint_b)\n\nclass BatchedSpMDT:\n\tdef __init__(self):\n\t\tself.b_module = tf.load_op_library('./batched.so')\n\n\tdef call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):\n\t\tsp_indices = [sp_m.indices for sp_m in sp_matrices]\n\t\tsp_values = [sp_m.values for sp_m in sp_matrices]\n\t\tsp_shape = [sp_m.dense_shape for sp_m in sp_matrices]\n\n\t\treturn self.b_module.bspmdt(sp_ids = sp_indices, sp_values = sp_values, sp_shape = sp_shape, rhs = dense_matrices, adjoint_a = adjoint_a, adjoint_b = adjoint_b)\n\nb_module = tf.load_op_library('./batched.so')\n\[email protected](\"Bspmdt\")\ndef _bspmdt_grad(op, *grad):\n\n \"\"\"Gradients for the dense tensors in the SparseTensorDenseMatMul ops.\n Args:\n op: the Bspmdt ops\n grads: the incoming gradients\n\n Returns:\n Gradients for each of the 4 input tensors:\n (sparse_indices, sparse_values, sparse_shape, dense_tensor)\n The gradients for indices and shape are None.\n\n \"\"\"\n numTensors = (len(op.inputs) - 1) // 3\n \n a_indices = op.inputs[0:numTensors]\n a_values = op.inputs[numTensors:numTensors*2]\n a_shape = op.inputs[numTensors*2:numTensors*3]\n b = op.inputs[numTensors*3]\n adj_a = op.get_attr(\"adjoint_a\")\n adj_b = op.get_attr(\"adjoint_b\")\n \n # gradient w.r.t. dense\n a_values_grads = []\n b_list = [b[i] for i in range(numTensors)]\n \n b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False)\n\n bg_row=tf.shape(b_grads[0])[0]\n bg_col=tf.shape(b_grads[0])[1]\n b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col))\n\n if adj_b:\n b_grads = [array_ops.transpose(b_g) for b_g in b_grads]\n \n for t in range(numTensors):\n rows = a_indices[t][:, 0]\n cols = a_indices[t][:, 1]\n parts_a = array_ops.gather(grad[t], rows if not adj_a else cols)\n parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows)\n a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))\n\t\n return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads]\n return tuple(return_val)\n \n" ]
[ [ "tensorflow.shape", "tensorflow.python.ops.array_ops.gather", "tensorflow.load_op_library", "tensorflow.reshape", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.array_ops.transpose" ] ]
brendaneross/AlgoTrader2A
[ "cac938b78f8602f3aa7623b6626f54e6f3734b02" ]
[ "main.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport requests\r\nimport math\r\nfrom scipy import stats\r\nimport xlsxwriter\r\nfrom secrets import IEX_CLOUD_API_TOKEN\r\nfrom tabulate import tabulate\r\n\r\n\r\ndef get_stock_data(symbol):\r\n sandbox_api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/stats/year1ChangePercent?token={IEX_CLOUD_API_TOKEN}'\r\n # print(sandbox_api_url)\r\n data = requests.get(sandbox_api_url).json()\r\n return data\r\n\r\n\r\ndef portfolio_input():\r\n global portfolio_size\r\n portfolio_size = input('Enter the value of your portfolio:')\r\n try:\r\n float(portfolio_size)\r\n except ValueError:\r\n print('That is not a number!\\nPlease try again.')\r\n portfolio_size = input('Enter the value of your portfolio:')\r\n\r\n\r\n# breaking batch to lists of 100 (or less) stock symbols\r\n# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\r\ndef chunks(lst, n):\r\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\r\n for i in range(0, len(lst), n):\r\n yield lst[i:i + n]\r\n\r\n\r\ndef main():\r\n stocks = pd.read_csv('sp_500_stocks.csv')\r\n my_columns = ['Ticker', 'Stock Price', 'One-Year Price Return', 'Number of Shares to Buy']\r\n\r\n # preparing batches\r\n stock_groups = list(chunks(stocks['Ticker'], 100))\r\n stock_strings = []\r\n for i in range(0, len(stock_groups)):\r\n stock_strings.append(','.join(stock_groups[i]))\r\n\r\n final_dataframe = pd.DataFrame(columns=my_columns)\r\n\r\n # batches and dataframe loop\r\n for stock_string in stock_strings:\r\n batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch?' \\\r\n f'symbols={stock_string}&types=price,stats&token={IEX_CLOUD_API_TOKEN}'\r\n data = requests.get(batch_api_call_url).json()\r\n for stock in stock_string.split(','):\r\n final_dataframe = final_dataframe.append(\r\n pd.Series([\r\n stock,\r\n data[stock]['price'],\r\n data[stock]['stats']['year1ChangePercent'],\r\n 'N/A'\r\n ], index=my_columns),\r\n ignore_index=True\r\n )\r\n\r\n final_dataframe.sort_values('One-Year Price Return', ascending=False, inplace=True)\r\n final_dataframe = final_dataframe[:50]\r\n final_dataframe.reset_index(drop=True, inplace=True)\r\n\r\n # getting the number of shares per security with regard to portfolio size input\r\n portfolio_input()\r\n\r\n position_size = float(portfolio_size)\r\n for i in range(0, len(final_dataframe)):\r\n final_dataframe.loc[i, 'Number of Shares to Buy'] = math.floor(position_size/final_dataframe.loc[i, 'Stock Price'])\r\n\r\n # display dataframe with final calculations\r\n print(tabulate(final_dataframe, headers=my_columns))\r\n\r\n \"\"\"Setting up the XLSX Writer for data export\"\"\"\r\n\r\n writer = pd.ExcelWriter('recommended_trades.xlsx', engine='xlsxwriter')\r\n final_dataframe.to_excel(writer, 'Recommended Trades', index=False)\r\n\r\n \"\"\"Formatting Export File\"\"\"\r\n background_color = '#0a0a23'\r\n font_color = '#ffffff'\r\n\r\n string_format = writer.book.add_format(\r\n {\r\n 'font_color': font_color,\r\n 'bg_color': background_color,\r\n 'border': 1\r\n }\r\n )\r\n\r\n dollar_format = writer.book.add_format(\r\n {\r\n 'num_format': '$0.00',\r\n 'font_color': font_color,\r\n 'bg_color': background_color,\r\n 'border': 1\r\n }\r\n )\r\n\r\n integer_format = writer.book.add_format(\r\n {\r\n 'num_format': '0',\r\n 'font_color': font_color,\r\n 'bg_color': background_color,\r\n 'border': 1\r\n }\r\n )\r\n\r\n decimal_format = writer.book.add_format(\r\n {\r\n 'num_format': '0%',\r\n 'font_color': font_color,\r\n 'bg_color': background_color,\r\n 'border': 1\r\n }\r\n )\r\n \"\"\"This is ugly.... can be a loop instead\"\"\"\r\n \"\"\"\r\n writer.sheets['Recommended Trades'].set_column('A:A', 18, string_format)\r\n writer.sheets['Recommended Trades'].set_column('B:B', 18, string_format)\r\n writer.sheets['Recommended Trades'].set_column('C:C', 18, string_format)\r\n writer.sheets['Recommended Trades'].set_column('D:D', 18, string_format)\r\n \"\"\"\r\n\r\n column_formats = {\r\n 'A': ['Ticker', string_format],\r\n 'B': ['Stock Price', dollar_format],\r\n 'C': ['One-Year Price Return', decimal_format],\r\n 'D': ['Number of Shares to Buy', integer_format]\r\n }\r\n\r\n for column in column_formats.keys():\r\n writer.sheets['Recommended Trades'].set_column(f'{column}:{column}', 18, column_formats[column][1])\r\n writer.sheets['Recommended Trades'].write(f'{column}1', column_formats[column][0], column_formats[column][1])\r\n writer.save()\r\n\r\n\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n# TODO: add secrets to gitignore\r\n\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.Series", "pandas.ExcelWriter" ] ]
LcpMarvel/face_recognition_backend
[ "b392e2c392b6f2ff238fc2c0d5b680f31a86c0f5" ]
[ "app/app/service/face_recognition.py" ]
[ "import numpy as np\nimport json\nimport face_recognition\nimport math\n\nfrom .face_interface import FaceInterface, FaceNotFoundException\nfrom ..config import app\nfrom ..model.face import Face\n\nclass FaceRecognition(FaceInterface):\n def encode(self, image_info):\n file = face_recognition.load_image_file(image_info.image_path())\n encodings = face_recognition.face_encodings(\n file,\n num_jitters=app.config['FACE_ENCODING_NUM_JITTERS'],\n model=app.config['FACE_ENCODING_MODEL']\n )\n\n if len(encodings) == 0:\n raise FaceNotFoundException\n\n return encodings[0].tolist()\n\n def detect(self, image_info):\n face_image = face_recognition.load_image_file(image_info.image_path())\n locations = self._face_locations(face_image)\n\n face_locations = list(\n map(lambda location: self._convert_location(location), locations)\n )\n\n face_num =len(face_locations)\n\n return face_num, face_locations\n\n def search(self, image_info):\n face_image = face_recognition.load_image_file(image_info.image_path())\n face_locations = self._face_locations(face_image)\n\n face_encodings = face_recognition.face_encodings(face_image, face_locations)\n\n faces = Face.query.all()\n known_encodings = []\n known_ids = []\n for face in faces:\n known_encodings.append(face.encoding)\n known_ids.append(face.id)\n\n face_array = []\n for index in range(len(face_encodings)):\n face_id = -1\n trust = 0\n face_to_check=face_encodings[index]\n position = face_locations[index]\n matches = face_recognition.compare_faces(\n known_encodings,\n face_to_check,\n tolerance=app.config['FACE_COMPARE_TOLERANCE']\n )\n\n if app.config['FACE_COMPARE_BY_TOLERANCE']:\n if True in matches:\n first_match_index = matches.index(True)\n face_id = known_ids[first_match_index]\n trust = 100\n else:\n face_distances = face_recognition.face_distance(known_encodings, face_to_check)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n face_id = known_ids[best_match_index]\n trust = self.face_distance_to_conf(face_distances[best_match_index], app.config['FACE_COMPARE_TOLERANCE'])\n\n if face_id > 0:\n matched_face = Face.find(face_id)\n meta_data = json.loads(matched_face.meta_data) if matched_face.meta_data else None\n\n face_result = {\"faceId\": face_id, \"faceMetaData\": meta_data, \"trust\": trust, \"position\": self._convert_location(position)}\n face_array.append(face_result)\n\n return face_array\n\n def _face_locations(self, face_image):\n return face_recognition.face_locations(\n face_image,\n number_of_times_to_upsample=app.config['FACE_LOCATION_NUM_UNSAMPLE']\n )\n\n def _convert_location(self, location):\n [top, right, bottom, left] = location\n\n return {\n 'top': top,\n 'left': left,\n 'width': right - left,\n 'height': bottom - top\n }\n\n def face_distance_to_conf(self, face_distance, face_match_threshold=0.6):\n if face_distance > face_match_threshold:\n range = (1.0 - face_match_threshold)\n linear_val = (1.0 - face_distance) / (range * 2.0)\n return linear_val\n else:\n range = face_match_threshold\n linear_val = 1.0 - (face_distance / (range * 2.0))\n return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))\n" ]
[ [ "numpy.argmin" ] ]
bl6g6/nanodet_ir
[ "5ce60b9def5d1d86dd69be8def86a7ffccb25e76" ]
[ "nanodet/trainer/task.py" ]
[ "# Copyright 2021 RangiLyu.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os\nimport warnings\nimport json\nimport torch\nimport logging\nfrom pytorch_lightning import LightningModule\nfrom typing import Any, List, Dict, Tuple, Optional\n\nfrom ..model.arch import build_model\nfrom nanodet.util import mkdir\n\n\nclass TrainingTask(LightningModule):\n \"\"\"\n Pytorch Lightning module of a general training task.\n Including training, evaluating and testing.\n Args:\n cfg: Training configurations\n evaluator: Evaluator for evaluating the model performance.\n \"\"\"\n\n def __init__(self, cfg, evaluator=None):\n super(TrainingTask, self).__init__()\n self.cfg = cfg\n self.model = build_model(cfg.model)\n self.evaluator = evaluator\n self.save_flag = -10\n self.log_style = 'NanoDet' # Log style. Choose between 'NanoDet' or 'Lightning'\n # TODO: use callback to log\n\n def forward(self, x):\n x = self.model(x)\n return x\n\n @torch.no_grad()\n def predict(self, batch, batch_idx=None, dataloader_idx=None):\n preds = self.forward(batch['img'])\n results = self.model.head.post_process(preds, batch)\n return results\n\n def on_train_start(self) -> None:\n self.lr_scheduler.last_epoch = self.current_epoch-1\n\n def training_step(self, batch, batch_idx):\n preds, loss, loss_states = self.model.forward_train(batch)\n\n # log train losses\n if self.log_style == 'Lightning':\n self.log('lr', self.optimizers().param_groups[0]['lr'], on_step=True, on_epoch=False, prog_bar=True)\n for k, v in loss_states.items():\n self.log('Train/'+k, v, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True)\n elif self.log_style == 'NanoDet' and self.global_step % self.cfg.log.interval == 0:\n lr = self.optimizers().param_groups[0]['lr']\n log_msg = 'Train|Epoch{}/{}|Iter{}({})| lr:{:.2e}| '.format(self.current_epoch+1,\n self.cfg.schedule.total_epochs, self.global_step, batch_idx, lr)\n self.scalar_summary('Train_loss/lr', 'Train', lr, self.global_step)\n for l in loss_states:\n log_msg += '{}:{:.4f}| '.format(l, loss_states[l].mean().item())\n self.scalar_summary('Train_loss/' + l, 'Train', loss_states[l].mean().item(), self.global_step)\n self.info(log_msg)\n\n return loss\n\n def training_epoch_end(self, outputs: List[Any]) -> None:\n self.trainer.save_checkpoint(os.path.join(self.cfg.save_dir, 'model_last.ckpt'))\n self.lr_scheduler.step()\n\n def validation_step(self, batch, batch_idx):\n preds, loss, loss_states = self.model.forward_train(batch)\n\n if self.log_style == 'Lightning':\n self.log('Val/loss', loss, on_step=True, on_epoch=False, prog_bar=True, logger=False)\n for k, v in loss_states.items():\n self.log('Val/' + k, v, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)\n elif self.log_style == 'NanoDet' and batch_idx % self.cfg.log.interval == 0:\n lr = self.optimizers().param_groups[0]['lr']\n log_msg = 'Val|Epoch{}/{}|Iter{}({})| lr:{:.2e}| '.format(self.current_epoch+1,\n self.cfg.schedule.total_epochs, self.global_step, batch_idx, lr)\n for l in loss_states:\n log_msg += '{}:{:.4f}| '.format(l, loss_states[l].mean().item())\n self.info(log_msg)\n\n dets = self.model.head.post_process(preds, batch)\n return dets\n\n def validation_epoch_end(self, validation_step_outputs):\n \"\"\"\n Called at the end of the validation epoch with the outputs of all validation steps.\n Evaluating results and save best model.\n Args:\n validation_step_outputs: A list of val outputs\n\n \"\"\"\n results = {}\n for res in validation_step_outputs:\n results.update(res)\n eval_results = self.evaluator.evaluate(results, self.cfg.save_dir, rank=self.local_rank)\n metric = eval_results[self.cfg.evaluator.save_key]\n # save best model\n if metric > self.save_flag:\n self.save_flag = metric\n best_save_path = os.path.join(self.cfg.save_dir, 'model_best')\n mkdir(self.local_rank, best_save_path)\n self.trainer.save_checkpoint(os.path.join(best_save_path, \"model_best.ckpt\"))\n txt_path = os.path.join(best_save_path, \"eval_results.txt\")\n if self.local_rank < 1:\n with open(txt_path, \"a\") as f:\n f.write(\"Epoch:{}\\n\".format(self.current_epoch+1))\n for k, v in eval_results.items():\n f.write(\"{}: {}\\n\".format(k, v))\n else:\n warnings.warn('Warning! Save_key is not in eval results! Only save model last!')\n if self.log_style == 'Lightning':\n for k, v in eval_results.items():\n self.log('Val_metrics/' + k, v, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)\n elif self.log_style == 'NanoDet':\n for k, v in eval_results.items():\n self.scalar_summary('Val_metrics/' + k, 'Val', v, self.current_epoch+1)\n\n def test_step(self, batch, batch_idx):\n dets = self.predict(batch, batch_idx)\n return dets\n\n def test_epoch_end(self, test_step_outputs):\n results = {}\n for res in test_step_outputs:\n results.update(res)\n res_json = self.evaluator.results2json(results)\n json_path = os.path.join(self.cfg.save_dir, 'results.json')\n json.dump(res_json, open(json_path, 'w'))\n\n if self.cfg.test_mode == 'val':\n eval_results = self.evaluator.evaluate(results, self.cfg.save_dir, rank=self.local_rank)\n txt_path = os.path.join(self.cfg.save_dir, \"eval_results.txt\")\n with open(txt_path, \"a\") as f:\n for k, v in eval_results.items():\n f.write(\"{}: {}\\n\".format(k, v))\n\n def configure_optimizers(self):\n \"\"\"\n Prepare optimizer and learning-rate scheduler\n to use in optimization.\n\n Returns:\n optimizer\n \"\"\"\n optimizer_cfg = copy.deepcopy(self.cfg.schedule.optimizer)\n name = optimizer_cfg.pop('name')\n build_optimizer = getattr(torch.optim, name)\n print('\\n ********** Optimizer name: {} ! ! ! *********** \\n'.format(name))\n if name == 'Adam' or 'Adadelta':\n optimizer_cfg.pop('momentum')\n optimizer = build_optimizer(params=self.parameters(), **optimizer_cfg)\n\n schedule_cfg = copy.deepcopy(self.cfg.schedule.lr_schedule)\n name = schedule_cfg.pop('name')\n build_scheduler = getattr(torch.optim.lr_scheduler, name)\n self.lr_scheduler = build_scheduler(optimizer=optimizer, **schedule_cfg)\n # lr_scheduler = {'scheduler': self.lr_scheduler,\n # 'interval': 'epoch',\n # 'frequency': 1}\n # return [optimizer], [lr_scheduler]\n\n return optimizer\n\n def optimizer_step(self,\n epoch=None,\n batch_idx=None,\n optimizer=None,\n optimizer_idx=None,\n optimizer_closure=None,\n on_tpu=None,\n using_native_amp=None,\n using_lbfgs=None):\n \"\"\"\n Performs a single optimization step (parameter update).\n Args:\n epoch: Current epoch\n batch_idx: Index of current batch\n optimizer: A PyTorch optimizer\n optimizer_idx: If you used multiple optimizers this indexes into that list.\n optimizer_closure: closure for all optimizers\n on_tpu: true if TPU backward is required\n using_native_amp: True if using native amp\n using_lbfgs: True if the matching optimizer is lbfgs\n \"\"\"\n # warm up lr\n if self.trainer.global_step <= self.cfg.schedule.warmup.steps:\n if self.cfg.schedule.warmup.name == 'constant':\n warmup_lr = self.cfg.schedule.optimizer.lr * self.cfg.schedule.warmup.ratio\n elif self.cfg.schedule.warmup.name == 'linear':\n k = (1 - self.trainer.global_step / self.cfg.schedule.warmup.steps) * (1 - self.cfg.schedule.warmup.ratio)\n warmup_lr = self.cfg.schedule.optimizer.lr * (1 - k)\n elif self.cfg.schedule.warmup.name == 'exp':\n k = self.cfg.schedule.warmup.ratio ** (1 - self.trainer.global_step / self.cfg.schedule.warmup.steps)\n warmup_lr = self.cfg.schedule.optimizer.lr * k\n else:\n raise Exception('Unsupported warm up type!')\n for pg in optimizer.param_groups:\n pg['lr'] = warmup_lr\n\n # update params\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n def get_progress_bar_dict(self):\n # don't show the version number\n items = super().get_progress_bar_dict()\n items.pop(\"v_num\", None)\n items.pop(\"loss\", None)\n return items\n\n def scalar_summary(self, tag, phase, value, step):\n \"\"\"\n Write Tensorboard scalar summary log.\n Args:\n tag: Name for the tag\n phase: 'Train' or 'Val'\n value: Value to record\n step: Step value to record\n\n \"\"\"\n if self.local_rank < 1:\n self.logger.experiment.add_scalars(tag, {phase: value}, step)\n\n def info(self, string):\n if self.local_rank < 1:\n logging.info(string)\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.no_grad" ] ]
quantmew/okex-py
[ "3e96413cd4e6dd5779ff2c47b8c76be53448783d" ]
[ "okex/v5/public_api.py" ]
[ "import datetime\nfrom typing import Union, Optional, Iterable\n\nfrom .client import Client\nfrom .consts import *\nfrom .utils import enum_to_str, iterable_to_str\nfrom ..exceptions import OkexParamsException\n\nfrom .insttype import InstType\nfrom .ccytype import CcyType\n\nimport pandas as pd\n\nclass PublicAPI(Client):\n\n def __init__(self, api_key, api_secret_key, passphrase, use_server_time=False, test=False, first=False):\n Client.__init__(self, api_key, api_secret_key, passphrase, use_server_time, test, first)\n\n def instruments(self, instType:Union[InstType, str], uly: Optional[str]=None, instId:Optional[str]=None):\n params = {}\n if instType is not None:\n params['instType'] = enum_to_str(instType)\n if uly is not None:\n params['uly'] = uly\n if instId is not None:\n params['instId'] = instId\n data = self._request_with_params(GET, INSTRUMENTS, params)[\"data\"]\n\n df = pd.DataFrame(data)\n df = df.apply(pd.to_numeric, errors='ignore')\n return df\n\n def delivery_exercise_history(self, instType:Union[InstType, str],\n uly: str,\n after: Optional[Union[int, str]]=None,\n before: Optional[Union[int, str]]=None, \n limit: Optional[Union[int, str]]=None):\n pass\n\n def open_interest(self,\n instType:Union[InstType, str],\n uly: Optional[str],\n instId: Optional[str]):\n pass\n\n def funding_rate(self, instId:str):\n pass" ]
[ [ "pandas.DataFrame" ] ]
seyuboglu/meerkat
[ "3c6ee2da8b84c609804ec22ccb1a663360769347" ]
[ "tests/meerkat/ops/test_merge.py" ]
[ "\"\"\"Unittests for Datasets.\"\"\"\nimport os\nfrom typing import Dict\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom meerkat.columns.abstract import AbstractColumn\nfrom meerkat.columns.image_column import ImageColumn\nfrom meerkat.columns.list_column import ListColumn\nfrom meerkat.columns.numpy_column import NumpyArrayColumn\nfrom meerkat.columns.tensor_column import TensorColumn\nfrom meerkat.datapanel import DataPanel\nfrom meerkat.errors import MergeError\n\nfrom ...testbeds import MockImageColumn\nfrom ..test_datapanel import DataPanelTestBed\n\n\nclass MergeTestBed(DataPanelTestBed):\n DEFAULT_CONFIG = {\n \"lengths\": [\n {\"left\": 12, \"right\": 16},\n {\"left\": 16, \"right\": 16},\n {\"left\": 16, \"right\": 12},\n ],\n \"consolidated\": [True, False],\n }\n\n def __init__(\n self,\n column_configs: Dict[str, AbstractColumn],\n simple: bool = False,\n lengths: int = 16,\n consolidated: int = 16,\n tmpdir: str = None,\n ):\n self.side_to_dp = {}\n if simple:\n # TODO (Sabri): do away with the simple testbed, and replace with the full\n # one after updating support for missing values\n # https://github.com/robustness-gym/meerkat/issues/123\n np.random.seed(1)\n self.side_to_dp[\"left\"] = DataPanel.from_batch(\n {\n \"key\": np.arange(lengths[\"left\"]),\n \"b\": list(np.arange(lengths[\"left\"])),\n \"c\": [[i] for i in np.arange(lengths[\"left\"])],\n \"d\": (torch.arange(lengths[\"left\"]) % 3),\n \"e\": [f\"1_{i}\" for i in np.arange(lengths[\"left\"])],\n }\n ).lz[np.random.permutation(np.arange(lengths[\"left\"]))]\n\n self.side_to_dp[\"right\"] = DataPanel.from_batch(\n {\n \"key\": np.arange(lengths[\"right\"]),\n \"b\": list(np.arange(lengths[\"right\"])),\n \"e\": [f\"1_{i}\" for i in np.arange(lengths[\"right\"])],\n \"f\": (np.arange(lengths[\"right\"]) % 2),\n }\n )\n else:\n for side in [\"left\", \"right\"]:\n side_tmpdir = os.path.join(tmpdir, side)\n os.makedirs(side_tmpdir)\n column_testbeds = self._build_column_testbeds(\n column_configs, length=lengths[side], tmpdir=side_tmpdir\n )\n columns = {\n name: testbed.col for name, testbed in column_testbeds.items()\n }\n dp = DataPanel.from_batch(columns)\n\n dp[\"key\"] = np.arange(len(dp))\n\n if consolidated:\n dp.consolidate()\n\n if side == \"left\":\n np.random.seed(1)\n dp = dp.lz[np.random.permutation(np.arange(len(dp)))]\n self.side_to_dp[side] = dp\n\n\[email protected]\ndef testbed(request, tmpdir):\n config = request.param\n return MergeTestBed(**config, tmpdir=tmpdir)\n\n\nclass TestMerge:\n @MergeTestBed.parametrize(params={\"sort\": [True, False]})\n def test_merge_inner(self, testbed: MergeTestBed, sort):\n dp1, dp2 = (\n testbed.side_to_dp[\"left\"],\n testbed.side_to_dp[\"right\"],\n )\n\n out = dp1.merge(\n dp2,\n on=\"key\",\n how=\"inner\",\n keep_indexes=False,\n suffixes=(\"_1\", \"_2\"),\n sort=sort,\n )\n\n assert isinstance(out, DataPanel)\n assert len(out) == min(len(dp1), len(dp2))\n\n # # check sorted\n if sort:\n assert np.all(np.diff(out[\"key\"]) >= 0)\n\n # assert set(out.columns) == set(expected_columns)\n for name in dp1.columns:\n if name in [\"key\", \"index\"]:\n continue\n\n if isinstance(out[f\"{name}_1\"], ImageColumn):\n assert out[f\"{name}_1\"].__class__ == out[f\"{name}_2\"].__class__\n assert out[f\"{name}_1\"].data.is_equal(\n out[f\"{name}_2\"].data.str.replace(\"right\", \"left\")\n )\n else:\n assert out[f\"{name}_1\"].is_equal(out[f\"{name}_2\"])\n\n @MergeTestBed.parametrize(config={\"simple\": [True]}, params={\"sort\": [True, False]})\n def test_merge_outer(self, testbed, sort):\n dp1, dp2 = (\n testbed.side_to_dp[\"left\"],\n testbed.side_to_dp[\"right\"],\n )\n out = dp1.merge(\n dp2,\n on=\"key\",\n how=\"outer\",\n keep_indexes=False,\n suffixes=(\"_1\", \"_2\"),\n sort=sort,\n )\n\n a1 = set(dp1[\"key\"])\n a2 = set(dp2[\"key\"])\n\n assert isinstance(out, DataPanel)\n assert len(out) == len(a1 | a2)\n\n # check columns\n expected_columns = [\"key\", \"b_1\", \"b_2\", \"c\", \"d\", \"e_1\", \"e_2\", \"f\", \"index\"]\n assert set(out.columns) == set(expected_columns)\n\n # check sorted\n if sort:\n assert np.all(np.diff(out[\"key\"]) >= 0)\n\n # check for `None` at unmatched rows\n mask_both = np.where([val in (a1 & a2) for val in out[\"key\"]])[0]\n mask_1 = np.where([val in (a1 - a2) for val in out[\"key\"]])[0]\n mask_2 = np.where([val in (a2 - a1) for val in out[\"key\"]])[0]\n # check for equality at matched rows\n assert list(out.lz[mask_both][\"b_1\"]) == list(out.lz[mask_both][\"b_2\"])\n # check for `values` at unmatched rows\n assert set(out.lz[mask_1][\"b_1\"]) == a1 - a2\n assert set(out.lz[mask_2][\"b_2\"]) == a2 - a1\n # check for `None` at unmatched rows\n assert list(out.lz[mask_1][\"b_2\"]) == [None] * len(mask_1)\n assert list(out.lz[mask_2][\"b_1\"]) == [None] * len(mask_2)\n\n # check for `values` at unmatched rows\n assert set(out.lz[mask_1][\"e_1\"]) == set([f\"1_{i}\" for i in a1 - a2])\n assert set(out.lz[mask_2][\"e_2\"]) == set([f\"1_{i}\" for i in a2 - a1])\n # check for equality at matched rows\n assert list(out.lz[mask_1][\"e_2\"]) == [None] * len(mask_1)\n assert list(out.lz[mask_2][\"e_1\"]) == [None] * len(mask_2)\n\n @MergeTestBed.parametrize(config={\"simple\": [True]}, params={\"sort\": [True, False]})\n def test_merge_left(self, testbed, sort):\n dp1, dp2 = (\n testbed.side_to_dp[\"left\"],\n testbed.side_to_dp[\"right\"],\n )\n out = dp1.merge(\n dp2,\n on=\"key\",\n how=\"left\",\n keep_indexes=False,\n suffixes=(\"_1\", \"_2\"),\n sort=sort,\n )\n\n a1 = set(dp1[\"key\"])\n a2 = set(dp2[\"key\"])\n\n assert isinstance(out, DataPanel)\n assert len(out) == len(a1)\n\n # check columns\n expected_columns = [\"key\", \"b_1\", \"b_2\", \"c\", \"d\", \"e_1\", \"e_2\", \"index\", \"f\"]\n assert set(out.columns) == set(expected_columns)\n\n # check sorted\n if sort:\n assert np.all(np.diff(out[\"key\"]) >= 0)\n\n # check for `None` at unmatched rows\n mask_both = np.where([val in (a1 & a2) for val in out[\"key\"]])[0]\n mask_1 = np.where([val in (a1 - a2) for val in out[\"key\"]])[0]\n\n # check for equality at matched rows\n assert list(out.lz[mask_both][\"b_1\"]) == list(out.lz[mask_both][\"b_2\"])\n # check for `values` at unmatched rows\n assert set(out.lz[mask_1][\"b_1\"]) == a1 - a2\n # check for `None` at unmatched rows\n assert list(out.lz[mask_1][\"b_2\"]) == [None] * len(mask_1)\n\n # check for `values` at unmatched rows\n assert set(out.lz[mask_1][\"e_1\"]) == set([f\"1_{i}\" for i in a1 - a2])\n # check for equality at matched rows\n assert list(out.lz[mask_1][\"e_2\"]) == [None] * len(mask_1)\n\n @MergeTestBed.parametrize(config={\"simple\": [True]}, params={\"sort\": [True, False]})\n def test_merge_right(self, testbed, sort):\n dp1, dp2 = (\n testbed.side_to_dp[\"left\"],\n testbed.side_to_dp[\"right\"],\n )\n out = dp1.merge(\n dp2,\n on=\"key\",\n how=\"right\",\n keep_indexes=False,\n suffixes=(\"_1\", \"_2\"),\n sort=sort,\n )\n\n a1 = set(dp1[\"key\"])\n a2 = set(dp2[\"key\"])\n\n assert isinstance(out, DataPanel)\n assert len(out) == len(a2)\n\n # check columns\n expected_columns = [\"key\", \"b_1\", \"b_2\", \"c\", \"d\", \"e_1\", \"e_2\", \"f\", \"index\"]\n assert set(out.columns) == set(expected_columns)\n\n # check sorted\n if sort:\n assert np.all(np.diff(out[\"key\"]) >= 0)\n\n # check for `None` at unmatched rows\n mask_both = np.where([val in (a1 & a2) for val in out[\"key\"]])[0]\n mask_2 = np.where([val in (a2 - a1) for val in out[\"key\"]])[0]\n # check for equality at matched rows\n assert list(out.lz[mask_both][\"b_1\"]) == list(out.lz[mask_both][\"b_2\"])\n # check for `values` at unmatched rows\n assert set(out.lz[mask_2][\"b_2\"]) == a2 - a1\n # check for `None` at unmatched rows\n assert list(out.lz[mask_2][\"b_1\"]) == [None] * len(mask_2)\n\n # check for `values` at unmatched rows\n assert set(out.lz[mask_2][\"e_2\"]) == set([f\"1_{i}\" for i in a2 - a1])\n # check for equality at matched rows\n assert list(out.lz[mask_2][\"e_1\"]) == [None] * len(mask_2)\n\n def test_merge_output_column_types(self):\n dp1 = DataPanel.from_batch(\n {\"a\": np.arange(3), \"b\": ListColumn([\"1\", \"2\", \"3\"])}\n )\n dp2 = dp1.copy()\n\n out = dp1.merge(dp2, on=\"b\", how=\"inner\")\n assert isinstance(out[\"b\"], ListColumn)\n\n def test_image_merge(self, tmpdir):\n length = 16\n img_col_test_bed = MockImageColumn(length=length, tmpdir=tmpdir)\n dp1 = DataPanel.from_batch(\n {\n \"a\": np.arange(length),\n \"img\": img_col_test_bed.col,\n }\n )\n rows = np.arange(4, 8)\n dp2 = DataPanel.from_batch(\n {\n \"a\": rows,\n }\n )\n\n out = dp1.merge(dp2, on=\"a\", how=\"inner\")\n assert isinstance(out[\"img\"], ImageColumn)\n assert [str(fp) for fp in out[\"img\"].data] == [\n img_col_test_bed.image_paths[row] for row in rows\n ]\n\n def test_no_on(self):\n length = 16\n # check dictionary not hashable\n dp1 = DataPanel.from_batch(\n {\n \"a\": ListColumn([{\"a\": 1}] * length),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2)\n\n def test_check_merge_columns(self):\n length = 16\n # check dictionary not hashable\n dp1 = DataPanel.from_batch(\n {\n \"a\": ListColumn([{\"a\": 1}] * length),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=[\"a\"])\n\n # check multi-on\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=[\"a\", \"b\"])\n\n # check multi-dimensional numpy array\n dp1 = DataPanel.from_batch(\n {\n \"a\": NumpyArrayColumn(np.stack([np.arange(5)] * length)),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=\"a\")\n\n # check multi-dimensional numpy array\n dp1 = DataPanel.from_batch(\n {\n \"a\": TensorColumn(torch.stack([torch.arange(5)] * length)),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=\"a\")\n\n # checks that **all** cells are hashable (not just the first)\n dp1 = DataPanel.from_batch(\n {\n \"a\": ListColumn([\"hello\"] + [{\"a\": 1}] * (length - 1)),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=\"a\")\n\n # checks if Cells in cell columns are NOT hashable\n dp1 = DataPanel.from_batch(\n {\n \"a\": ImageColumn.from_filepaths([\"a\"] * length),\n \"b\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=\"a\")\n\n # checks that having a column called __right_indices__ raises a merge error\n dp1 = DataPanel.from_batch(\n {\n \"a\": ListColumn([\"hello\"] + [{\"a\": 1}] * (length - 1)),\n \"b\": list(np.arange(length)),\n \"__right_indices__\": list(np.arange(length)),\n }\n )\n dp2 = dp1.copy()\n with pytest.raises(MergeError):\n dp1.merge(dp2, on=\"__right_indices__\")\n" ]
[ [ "torch.arange", "numpy.random.seed", "numpy.diff", "numpy.where", "numpy.arange" ] ]
emikoifish/woltka
[ "e4207bba3550b75bb153c7d094d0b1a577a15bc4" ]
[ "woltka/tests/test_biom.py" ]
[ "#!/usr/bin/env python3\n\n# ----------------------------------------------------------------------------\n# Copyright (c) 2020--, Qiyun Zhu.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom unittest import TestCase, main\nfrom os import remove\nfrom os.path import join, dirname, realpath\nfrom shutil import rmtree\nfrom tempfile import mkdtemp\n\nimport pandas as pd\nfrom biom import load_table\nfrom numpy.testing import assert_array_equal\nfrom pandas.testing import assert_frame_equal\n\nfrom woltka.biom import profile_to_biom, write_biom\n\n\nclass BiomTests(TestCase):\n def setUp(self):\n self.tmpdir = mkdtemp()\n self.datdir = join(dirname(realpath(__file__)), 'data')\n\n def tearDown(self):\n rmtree(self.tmpdir)\n\n def test_profile_to_biom(self):\n # default mode\n profile = {'S1': {'G1': 4, 'G2': 5, 'G3': 8},\n 'S2': {'G1': 2, 'G4': 3, 'G5': 7},\n 'S3': {'G2': 3, 'G5': 5}}\n table = profile_to_biom(profile)\n samples = ['S1', 'S2', 'S3']\n assert_array_equal(table.ids('sample'), samples)\n features = ['G1', 'G2', 'G3', 'G4', 'G5']\n assert_array_equal(table.ids('observation'), features)\n data = [[4, 2, 0], [5, 0, 3], [8, 0, 0], [0, 3, 0], [0, 7, 5]]\n obs = table.to_dataframe(dense=True).astype(int).values\n assert_array_equal(obs, data)\n\n # with sample Ids\n table = profile_to_biom(profile, samples=['S3', 'S1'])\n obs = table.to_dataframe(dense=True).astype(int)\n exp = pd.DataFrame([[0, 4], [3, 5], [0, 8], [0, 0], [5, 0]],\n index=features, columns=['S3', 'S1'])\n assert_frame_equal(obs, exp)\n\n # some sample Ids are not in profile\n table = profile_to_biom(profile, samples=['S3', 'S0', 'S1'])\n obs = table.to_dataframe(dense=True).astype(int)\n assert_frame_equal(obs, exp)\n\n # with taxon names\n namedic = {'G1': 'Actinobacteria',\n 'G2': 'Firmicutes',\n 'G3': 'Bacteroidetes',\n 'G4': 'Cyanobacteria'}\n table = profile_to_biom(profile, namedic=namedic)\n obs = table.to_dataframe(dense=True).astype(int)\n exp = pd.DataFrame(data, features, samples)\n assert_frame_equal(obs, exp)\n obs = table.metadata_to_dataframe('observation')['Name']\n names = ['Actinobacteria', 'Firmicutes', 'Bacteroidetes',\n 'Cyanobacteria', None]\n assert_array_equal(obs, names)\n\n # with taxon names to replace Ids\n table = profile_to_biom(profile, namedic=namedic, name_as_id=True)\n obs = table.to_dataframe(dense=True).astype(int)\n exp = pd.DataFrame(data, names[:4] + ['G5'], samples)\n assert_frame_equal(obs, exp)\n\n # with ranks\n rankdic = {'G1': 'class', 'G2': 'phylum', 'G4': 'phylum'}\n table = profile_to_biom(profile, rankdic=rankdic)\n obs = table.metadata_to_dataframe('observation')['Rank']\n exp = ['class', 'phylum', None, 'phylum', None]\n assert_array_equal(obs, exp)\n\n # with lineages\n tree = {'G1': '74', '74': '72', 'G2': '72', 'G3': '70', 'G4': '72',\n 'G5': '1', '72': '2', '70': '2', '2': '1', '1': '1'}\n table = profile_to_biom(profile, tree=tree)\n obs = table.metadata_to_dataframe('observation')['Lineage']\n exp = ['2;72;74', '2;72', '2;70', '2;72', None]\n assert_array_equal(obs, exp)\n\n # with lineages and names as Ids\n namedic.update({\n '74': 'Actino', '72': 'Terra', '70': 'FCB', '2': 'Bacteria'})\n table = profile_to_biom(\n profile, tree=tree, namedic=namedic, name_as_id=True)\n obs = table.metadata_to_dataframe('observation')['Lineage']\n exp = ['Bacteria;Terra;Actino', 'Bacteria;Terra', 'Bacteria;FCB',\n 'Bacteria;Terra', None]\n assert_array_equal(obs, exp)\n\n # with stratification\n profile = {'S1': {('A', 'G1'): 4,\n ('A', 'G2'): 5,\n ('B', 'G1'): 8},\n 'S2': {('A', 'G1'): 2,\n ('B', 'G1'): 3,\n ('B', 'G2'): 7},\n 'S3': {('B', 'G3'): 3,\n ('C', 'G2'): 5}}\n table = profile_to_biom(profile)\n obs = table.to_dataframe(dense=True).astype(int)\n data = [[4, 2, 0], [5, 0, 0], [8, 3, 0], [0, 7, 0], [0, 0, 3],\n [0, 0, 5]]\n index = ['A|G1', 'A|G2', 'B|G1', 'B|G2', 'B|G3', 'C|G2']\n exp = pd.DataFrame(data, index=index, columns=samples)\n assert_frame_equal(obs, exp)\n\n def test_write_biom(self):\n profile = {'S1': {'G1': 4, 'G2': 5, 'G3': 8},\n 'S2': {'G1': 2, 'G4': 3, 'G5': 7},\n 'S3': {'G2': 3, 'G5': 5}}\n exp = profile_to_biom(profile)\n fp = join(self.tmpdir, 'tmp.biom')\n write_biom(exp, fp)\n obs = load_table(fp)\n self.assertEqual(obs.descriptive_equality(exp), 'Tables appear equal')\n remove(fp)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.DataFrame", "numpy.testing.assert_array_equal", "pandas.testing.assert_frame_equal" ] ]
cabustillo13/Imagenes-microscopicas
[ "ab1a3d749cdc90e27c96a701eb6a7ce6e8e50854" ]
[ "segmentacionHistograma.py" ]
[ " # -*- coding: utf-8 -*-\n \n\"\"\" Segmentar imágenes del microscopio a través del histograma\"\"\"\n\nfrom skimage import io, img_as_ubyte, img_as_float\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom skimage.restoration import denoise_nl_means, estimate_sigma\nfrom scipy import ndimage as nd\n\nimg = io.imread(\"./Imagenes/segmentacion/BSE_25sigma_noisy.jpg\")\n\n# Cambiar el tipo de dato uint8 a float\nfloat_img = img_as_float(img)\n\n# En vez de definir arbitrariamente el sigma, se puede determinar de esta manera\nsigma_est = np.mean(estimate_sigma(float_img, multichannel=True))\n\ndenoise_img = denoise_nl_means(float_img, h=1.15 * sigma_est, fast_mode=False, \n patch_size=5, patch_distance=3, multichannel=True)\n\n# Nuevamente se realiza cambio de dato -> Me sirve simplemente para visualizar la imagen\ndenoise_img_as_8byte = img_as_ubyte(denoise_img)\n\n# Arbitrariamente podemos definir segmentos arbitrarios\nsegm1 = (denoise_img_as_8byte <= 57)\nsegm2 = (denoise_img_as_8byte > 57) & (denoise_img_as_8byte <= 110)\nsegm3 = (denoise_img_as_8byte > 110) & (denoise_img_as_8byte <= 210)\nsegm4 = (denoise_img_as_8byte > 210)\n\n# Para mostrar las imágenes -> Elimina el ruido del tamaño de la imagen, pero en blanco\nall_segments = np.zeros((denoise_img_as_8byte.shape[0], denoise_img_as_8byte.shape[1], 3))\n\nall_segments[segm1] = (1,0,0)\nall_segments[segm2] = (0,1,0)\nall_segments[segm3] = (0,0,1)\nall_segments[segm4] = (1,1,0)\nplt.imshow(all_segments)\nplt.show()\n\n\"\"\" \nMuchos puntos amarillos, puntos rojos y puntos perdidos. ¿cómo limpiar la imagen? \nPodemos utilizar operaciones binarias de opening y closing: \nEl Opening se encarga de los píxeles aislados dentro de la ventana\nEl Closing se encarga de los agujeros aislados dentro de la ventana definida\n\"\"\"\n\nsegm1_opened = nd.binary_opening(segm1, np.ones((3,3)))\nsegm1_closed = nd.binary_closing(segm1_opened, np.ones((3,3)))\n\nsegm2_opened = nd.binary_opening(segm2, np.ones((3,3)))\nsegm2_closed = nd.binary_closing(segm2_opened, np.ones((3,3)))\n\nsegm3_opened = nd.binary_opening(segm3, np.ones((3,3)))\nsegm3_closed = nd.binary_closing(segm3_opened, np.ones((3,3)))\n\nsegm4_opened = nd.binary_opening(segm4, np.ones((3,3)))\nsegm4_closed = nd.binary_closing(segm4_opened, np.ones((3,3)))\n\nall_segments_cleaned = np.zeros((denoise_img_as_8byte.shape[0], denoise_img_as_8byte.shape[1], 3)) #nothing but 714, 901, 3\n\nall_segments_cleaned[segm1_closed] = (1,0,0)\nall_segments_cleaned[segm2_closed] = (0,1,0)\nall_segments_cleaned[segm3_closed] = (0,0,1)\nall_segments_cleaned[segm4_closed] = (1,1,0)\n\nplt.imshow(all_segments_cleaned) # Todo el ruido debería limpiarse ahora\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.ones", "numpy.zeros", "matplotlib.pyplot.imshow" ] ]
thetak11/learning-kis
[ "f1c380f351a050291e092d13093f68f173f881b8" ]
[ "lkis.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\" Learning Koopman Invariant Subspace\n (c) Naoya Takeishi, 2017.\n [email protected]\n\"\"\"\n\nimport numpy\nfrom scipy import linalg\nfrom chainer import link\nfrom chainer import Variable\nfrom chainer import Chain\nfrom chainer import dataset\nfrom chainer import reporter as reporter_module\nfrom chainer import training\nfrom chainer import initializers\nfrom chainer.training import extensions\nimport chainer.functions as F\nimport chainer.links as L\n\n# ==========\n\ndef ls_solution(g0, g1):\n \"\"\" Get least-squares solution matrix for regression from rows of g0\n to rows of g1. Both g0 and g1 are chainer's Variable.\n \"\"\"\n g0t = F.transpose(g0)\n if g0.shape[0] >= g0.shape[1]:\n g0pinv = F.matmul(F.inv(F.matmul(g0t, g0)), g0t)\n else:\n g0pinv = F.matmul(g0t, F.inv(F.matmul(g0, g0t)))\n K = F.transpose(F.matmul(g0pinv, g1))\n return K\n\n# ==========\n\ndef dmd(y0, y1, eps=1e-6):\n \"\"\" Do DMD. Both y0 and y1 are numpy array.\n \"\"\"\n Y0 = y0.T\n Y1 = y1.T\n U, S, Vh, = linalg.svd(Y0, full_matrices=False)\n r = len(numpy.where(S>=eps)[0])\n U = U[:,:r]\n invS = numpy.diag(1./S[:r])\n V = Vh.conj().T[:,:r]\n M = numpy.dot(numpy.dot(Y1, V), invS)\n A_til = numpy.dot(U.conj().T, M)\n lam, z_til, w_til = linalg.eig(A_til, left=True)\n w = numpy.dot(numpy.dot(M, w_til), numpy.diag(1./lam)) + 1j*numpy.zeros(z_til.shape)\n z = numpy.dot(U, z_til) + 1j*numpy.zeros(z_til.shape)\n for i in range(w.shape[1]):\n z[:,i] = z[:,i] / numpy.dot(w[:,i].conj(), z[:,i])\n return lam, w, z\n\n# ==========\n\nclass DelayPairDataset(dataset.DatasetMixin):\n def __init__(self, values, dim_delay, n_lag=1):\n if isinstance(values, list):\n self.values = values\n else:\n self.values = [values,]\n self.lens = tuple(value.shape[0]-(dim_delay-1)*n_lag-1 for value in self.values)\n self.a_s = [0 for i in range(sum(self.lens))]\n for i in range(sum(self.lens)):\n for j in range(len(self.values)):\n if i >= sum(self.lens[0:j]):\n self.a_s[i] = j\n self.dim_delay = dim_delay\n self.n_lag = n_lag\n\n def __len__(self):\n return sum(self.lens)\n\n def get_example(self, i):\n tau = self.n_lag\n k = self.dim_delay\n a = self.a_s[i]\n b = i - sum(self.lens[0:a])\n return (self.values[a][b:b+(k-1)*tau+1:tau], self.values[a][b+1:b+(k-1)*tau+2:tau])\n\n# ==========\n\nclass Embedder(Chain):\n def __init__(self, dimy, delay, dim_emb):\n super(Embedder, self).__init__(l1 = L.Linear(dimy*delay, dim_emb))\n\n def __call__(self, x):\n return self.l1(x)\n\n# ==========\n\nclass Observable(Chain):\n def __init__(self, dim_g, dim_emb):\n n_h = round((dim_g+dim_emb)*0.5)\n super(Observable, self).__init__(\n l1 = L.Linear(dim_emb, n_h),\n p1 = L.PReLU(),\n b1 = L.BatchNormalization(n_h),\n l2 = L.Linear(n_h, dim_g)\n )\n self.add_persistent('dim_g', dim_g)\n\n def __call__(self, x, train=True):\n return self.l2(self.b1(self.p1(self.l1(x)), test=not train))\n\n# ==========\n\nclass Reconstructor(Chain):\n def __init__(self, dim_y, dim_g):\n n_h = round((dim_y+dim_g)*0.5)\n super(Reconstructor, self).__init__(\n l1 = L.Linear(dim_g, n_h),\n p1 = L.PReLU(),\n b1 = L.BatchNormalization(n_h),\n l2 = L.Linear(n_h, dim_y)\n )\n\n def __call__(self, x, train=True):\n # The nonlinearlity of Reconstructor is realized by p1 (PReLU function),\n # so eliminating p1 from calculation makes Reconstructor linear.\n #return self.l2(self.b1(self.l1(x), test=not train))\n return self.l2(self.b1(self.p1(self.l1(x)), test=not train))\n\n# ==========\n\nclass Network(Chain):\n def __init__(self, dim_emb, dim_g, dim_y):\n super(Network, self).__init__(\n b = L.BatchNormalization(dim_emb),\n g = Observable(dim_g, dim_emb),\n h = Reconstructor(dim_y, dim_g)\n )\n\n def __call__(self, y0, y1, phi=None, train=True):\n x0 = self.b(phi(y0), test=not train)\n x1 = self.b(phi(y1), test=not train)\n g0 = self.g(x0, train=train)\n g1 = self.g(x1, train=train)\n h0 = self.h(g0, train=train)\n h1 = self.h(g1, train=train)\n return g0, g1, h0, h1\n\n# ==========\n\nclass Loss(Chain):\n def __init__(self, phi, net, alpha=1.0, decay=0.9):\n super(Loss, self).__init__(\n phi = phi,\n net = net\n )\n self.add_persistent('alpha', alpha)\n self.add_persistent('decay', decay)\n\n def __call__(self, y0, y1, train=True):\n g0, g1, h0, h1 = self.net(y0, y1, phi=self.phi, train=train)\n\n loss1 = F.mean_squared_error(F.linear(g0, ls_solution(g0, g1)), g1)\n loss2 = F.mean_squared_error(h0, F.transpose(y0,axes=(1,0,2))[-1])\n loss3 = F.mean_squared_error(h1, F.transpose(y1,axes=(1,0,2))[-1])\n loss = loss1 + self.alpha*0.5*(loss2+loss3)\n\n reporter_module.report({\n 'loss': loss,\n 'loss_kpm': loss1,\n 'loss_rec': 0.5*(loss2+loss3)\n }, self.net)\n\n return loss\n\n# ==========\n\nclass Updater(training.StandardUpdater):\n def update_core(self):\n batch = self._iterators['main'].next()\n in_arrays = self.converter(batch, self.device)\n in_vars = tuple(Variable(x) for x in in_arrays)\n for optimizer in self._optimizers.values():\n optimizer.update(self.loss_func, *in_vars)\n\n# ==========\n\nclass Evaluator(extensions.Evaluator):\n def __init__(self, iterator, target, converter=dataset.convert.concat_examples,\n device=None, eval_hook=None, eval_func=None, trigger=(1,'epoch')):\n if isinstance(iterator, dataset.iterator.Iterator):\n iterator = {'main': iterator}\n self._iterators = iterator\n\n if isinstance(target, link.Link):\n target = {'main': target}\n self._targets = target\n\n self.converter = converter\n self.device = device\n self.eval_hook = eval_hook\n self.eval_func = eval_func\n self.trigger = trigger\n\n def evaluate(self):\n iterator = self._iterators['main']\n target = self._targets['main']\n eval_func = self.eval_func or target\n\n if self.eval_hook:\n self.eval_hook(self)\n\n if hasattr(iterator, 'reset'):\n iterator.reset()\n it = iterator\n else:\n it = copy.copy(iterator)\n\n summary = reporter_module.DictSummary()\n for batch in it:\n observation = {}\n with reporter_module.report_scope(observation):\n in_arrays = self.converter(batch, self.device)\n in_vars = tuple(Variable(x, volatile='on')\n for x in in_arrays)\n eval_func(*in_vars, train=False)\n summary.add(observation)\n\n return summary.compute_mean()\n" ]
[ [ "numpy.dot", "numpy.zeros", "scipy.linalg.svd", "numpy.where", "scipy.linalg.eig", "numpy.diag" ] ]
MarchRaBBiT/pipelinex
[ "ea8def32a71752b667f9f3522acba3fd79102fe1" ]
[ "src/pipelinex/extras/decorators/pandas_decorators.py" ]
[ "from functools import wraps\nimport pandas as pd\nfrom typing import Callable, List, Union\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef log_df_summary(func: Callable) -> Callable:\n @wraps(func)\n def wrapper(df, *args, **kwargs):\n log = logging.getLogger(__name__)\n if isinstance(df, pd.DataFrame):\n shape_before = df.shape\n cols_before = df.columns.to_list()\n log.info(\n \"{}\".format(\n dict(\n DF_shape_before=\"{}\".format(shape_before),\n func=\"{}\".format(getattr(func, \"__qualname__\", repr(func))),\n )\n )\n )\n df = func(df, *args, **kwargs)\n if isinstance(df, pd.DataFrame):\n shape_after = df.shape\n cols_after = df.columns.to_list()\n cols_added = [col for col in cols_after if col not in cols_before]\n cols_removed = [col for col in cols_before if col not in cols_after]\n log.info(\n \"{}\".format(\n dict(\n DF_shape_after=\"{}\".format(shape_after),\n Columns_added=\"{}\".format(cols_added),\n Columns_removed=\"{}\".format(cols_removed),\n )\n )\n )\n return df\n\n return wrapper\n\n\ndef df_set_index(cols: Union[List[str], str],) -> Callable:\n \"\"\" decorator with arguments \"\"\"\n if not isinstance(cols, list):\n cols = [cols]\n\n def decorator(func: Callable) -> Callable:\n \"\"\" decorator without arguments \"\"\"\n\n @wraps(func)\n def wrapper(df, parameters, *args, **kwargs):\n if isinstance(df, pd.DataFrame):\n for col in cols:\n if col not in df.columns:\n log.warning(\"Could not find column: \".format(col))\n cols_ = [col for col in cols if col in df.columns]\n df.set_index(keys=cols_, inplace=True)\n df = func(df, parameters, *args, **kwargs)\n if isinstance(df, pd.DataFrame):\n df.reset_index(inplace=True)\n return df\n\n return wrapper\n\n return decorator\n\n\ndef total_seconds_to_datetime(\n cols: Union[List[str], str], origin: str = \"1970-01-01\"\n) -> Callable:\n \"\"\" decorator with arguments \"\"\"\n if not isinstance(cols, list):\n cols = [cols]\n\n def decorator(func: Callable) -> Callable:\n \"\"\" decorator without arguments \"\"\"\n\n @wraps(func)\n def wrapper(df, parameters, *args, **kwargs):\n if isinstance(df, pd.DataFrame):\n for col in cols:\n if col not in df.columns:\n log.warning(\"Could not find column: \".format(col))\n cols_ = [col for col in cols if col in df.columns]\n for col in cols_:\n df.loc[:, col] = pd.to_datetime(\n df[col], unit=\"s\", origin=pd.Timestamp(origin)\n )\n df = func(df, parameters, *args, **kwargs)\n if isinstance(df, pd.DataFrame):\n for col in cols_:\n df.loc[:, col] = (df[col] - pd.Timestamp(origin)).dt.total_seconds()\n return df\n\n return wrapper\n\n return decorator\n" ]
[ [ "pandas.Timestamp" ] ]
wavelets/chainer
[ "100773c7b86e699a320e54fe43b182a4158af771" ]
[ "chainer/functions/basic_math.py" ]
[ "import math\nfrom numbers import Number\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer import utils\nfrom chainer.utils import type_check\nfrom chainer import variable\n\n\n# ------------------------------------------------------------------------------\n# Arithmetic\n# ------------------------------------------------------------------------------\n\ndef _convert_value_to_string(value):\n if isinstance(value, variable.Variable):\n value = value.data\n\n if isinstance(value, float):\n return str(value)\n elif isinstance(value, (numpy.ndarray, cuda.GPUArray)):\n return 'constant array'\n else:\n raise ValueError(\n 'value must be float, ndarray, GPUArray, or Variable')\n\n\nclass Neg(function.Function):\n\n @property\n def label(self):\n return '__neg__'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n return utils.force_array(-x[0]),\n\n def backward(self, x, gy):\n return utils.force_array(-gy[0]),\n\n\ndef neg(x): # -x\n return Neg()(x)\n\n\nclass Absolute(function.Function):\n\n @property\n def label(self):\n return '|_|'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n return utils.force_array(abs(x[0])),\n\n def backward_cpu(self, x, gy):\n return utils.force_array(numpy.sign(x[0]) * gy[0]),\n\n def backward_gpu(self, x, gy):\n gx0 = cuda.empty_like(x[0])\n cuda.elementwise(\n 'float* gx0, const float* x0, const float* gy',\n 'gx0[i] = ((x0[i] > 0) - (x0[i] < 0)) * gy[i]',\n 'abs_bwd')(gx0, x[0], gy[0])\n return gx0,\n\n\ndef absolute(x):\n return Absolute()(x)\n\n\nclass Add(function.Function):\n\n @property\n def label(self):\n return '_ + _'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, x):\n y = utils.force_array(x[0] + x[1])\n return y,\n\n def backward(self, x, gy):\n return gy[0], gy[0]\n\n\nclass AddConstant(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '_ + %s' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n return utils.force_array(x[0] + value),\n\n def backward(self, x, gy):\n return gy[0],\n\n\ndef add(lhs, rhs): # lhs + rhs\n if isinstance(rhs, variable.Variable):\n return Add()(lhs, rhs)\n return AddConstant(rhs)(lhs)\n\n\nclass Sub(function.Function):\n\n @property\n def label(self):\n return '_ - _'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, x):\n return utils.force_array(x[0] - x[1]),\n\n def backward(self, x, gy):\n return gy[0], utils.force_array(-gy[0])\n\n\ndef sub(lhs, rhs): # lhs - rhs\n if isinstance(rhs, variable.Variable):\n return Sub()(lhs, rhs)\n return AddConstant(-rhs)(lhs)\n\n\nclass SubFromConstant(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '%s - _' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n return utils.force_array(value - x[0]),\n\n def backward(self, x, gy):\n return utils.force_array(-gy[0]),\n\n\ndef rsub(lhs, rhs): # rhs - lhs\n if isinstance(rhs, variable.Variable):\n return Sub()(rhs, lhs)\n return SubFromConstant(rhs)(lhs)\n\n\nclass Mul(function.Function):\n\n @property\n def label(self):\n return '_ * _'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, x):\n return utils.force_array(x[0] * x[1]),\n\n def backward_cpu(self, x, gy):\n return utils.force_array(gy[0] * x[1]), utils.force_array(gy[0] * x[0])\n\n def backward_gpu(self, x, gy):\n gx0 = cuda.empty_like(x[0])\n gx1 = cuda.empty_like(x[1])\n cuda.elementwise(\n '''\n float* gx0, float* gx1, const float* x0, const float* x1,\n const float* gy\n ''', '''\n gx0[i] = gy[i] * x1[i];\n gx1[i] = gy[i] * x0[i];\n ''', 'mul_bwd')(gx0, gx1, x[0], x[1], gy[0])\n return gx0, gx1\n\n\nclass MulConstant(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '_ * %s' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n return utils.force_array(value * x[0]),\n\n def backward(self, x, gy):\n value = utils.force_type(gy[0].dtype, self.value)\n return utils.force_array(value * gy[0]),\n\n\ndef mul(lhs, rhs): # lhs * rhs\n if isinstance(rhs, variable.Variable):\n return Mul()(lhs, rhs)\n return MulConstant(rhs)(lhs)\n\n\nclass Div(function.Function):\n\n @property\n def label(self):\n return '_ / _'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, x):\n return utils.force_array(x[0] / x[1]),\n\n def backward_cpu(self, x, gy):\n gx0 = utils.force_array(gy[0] / x[1])\n return gx0, utils.force_array(-gx0 * x[0] / x[1])\n\n def backward_gpu(self, x, gy):\n gx0 = cuda.empty_like(x[0])\n gx1 = cuda.empty_like(x[1])\n cuda.elementwise(\n '''\n float* gx0, float* gx1, const float* x0, const float* x1,\n const float* gy\n ''', '''\n gx0[i] = gy[i] / x1[i];\n gx1[i] = -gx0[i] * x0[i] / x1[i];\n ''', 'div_bwd')(gx0, gx1, x[0], x[1], gy[0])\n return gx0, gx1\n\n\ndef div(lhs, rhs): # lhs / rhs\n if isinstance(rhs, variable.Variable):\n return Div()(lhs, rhs)\n return MulConstant(1. / rhs)(lhs)\n\n\nclass DivFromConstant(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '_ / %s' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n return utils.force_array(value / x[0]),\n\n def backward_cpu(self, x, gy):\n value = utils.force_type(gy[0].dtype, self.value)\n return utils.force_array(-value * gy[0] / (x[0] ** 2)),\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n if isinstance(self.value, Number):\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float value\n ''',\n 'gx[i] = -value * gy[i] / (x[i] * x[i])',\n 'div_from_const_bwd')(gx, x[0], gy[0], self.value)\n else:\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float* value\n ''',\n 'gx[i] = -value[i] * gy[i] / (x[i] * x[i])',\n 'div_from_const_bwd')(gx, x[0], gy[0], self.value)\n return gx,\n\n\ndef rdiv(lhs, rhs): # rhs / lhs\n if isinstance(rhs, variable.Variable):\n return Div()(rhs, lhs)\n return DivFromConstant(rhs)(lhs)\n\n\nclass PowVarVar(function.Function):\n\n @property\n def label(self):\n return '_ ** _'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward_cpu(self, x):\n self.y = utils.force_array(x[0] ** x[1])\n return self.y,\n\n def forward_gpu(self, x):\n return x[0] ** x[1],\n\n def backward_cpu(self, x, gy):\n one = x[1].dtype.type(1)\n gx0 = utils.force_array(x[1] * (x[0] ** (x[1] - one)) * gy[0])\n gx1 = utils.force_array(numpy.log(x[0]) * self.y * gy[0])\n return gx0, gx1\n\n def backward_gpu(self, x, gy):\n gx0 = cuda.empty_like(x[0])\n gx1 = cuda.empty_like(x[1])\n cuda.elementwise(\n '''\n float* gx0, float* gx1, const float* x0, const float* x1,\n const float* gy\n ''', '''\n gx0[i] = x1[i] * powf(x0[i], x1[i] - 1) * gy[i];\n gx1[i] = __logf(x0[i]) * powf(x0[i], x1[i]) * gy[i];\n ''', 'pow_var_var_bwd')(gx0, gx1, x[0], x[1], gy[0])\n return gx0, gx1\n\n\nclass PowVarConst(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '_ ** %s' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n return utils.force_array(x[0] ** value),\n\n def backward_cpu(self, x, gy):\n val_1 = utils.force_type(x[0].dtype, self.value - 1)\n gx = utils.force_type(x[0].dtype, self.value) * (x[0] ** val_1) * gy[0]\n return utils.force_array(gx),\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n if isinstance(self.value, Number):\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float value\n ''',\n 'gx[i] = value * powf(x[i], value - 1) * gy[i]',\n 'pow_var_const_bwd')(gx, x[0], gy[0], self.value)\n else:\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float* value\n ''',\n 'gx[i] = value[i] * powf(x[i], value[i] - 1) * gy[i]',\n 'pow_var_const_bwd')(gx, x[0], gy[0], self.value)\n return gx,\n\n\ndef pow(lhs, rhs): # lhs ** rhs\n if isinstance(rhs, variable.Variable):\n return PowVarVar()(lhs, rhs)\n return PowVarConst(rhs)(lhs)\n\n\nclass PowConstVar(function.Function):\n\n def __init__(self, value):\n self.value = value\n\n @property\n def label(self):\n return '%s ** _' % _convert_value_to_string(self.value)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward_cpu(self, x):\n value = utils.force_type(x[0].dtype, self.value)\n self.y = utils.force_array(value ** x[0])\n return self.y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n if isinstance(self.value, Number):\n cuda.elementwise('float* y, const float* x, const float value',\n 'y[i] = powf(value, x[i])',\n 'pow_const_var_fwd')(y, x[0], self.value)\n else:\n cuda.elementwise('float* y, const float* x, const float *value',\n 'y[i] = powf(value[i], x[i])',\n 'pow_const_var_fwd')(y, x[0], self.value)\n return y,\n\n def backward_cpu(self, x, gy):\n value = utils.force_type(gy[0].dtype, self.value)\n return utils.force_array(numpy.log(value) * self.y * gy[0]),\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n if isinstance(self.value, Number):\n logv = math.log(self.value)\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float value, const float logv\n ''',\n 'gx[i] = logv * powf(value, x[i]) * gy[i]',\n 'pow_const_var_bwd')(gx, x[0], gy[0], self.value, logv)\n else:\n cuda.elementwise(\n '''\n float* gx, const float* x, const float* gy,\n const float* value\n ''',\n 'gx[i] = __logf(value[i]) * powf(value[i], x[i]) * gy[i]',\n 'pow_const_var_bwd')(gx, x[0], gy[0], self.value)\n return gx,\n\n\ndef rpow(lhs, rhs): # rhs ** lhs\n if isinstance(rhs, variable.Variable):\n return PowVarVar()(rhs, lhs)\n return PowConstVar(rhs)(lhs)\n\n\ndef install_variable_arithmetics():\n variable.Variable.__neg__ = neg\n variable.Variable.__abs__ = absolute\n variable.Variable.__add__ = add\n variable.Variable.__radd__ = add\n variable.Variable.__sub__ = sub\n variable.Variable.__rsub__ = rsub\n variable.Variable.__mul__ = mul\n variable.Variable.__rmul__ = mul\n variable.Variable.__div__ = div\n variable.Variable.__truediv__ = div\n variable.Variable.__rdiv__ = rdiv\n variable.Variable.__rtruediv__ = rdiv\n variable.Variable.__pow__ = pow\n variable.Variable.__rpow__ = rpow\n\n# ------------------------------------------------------------------------------\n# Special functions\n# ------------------------------------------------------------------------------\n\n\nclass Exp(function.Function):\n\n @property\n def label(self):\n return 'exp'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward_cpu(self, x):\n self.y = utils.force_array(numpy.exp(x[0]))\n return self.y,\n\n def forward_gpu(self, x):\n self.y = cuda.cumath.exp(x[0])\n return self.y,\n\n def backward(self, x, gy):\n return utils.force_array(self.y * gy[0]),\n\n\ndef exp(x):\n \"\"\"Elementwise exponential function.\"\"\"\n return Exp()(x)\n\n\nclass Log(function.Function):\n\n @property\n def label(self):\n return 'log'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n\n def forward_cpu(self, x):\n return utils.force_array(numpy.log(x[0])),\n\n def forward_gpu(self, x):\n return cuda.cumath.log(x[0]),\n\n def backward(self, x, gy):\n return utils.force_array(gy[0] / x[0]),\n\n\ndef log(x):\n \"\"\"Elementwise natural logarithm function.\"\"\"\n return Log()(x)\n\n\nclass Sin(function.Function):\n\n @property\n def label(self):\n return 'sin'\n\n def forward_cpu(self, x):\n self.y = utils.force_array(numpy.sin(x[0]))\n return self.y,\n\n def forward_gpu(self, x):\n y = cuda.cumath.sin(x[0])\n return y,\n\n def backward_cpu(self, x, gy):\n return utils.force_array(numpy.cos(x[0]) * gy[0]),\n\n def backward_gpu(self, x, gy):\n return utils.force_array(cuda.cumath.cos(x[0]) * gy[0]),\n\n\ndef sin(x):\n \"\"\"Elementwise sin function.\"\"\"\n return Sin()(x)\n\n\nclass Cos(function.Function):\n\n @property\n def label(self):\n return 'cos'\n\n def forward_cpu(self, x):\n self.y = utils.force_array(numpy.cos(x[0]))\n return self.y,\n\n def forward_gpu(self, x):\n y = cuda.cumath.cos(x[0])\n return y,\n\n def backward_cpu(self, x, gy):\n return utils.force_array(-numpy.sin(x[0]) * gy[0]),\n\n def backward_gpu(self, x, gy):\n return utils.force_array(-cuda.cumath.sin(x[0]) * gy[0]),\n\n\ndef cos(x):\n \"\"\"Elementwise cos function.\"\"\"\n return Cos()(x)\n" ]
[ [ "numpy.sin", "numpy.log", "numpy.exp", "numpy.sign", "numpy.cos" ] ]
TwentyBN/pytorch2keras
[ "d5ef05261c40cbf14db6aa00055a90f461bb39a7" ]
[ "tests/resnet18_channels_last.py" ]
[ "import numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom pytorch2keras.converter import pytorch_to_keras\nimport torchvision\n\n\nif __name__ == '__main__':\n max_error = 0\n for i in range(10):\n model = torchvision.models.resnet18()\n for m in model.modules():\n m.training = False\n\n input_np = np.random.uniform(0, 1, (1, 3, 224, 224))\n input_var = Variable(torch.FloatTensor(input_np))\n output = model(input_var)\n\n k_model = pytorch_to_keras(model, input_var, (3, 224, 224,), verbose=True, change_ordering=True)\n\n pytorch_output = output.data.numpy()\n keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1))\n\n print(pytorch_output.shape, keras_output.shape)\n\n error = np.max(pytorch_output - keras_output)\n print(error)\n if max_error < error:\n max_error = error\n\n print('Max error: {0}'.format(max_error))\n" ]
[ [ "numpy.max", "torch.FloatTensor", "numpy.random.uniform" ] ]
DavidAshraf/Logo-Classifier-
[ "0e425ab9b1dd2cff2bc1cd60ebc8235bd6c162b3" ]
[ "web app/image_utils.py" ]
[ "import torch\nimport numpy as np\nresize_size=255\nimage_size=224\nmean= np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\nfrom flask import jsonify\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n # image=Image.open(image_path)\n # TODO: Process a PIL image for use in a PyTorch model\n if image.size[0] > image.size[1]:\n image.thumbnail((10000000,resize_size))\n else:\n image.thumbnail((resize_size,10000000))\n width=image.size[0]\n height=image.size[1]\n left = (width-image_size)/2\n top = (height-image_size)/2\n right = (width+image_size)/2\n bottom = (height+image_size)/2\n image = image.crop((left,top,right,bottom))\n image_np = np.array(image)/255\n mean_array=np.array(mean)\n std_array=np.array(std)\n image_np = (image_np-mean_array)/std_array\n image_np = image_np.transpose((2,0,1))\n return torch.from_numpy(image_np)\n\ndef predict(image, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n # TODO: Implement the code to predict the class from an image file\n img = process_image(image)\n img.unsqueeze_(0)\n img=img.float()\n with torch.no_grad():\n model.eval()\n log_ps=model(img)\n ps = torch.exp(log_ps)\n preds,classes = ps.topk(topk,dim=1)\n return preds,classes" ]
[ [ "numpy.array", "torch.exp", "torch.no_grad", "torch.from_numpy" ] ]
rizoic/fluff
[ "5887071d4e5b919438a0746be501ca75e658c31f" ]
[ "fluff/fluffio.py" ]
[ "# Copyright (c) 2012-2013 Simon van Heeringen <[email protected]>\n#\n# This script is free software. You can redistribute it and/or modify it under \n# the terms of the MIT License\n# \nimport os\nimport sys\nimport tempfile\n\nimport numpy as np\nimport pybedtools\n\nfrom fluff.track import Track\n\ndef is_equal_feature(feature, vals):\n if not vals:\n return False\n if feature.chrom != vals[0]:\n return False\n if feature.start != int(vals[1]):\n return False\n if feature.end != int(vals[2]):\n return False\n return True\n\ndef _convert_value(v):\n \"\"\"\n Returns 0 if v is not specified, otherwise try int\n \"\"\"\n if v:\n try:\n v = int(v)\n return v\n except ValueError:\n return v\n return 0\n\ndef load_bed_clusters(bedfile):\n \"\"\"\n Reads a BED file, using the fourth column as cluster number\n Arguments: bedfile - a 4-column BED file\n Returns: a hash with cluster numbers as key, and a list of genomic locations as value\n \"\"\"\n cluster_data = {}\n track = pybedtools.BedTool(bedfile)\n for f in track:\n cluster_data.setdefault(_convert_value(f.score), []).append(\"{0}:{1}-{2}\".format(f.chrom, f.start, f.end))\n return cluster_data\n\ndef load_cluster_data(clust_file, datafiles, bins, rpkm, rmdup, rmrepeats, fragmentsize=None):\n data = {}\n for datafile in datafiles:\n result = []\n track = Track.load(datafile,\n rmdup=rmdup,\n rmrepeats=rmrepeats,\n fragmentsize=fragmentsize)\n result = track.binned_stats(clust_file,\n bins,\n split=True,\n rpkm=rpkm\n )\n data[os.path.basename(datafile)] = dict(\n [[\"{0}:{1}-{2}\".format(vals[0], vals[1], vals[2]), [float(x) for x in vals[3:]]] for vals in result])\n return data\n\ndef load_read_counts(readCounts):\n data = {}\n indexes = {}\n titles = []\n for line in open(readCounts):\n if line.startswith('Regions'):\n idx = 0\n for datafile in line.split('\\t')[1:]:\n if datafile.strip():\n titles.append(datafile.strip())\n data[datafile.strip()] = {}\n indexes[idx] = datafile.strip()\n idx += 1\n else:\n for idx, binsline in enumerate(line.split('\\t')[1:]):\n if binsline.strip():\n data[indexes[idx]][line.split('\\t')[0]] = [float(x) for x in binsline.split(';')]\n return titles, data\n\ndef get_free_track(overlap, start, end, max_end, min_gap):\n first = int(start - min_gap * max_end)\n if first < 0:\n first = 0\n\n for i, track in enumerate(overlap):\n if max(track[start:end]) == 0:\n track[first:int(end + min_gap * max_end)] += 1\n return overlap, i\n\n overlap.append(np.zeros(max_end, dtype=\"i\"))\n overlap[-1][first:int(end + min_gap * max_end)] += 1\n # overlap[-1][start- min_gap * max_end:end + min_gap * max_end] += 1\n return overlap, len(overlap) - 1\n\n\ndef load_annotation(interval, fname, min_gap=0.05, vis=\"stack\"):\n genes = []\n chrom, start, end = interval\n for line in open(fname):\n if not line.startswith(\"#\") and not line.startswith(\"track\"):\n vals = line.strip().split(\"\\t\")\n for i in [1, 2, 6, 7]:\n if len(vals) > i:\n vals[i] = int(vals[i])\n if vals[0] == chrom:\n if vals[1] <= end and vals[2] >= start:\n # sys.stderr.write(\"Adding {0}\\n\".format(vals[3]))\n genes.append(vals)\n if len(genes) == 0:\n return {}\n min_start = min([gene[1] for gene in genes])\n max_end = max([gene[2] for gene in genes])\n overlap = []\n gene_tracks = {}\n for gene in sorted(genes, key=lambda x: x[1]):\n if vis == \"stack\":\n overlap, i = get_free_track(overlap, gene[1] - min_start, gene[2] - min_start, max_end - min_start, min_gap)\n elif vis == \"merge\":\n i = 0\n else:\n sys.stderr.write(\"Unknown visualization\")\n if i in gene_tracks:\n gene_tracks[i].append(gene)\n else:\n gene_tracks[i] = [gene]\n return gene_tracks\n\ndef load_heatmap_data(featurefile, datafile, bins=100, up=5000, down=5000, rmdup=True, rpkm=False, rmrepeats=True,fragmentsize=None, dynam=False, guard=None):\n if guard is None:\n guard = []\n #try mode='w' to make py2 and py3 work\n tmp = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8', delete=False, prefix=\"fluff\")\n regions = []\n order = {}\n count = 0\n hashcounter = 0\n if not guard and dynam:\n filt = True\n else:\n filt = False\n for i, line in enumerate(open(featurefile)):\n if line.startswith(\"#\") or line[:5] == \"track\":\n hashcounter += 1\n continue\n vals = line.strip().split(\"\\t\")\n strand = \"+\"\n gene = \"\"\n if len(vals) >= 6:\n strand = vals[5]\n if len(vals) >= 4:\n gene = vals[3]\n\n middle = int((int(vals[2]) + int(vals[1])) / 2)\n start, end = middle, middle\n if strand == \"+\":\n start -= up\n end += down\n else:\n start -= down\n end += up\n if filt:\n if start >= 0:\n guard.append(True)\n else:\n guard.append(False)\n if not filt and start >= 0:\n if not dynam or guard[i - hashcounter]:\n regions.append([vals[0], start, end, gene, strand])\n order[\"{0}:{1}-{2}\".format(vals[0], start, end)] = count\n count += 1\n #add encode() to make py3 work\n tmp.write(\"{0}\\t{1}\\t{2}\\t{3}\\t0\\t{4}\\n\".format(vals[0], start, end, gene, strand))\n tmp.flush()\n track = Track.load(datafile,\n rmdup=rmdup,\n rmrepeats=rmrepeats,\n fragmentsize=fragmentsize)\n\n result = track.binned_stats(tmp.name, bins, split=True, rpkm=rpkm)\n # Retrieve original order\n r_data = np.array([[float(x) for x in row[3:]] for row in result])\n return os.path.basename(datafile), regions, r_data, guard # [r_order]\n\n\ndef check_data(featurefile, up=5000, down=5000):\n guard = []\n for line in open(featurefile):\n if line.startswith(\"#\") or line[:5] == \"track\":\n continue\n vals = line.strip().split(\"\\t\")\n strand = \"+\"\n \n if len(vals) >= 6:\n strand = vals[5]\n \n middle = int((int(vals[2]) + int(vals[1])) / 2)\n start, end = middle, middle\n if strand == \"+\":\n start -= up\n end += down\n else:\n start -= down\n end += up\n if start >= 0:\n guard.append(True)\n else:\n guard.append(False)\n return guard\n" ]
[ [ "numpy.zeros" ] ]
alsheabi/Detection_and_classification_of_small_objects
[ "49031b5d662390fb7e9fb6f43f7d829a97d827fd" ]
[ "loss.py" ]
[ "import torch\r\nimport torch.nn as nn\r\n\r\n\r\ndef calc_iou(a, b):\r\n\r\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\r\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\r\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\r\n iw = torch.clamp(iw, min=0)\r\n ih = torch.clamp(ih, min=0)\r\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\r\n ua = torch.clamp(ua, min=1e-8)\r\n intersection = iw * ih\r\n IoU = intersection / ua\r\n\r\n return IoU\r\n\r\n\r\nclass FocalLoss(nn.Module):\r\n def __init__(self):\r\n super(FocalLoss, self).__init__()\r\n\r\n def forward(self, classifications, regressions, anchors, annotations):\r\n alpha = 0.25\r\n gamma = 2.0\r\n batch_size = classifications.shape[0]\r\n classification_losses = []\r\n regression_losses = []\r\n\r\n anchor = anchors[0, :, :]\r\n\r\n anchor_widths = anchor[:, 2] - anchor[:, 0]\r\n anchor_heights = anchor[:, 3] - anchor[:, 1]\r\n anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths\r\n anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights\r\n\r\n for j in range(batch_size):\r\n\r\n classification = classifications[j, :, :]\r\n regression = regressions[j, :, :]\r\n\r\n bbox_annotation = annotations[j, :, :]\r\n bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]\r\n\r\n if bbox_annotation.shape[0] == 0:\r\n if torch.cuda.is_available():\r\n regression_losses.append(torch.tensor(0).float().cuda())\r\n classification_losses.append(torch.tensor(0).float().cuda())\r\n else:\r\n regression_losses.append(torch.tensor(0).float())\r\n classification_losses.append(torch.tensor(0).float())\r\n\r\n continue\r\n\r\n classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)\r\n\r\n IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4])\r\n\r\n IoU_max, IoU_argmax = torch.max(IoU, dim=1)\r\n\r\n # compute the loss for classification\r\n targets = torch.ones(classification.shape) * -1\r\n if torch.cuda.is_available():\r\n targets = targets.cuda()\r\n # this line means that targets with iou lower than 0.4 are considered as background\r\n targets[torch.lt(IoU_max, 0.4), :] = 0\r\n # this line records indices where iou is greater than 0.5 and thus correspond to objects.-\r\n positive_indices = torch.ge(IoU_max, 0.5)\r\n\r\n num_positive_anchors = positive_indices.sum()\r\n\r\n assigned_annotations = bbox_annotation[IoU_argmax, :]\r\n\r\n targets[positive_indices, :] = 0\r\n targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1\r\n\r\n alpha_factor = torch.ones(targets.shape) * alpha\r\n if torch.cuda.is_available():\r\n alpha_factor = alpha_factor.cuda()\r\n\r\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)\r\n focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)\r\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\r\n\r\n bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))\r\n\r\n cls_loss = focal_weight * bce\r\n\r\n zeros = torch.zeros(cls_loss.shape)\r\n if torch.cuda.is_available():\r\n zeros = zeros.cuda()\r\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, zeros)\r\n\r\n classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0))\r\n\r\n\r\n if positive_indices.sum() > 0:\r\n assigned_annotations = assigned_annotations[positive_indices, :]\r\n\r\n anchor_widths_pi = anchor_widths[positive_indices]\r\n anchor_heights_pi = anchor_heights[positive_indices]\r\n anchor_ctr_x_pi = anchor_ctr_x[positive_indices]\r\n anchor_ctr_y_pi = anchor_ctr_y[positive_indices]\r\n\r\n gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]\r\n gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]\r\n gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths\r\n gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights\r\n\r\n gt_widths = torch.clamp(gt_widths, min=1)\r\n gt_heights = torch.clamp(gt_heights, min=1)\r\n\r\n targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi\r\n targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi\r\n targets_dw = torch.log(gt_widths / anchor_widths_pi)\r\n targets_dh = torch.log(gt_heights / anchor_heights_pi)\r\n\r\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))\r\n targets = targets.t()\r\n\r\n norm = torch.Tensor([[0.1, 0.1, 0.2, 0.2]])\r\n if torch.cuda.is_available():\r\n norm = norm.cuda()\r\n targets = targets / norm\r\n\r\n regression_diff = torch.abs(targets - regression[positive_indices, :])\r\n\r\n regression_loss = torch.where(\r\n torch.le(regression_diff, 1.0 / 9.0),\r\n 0.5 * 9.0 * torch.pow(regression_diff, 2),\r\n regression_diff - 0.5 / 9.0\r\n )\r\n regression_losses.append(regression_loss.mean())\r\n else:\r\n if torch.cuda.is_available():\r\n regression_losses.append(torch.tensor(0).float().cuda())\r\n else:\r\n regression_losses.append(torch.tensor(0).float())\r\n\r\n return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0,\r\n keepdim=True)" ]
[ [ "torch.zeros", "torch.stack", "torch.eq", "torch.max", "torch.ne", "torch.le", "torch.clamp", "torch.unsqueeze", "torch.ones", "torch.abs", "torch.cuda.is_available", "torch.tensor", "torch.lt", "torch.log", "torch.ge", "torch.Tensor", "torch.pow" ] ]
plancky/mathematical_physics_II
[ "c912dca1a58c218ddb06dc6cbca021b03a703540" ]
[ "direction_fields.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use(\"dark_background\")\nf = lambda t,x : np.sin(x*t)\n\nt,x = np.linspace(-5,5,10),np.linspace(-5,5,10)\nT,X = np.meshgrid(t,x)\nf_i = 1/np.sqrt(1+f(T,X)**2)\nf_j = f(t,x)/np.sqrt(1+f(T,X)**2)\nfig,ax = plt.subplots(1,1,figsize=(5,5))\nplt.quiver(T,X,T/(T**2+X**2+1),X/(T**2+X**2+1),color = \"#f23333\")\nplt.savefig(\"/home/planck/Home2.png\")\nplt.show()" ]
[ [ "numpy.sin", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.linspace", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "matplotlib.pyplot.quiver", "numpy.meshgrid" ] ]
uc-cdis/ndh-demo
[ "a4657036aefe2cffde525ee22f17602d40c85a59" ]
[ "demo/DMID_notebook/nde_dmid_function.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport requests\nimport json\nimport os\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Create a list of query variables\nsummary_order_LHV = [\n \"_study_count\",\n \"_subject_count\",\n \"_demographic_count\",\n \"_exposure_count\",\n \"_follow_up_count\",\n \"_sample_count\",\n \"_virus_infection_count\",\n \"_summary_lab_result_count\",\n \"_aliquot_count\",\n \"_mrna_microarray_count\",\n \"_mrna_expression_count\",\n \"_protein_mass_spectrometry_count\",\n \"_peptide_expression_count\",\n \"_protein_expression_count\",\n \"_lipid_mass_spectrometry_count\",\n \"_metabolite_mass_spectrometry_count\"\n]\n\n# Create a dictionary of (query variables:header) pairs\nsummary_count_headers_LHV = {\n \"_subject_count\": \"Subjects\",\n \"_study_count\": \"Studies\",\n \"_demographic_count\": \"Demographic records\",\n \"_exposure_count\": \"Exposure records\",\n \"_follow_up_count\": \"Followup records\",\n \"_sample_count\": \"Sample records\",\n \"_virus_infection_count\": \"Virus Infection records\",\n \"_summary_lab_result_count\": \"Lab Results records\",\n \"_aliquot_count\": \"Aliquots\",\n \"_mrna_microarray_count\": \"mRNA Microarray\",\n \"_mrna_expression_count\": \"mRNA Expression\",\n \"_protein_mass_spectrometry_count\": \"Protein Mass Spectrometry\",\n \"_peptide_expression_count\": \"Peptide Expression\",\n \"_protein_expression_count\": \"Protein Expression\",\n \"_lipid_mass_spectrometry_count\": \"Lipid Mass Spectrometry\",\n \"_metabolite_mass_spectrometry_count\": \"Metabolite Mass Spectrometry\"\n}\n\n# Construct SummaryTable class with properties: content, summary_order and summary_header. SummaryTable is representated as a table.\n\n\nclass SummaryTable:\n ''' Represent result tables in HTML format for visualization '''\n\n def __init__(self, content, summary_order, summary_header):\n self.content = content\n self.summary_order = summary_order\n self.summary_header = summary_header\n\n def _repr_html_(self):\n html = []\n html.append(\"<table style>\")\n html.append(\"<thead>\")\n html.append(\"<th>Category</th>\")\n html.append(\"<th>Counts</th>\")\n html.append(\"</thead>\")\n for item in self.summary_order:\n html.append(\"<tr>\")\n html.append(\"<td>%s</td>\" % self.summary_header[item])\n html.append(\"<td>%s</td>\" % self.content[item])\n html.append(\"<tr>\")\n html.append(\"</table>\")\n return ''.join(html)\n\n# Input \"credential.json\" downloaded from data common profile page to generate access token for data query\n\n\ndef add_keys(filename):\n ''' Get auth from our secret keys '''\n\n global auth\n json_data = open(filename).read()\n keys = json.loads(json_data)\n auth = requests.post('https://flu.niaiddata.org/user/credentials/cdis/access_token', json=keys) \n# query data from API using query text, returned data is in json format\n\n\ndef query_api(query_txt, variables=None):\n ''' Request results for a specific query '''\n\n if variables == None:\n query = {'query': query_txt}\n else:\n query = {'query': query_txt, 'variables': variables}\n\n output = requests.post('https://flu.niaiddata.org/api/v0/submission/graphql', headers={'Authorization': 'bearer ' + auth.json()['access_token']}, json=query).text\n data = json.loads(output)\n\n if 'errors' in data:\n print(data)\n\n return data\n\n\ndef query_summary_counts(project_id, summary_order, summary_header):\n ''' Query summary counts for each data type'''\n\n query_txt = \"query Counts ($projectID: [String]) { \"\n for prop in summary_order:\n query_txt += \"%s(project_id: $projectID) \" % prop\n query_txt += \"}\"\n\n variables = {'projectID': project_id}\n\n data = query_api(query_txt, variables)\n\n table = SummaryTable(data['data'], summary_order, summary_header)\n\n return table\n\n\ndef query_cell_lab():\n ''' Graphql query text for querying virus, timepoint, virus titers and gRNA for cell line '''\n query_text = '''{\n study(first:0,order_by_asc:\"submitter_id\",project_id: \"FLU-LHV\",with_path_to:{type:\"subject\",species:\"Homo sapiens\"}){\n submitter_id\n subjects(first:0){\n samples(first:0){\n composition\n hours_to_collection\n virus_infections{\n strain\n mutation\n }\n summary_lab_results(first:0){\n virus_titer_rep1\n virus_titer_rep2\n virus_titer_rep3\n virus_titer_rep4\n virus_titer_rep5\n virus_NP_gRNA\n }\n }\n }\n }\n }'''\n ''' Save response data in dictionary as structure study{virus{timepoint{virus_titer}}} or study{virus{timepoint{gRNA}}}'''\n data = query_api(query_text)\n studies = dict()\n attributes = ['virus_titer_rep1', 'virus_titer_rep2', 'virus_titer_rep3', 'virus_titer_rep4', 'virus_titer_rep5']\n for entity in data['data']['study']:\n study_id = entity['submitter_id']\n studies.setdefault(study_id, {})\n for subject in entity['subjects']:\n for sample in subject['samples']:\n composition = sample['composition']\n tp = str(sample['hours_to_collection'])\n if composition == \"Supernatant\":\n for infection in sample['virus_infections']:\n virus_id = '_'.join(filter(None, [infection['strain'],infection['mutation']]))\n if virus_id is not None:\n virus_id = (\"_\").join(virus_id.split(\" \"))\n studies[study_id].setdefault('Supernatant', {})\n studies[study_id]['Supernatant'].setdefault(virus_id, {})\n studies[study_id]['Supernatant'][virus_id].setdefault(tp, [])\n for lab in sample['summary_lab_results']:\n for attribute in attributes:\n if lab[attribute] is not None:\n studies[study_id]['Supernatant'][virus_id][tp].append(np.log10(lab[attribute]))\n elif composition == \"Cell\":\n for infection in sample['virus_infections']:\n virus_id = '_'.join(filter(None, [infection['strain'],infection['mutation']]))\n if virus_id is not None:\n virus_id = (\"_\").join(virus_id.split(\" \"))\n studies[study_id].setdefault(\"Cell\", {})\n studies[study_id]['Cell'].setdefault(virus_id, {})\n studies[study_id]['Cell'][virus_id].setdefault(tp, [])\n for lab in sample['summary_lab_results']:\n if lab['virus_NP_gRNA'] is not None:\n studies[study_id]['Cell'][virus_id][tp].append(lab['virus_NP_gRNA'])\n return studies\n\n\ndef query_mouse_titer():\n ''' Graphql query text for querying virus, timepoint and virus titer for mouse '''\n query_txt = '''{\n study(first:0,order_by_asc:\"submitter_id\",project_id: \"FLU-LHV\",with_path_to:{type:\"subject\",species:\"Mus musculus\"}){\n submitter_id\n subjects(first:0){\n follow_ups(first:0){\n days_to_follow_up\n submitter_id\n samples(first:0){\n summary_lab_results{\n titer_PFU_per_gram\n }\n }\n }\n virus_infections(first:0){\n strain\n mutation\n }\n }\n }\n }'''\n ''' Save response data in dictionary as structure study{virus{timepoint{virus_titer}}}'''\n data = query_api(query_txt)\n studies = dict()\n for entity in data['data']['study']:\n study_id = entity['submitter_id']\n studies.setdefault(study_id, {})\n for subject in entity['subjects']:\n for infection in subject['virus_infections']:\n virus_id = '_'.join(filter(None, [infection['strain'],infection['mutation']]))\n studies[study_id].setdefault(virus_id, {})\n for follow_up in subject['follow_ups']:\n day_id = str(follow_up['days_to_follow_up'])\n studies[study_id][virus_id].setdefault(day_id, [])\n for sample in follow_up['samples']:\n for lab in sample['summary_lab_results']:\n titer = lab['titer_PFU_per_gram']\n if titer is not None:\n studies[study_id][virus_id][day_id].append(np.log10(titer + 1))\n return studies\n\n\ndef plot_cell_titer(studies):\n ''' plot virus titer for cell line. Each line is for a virus. x-axis is timepoint, y-axis is average virus titer with std'''\n data = query_cell_lab()\n all_timepoints = []\n fig = plt.figure(figsize=(6, 4))\n for study in studies:\n virus_list = data[study][\"Supernatant\"].keys()\n virus_list = filter(None, virus_list)\n for virus in virus_list:\n times = list()\n titer_aves = list()\n titer_stds = list()\n for timepoint in sorted(map(int, data[study][\"Supernatant\"][virus].keys())):\n tp_key = str(timepoint)\n if data[study][\"Supernatant\"][virus][tp_key]:\n titer_aves.append(np.mean(data[study]['Supernatant'][virus][tp_key]))\n titer_stds.append(np.std(data[study]['Supernatant'][virus][tp_key]))\n times.append(timepoint)\n all_timepoints.append(timepoint)\n\n ax1 = fig.add_subplot(111)\n ax1.errorbar(times, titer_aves, yerr=titer_stds, fmt='-o', label=virus)\n ax1.set_xlabel(\"Timepoint\", fontsize=14)\n ax1.set_ylabel(\"Virus Titer\", fontsize=14)\n ax1.set_xticks(times)\n ax1.legend(loc='best', fancybox=True, framealpha=0.5)\n ax1.set_xticks(all_timepoints)\n\n\ndef plot_mouse_titer(study, virus_list=None):\n ''' plot virus titer for mouse. Each line is for a virus. x-axis is timepoint, y-axis is average virus titer with std'''\n data = query_mouse_titer()\n if virus_list is None:\n virus_list = data[study].keys()\n virus_list = filter(None, virus_list)\n fig = plt.figure(figsize=(6, 4))\n for virus in virus_list:\n times = list()\n titer_aves = list()\n titer_stds = list()\n for timepoint in sorted(map(int, data[study][virus].keys())):\n tp_key = str(timepoint)\n if data[study][virus][tp_key]:\n titer_aves.append(np.mean(data[study][virus][tp_key]))\n titer_stds.append(np.std(data[study][virus][tp_key]))\n times.append(timepoint)\n\n ax1 = fig.add_subplot(111)\n ax1.errorbar(times, titer_aves, yerr=titer_stds, fmt='-o', label=virus)\n ax1.set_xlabel(\"Timepoint\", fontsize=14)\n ax1.set_ylabel(\"Virus Titer\", fontsize=14)\n ax1.set_xticks(times)\n ax1.legend(loc='best', fancybox=True, framealpha=0.5)\n\n\ndef plot_gRNA(studies):\n ''' plot gRNA for cell line. Each line is for a virus. x-axis is timepoint, y-axis is virus genomic RNA'''\n data = query_cell_lab()\n fig = plt.figure(figsize=(6, 4))\n for study in studies:\n virus_list = data[study][\"Cell\"].keys()\n virus_list = filter(None, virus_list)\n for virus in virus_list:\n times = list()\n gRNA = list()\n for timepoint in sorted(map(int, data[study][\"Cell\"][virus].keys())):\n tp_key = str(timepoint)\n if data[study][\"Cell\"][virus][tp_key]:\n times.append(timepoint)\n gRNA.append(data[study]['Cell'][virus][tp_key])\n ax1 = fig.add_subplot(111)\n ax1.plot(times, gRNA, 'o-', label=virus)\n ax1.set_xlabel(\"Timepoint\", fontsize=14)\n ax1.set_ylabel(\"gRNA\", fontsize=14)\n ax1.set_xticks(times)\n ax1.legend(loc='best', fancybox=True, framealpha=0.5)\n\n\ndef query_mouse_weight():\n ''' Graphql query text for querying virus, timepoint and weight_percentage for mouse '''\n query_txt = '''{\n study(first:0,order_by_asc:\"submitter_id\",project_id: \"FLU-LHV\"){\n submitter_id\n subjects(first:0, with_links:\"follow_ups\"){\n follow_ups(first:0){\n days_to_follow_up\n weight_percentage\n }\n virus_infections(first:0){\n strain\n mutation\n }\n }\n }\n }'''\n ''' Save response data in dictionary as structure study{virus{timepoint{weight_percentage}}}'''\n data = query_api(query_txt)\n included_studies = ['IM101', 'IM102', 'IM103']\n studies = dict()\n for entity in data['data']['study']:\n if entity['submitter_id'] not in included_studies:\n continue\n else:\n study_id = entity['submitter_id']\n studies.setdefault(study_id, {})\n for subject in entity['subjects']:\n for infection in subject['virus_infections']:\n virus_id = '_'.join(filter(None, [infection['strain'],infection['mutation']]))\n studies[study_id].setdefault(virus_id, {})\n for follow_up in subject['follow_ups']:\n time = str(follow_up['days_to_follow_up'])\n studies[study_id][virus_id].setdefault(time, [])\n weight_percentage = follow_up['weight_percentage']\n if weight_percentage:\n studies[study_id][virus_id][time].append(weight_percentage)\n return studies\n\n\ndef plot_weight_percentage(study):\n ''' plot weight_percentage for mouse. Each line is for a virus. x-axis is timepoint, y-axis is average weight_percentage with std '''\n data = query_mouse_weight()\n fig = plt.figure(figsize=(6, 4))\n virus_list = data[study].keys()\n virus_list = filter(None, virus_list)\n for virus in virus_list:\n times = list()\n weight_aves = list()\n weight_stds = list()\n for timepoint in sorted(map(int, data[study][virus].keys())):\n tp_key = str(timepoint)\n if data[study][virus][tp_key]:\n weight_aves.append(np.mean(list(map(float, data[study][virus][tp_key]))))\n weight_stds.append(np.std(list(map(float, data[study][virus][tp_key]))))\n times.append(timepoint)\n ax1 = fig.add_subplot(111)\n ax1.errorbar(times, weight_aves, yerr=weight_stds, fmt='-o', label=virus)\n ax1.set_xlabel(\"Timepoint\", fontsize=14)\n ax1.set_ylabel(\"weight_percentage\", fontsize=14)\n ax1.set_xticks(times)\n ax1.legend(loc='best', fancybox=True, framealpha=0.5)\n" ]
[ [ "numpy.std", "numpy.log10", "numpy.mean", "matplotlib.pyplot.figure" ] ]
connorjward/loopy
[ "752d758c61faa980b5841d92b7d5acbe4c3f8135" ]
[ "test/test_domain.py" ]
[ "__copyright__ = \"Copyright (C) 2012 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport loopy as lp\nimport pyopencl as cl\nimport pyopencl.clmath # noqa\nimport pyopencl.clrandom # noqa\nimport pytest # noqa\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import faulthandler\nexcept ImportError:\n pass\nelse:\n faulthandler.enable()\n\nfrom pyopencl.tools import pytest_generate_tests_for_pyopencl \\\n as pytest_generate_tests\n\n__all__ = [\n \"pytest_generate_tests\",\n \"cl\" # 'cl.create_some_context'\n ]\n\n\nfrom loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa\n\n\ndef test_assume(ctx_factory):\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"{[i]: 0<=i<n}\",\n \"a[i] = a[i] + 1\",\n [lp.GlobalArg(\"a\", np.float32, shape=\"n\"), \"...\"],\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n knl = lp.split_iname(knl, \"i\", 16)\n knl = lp.prioritize_loops(knl, \"i_outer,i_inner\")\n knl = lp.assume(knl, \"n mod 16 = 0\")\n knl = lp.assume(knl, \"n > 10\")\n code = lp.generate_code_v2(knl).device_code()\n assert \"if\" not in code\n\n\ndef test_divisibility_assumption(ctx_factory):\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"[n] -> {[i]: 0<=i<n}\",\n [\n \"b[i] = 2*a[i]\"\n ],\n [\n lp.GlobalArg(\"a\", np.float32, shape=(\"n\",)),\n lp.GlobalArg(\"b\", np.float32, shape=(\"n\",)),\n lp.ValueArg(\"n\", np.int32),\n ],\n assumptions=\"n>=1 and (exists zz: n = 16*zz)\",\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n ref_knl = knl\n\n knl = lp.split_iname(knl, \"i\", 16)\n code = lp.generate_code_v2(knl).device_code()\n assert \"if\" not in code\n\n lp.auto_test_vs_ref(ref_knl, ctx, knl,\n parameters={\"n\": 16**3})\n\n\ndef test_eq_constraint(ctx_factory):\n logging.basicConfig(level=logging.INFO)\n\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"{[i]: 0<= i < 32}\",\n [\n \"a[i] = b[i]\"\n ],\n [\n lp.GlobalArg(\"a\", np.float32, shape=(1000,)),\n lp.GlobalArg(\"b\", np.float32, shape=(1000,))\n ],\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.0\")\n knl = lp.split_iname(knl, \"i_inner\", 16, outer_tag=None, inner_tag=\"l.0\")\n print(lp.generate_code_v2(knl).device_code())\n\n\ndef test_dependent_loop_bounds(ctx_factory):\n dtype = np.dtype(np.float32)\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n [\n \"{[i]: 0<=i<n}\",\n \"{[jj]: 0<=jj<row_len}\",\n ],\n [\n \"<> row_len = a_rowstarts[i+1] - a_rowstarts[i]\",\n \"a_sum[i] = sum(jj, a_values[[a_rowstarts[i]+jj]])\",\n ],\n [\n lp.GlobalArg(\"a_rowstarts\", np.int32, shape=lp.auto),\n lp.GlobalArg(\"a_indices\", np.int32, shape=lp.auto),\n lp.GlobalArg(\"a_values\", dtype),\n lp.GlobalArg(\"a_sum\", dtype, shape=lp.auto),\n lp.ValueArg(\"n\", np.int32),\n ],\n assumptions=\"n>=1 and row_len>=1\",\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n print(lp.generate_code_v2(knl).device_code())\n\n\ndef test_dependent_loop_bounds_2(ctx_factory):\n dtype = np.dtype(np.float32)\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n [\n \"{[i]: 0<=i<n}\",\n \"{[jj]: 0<=jj<row_len}\",\n ],\n [\n \"<> row_start = a_rowstarts[i]\",\n \"<> row_len = a_rowstarts[i+1] - row_start\",\n \"ax[i] = sum(jj, a_values[[row_start+jj]])\",\n ],\n [\n lp.GlobalArg(\"a_rowstarts\", np.int32, shape=lp.auto),\n lp.GlobalArg(\"a_indices\", np.int32, shape=lp.auto),\n lp.GlobalArg(\"a_values\", dtype, strides=(1,)),\n lp.GlobalArg(\"ax\", dtype, shape=lp.auto),\n lp.ValueArg(\"n\", np.int32),\n ],\n assumptions=\"n>=1 and row_len>=1\",\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n knl = lp.split_iname(knl, \"i\", 128, outer_tag=\"g.0\",\n inner_tag=\"l.0\")\n\n print(lp.generate_code_v2(knl).device_code())\n\n\ndef test_dependent_loop_bounds_3(ctx_factory):\n # The point of this test is that it shows a dependency between\n # domains that is exclusively mediated by the row_len temporary.\n # It also makes sure that row_len gets read before any\n # conditionals use it.\n\n dtype = np.dtype(np.float32)\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n [\n \"{[i]: 0<=i<n}\",\n \"{[jj]: 0<=jj<row_len}\",\n ],\n [\n \"<> row_len = a_row_lengths[i]\",\n \"a[i,jj] = 1\",\n ],\n [\n lp.GlobalArg(\"a_row_lengths\", np.int32, shape=lp.auto),\n lp.GlobalArg(\"a\", dtype, shape=(\"n,n\"), order=\"C\"),\n lp.ValueArg(\"n\", np.int32),\n ],\n target=lp.PyOpenCLTarget(ctx.devices[0]),\n name=\"loopy_kernel\")\n\n assert knl[\"loopy_kernel\"].parents_per_domain()[1] == 0\n\n knl = lp.split_iname(knl, \"i\", 128, outer_tag=\"g.0\",\n inner_tag=\"l.0\")\n\n print(lp.generate_code_v2(knl).device_code())\n\n knl_bad = lp.split_iname(knl, \"jj\", 128, outer_tag=\"g.1\",\n inner_tag=\"l.1\")\n\n with pytest.raises(RuntimeError):\n list(lp.generate_code_v2(knl_bad))\n\n\ndef test_dependent_loop_bounds_4():\n # https://gitlab.tiker.net/inducer/loopy/issues/23\n import loopy as lp\n\n loopy_knl = lp.make_kernel(\n [\n \"{[a]: 0<=a<10}\",\n \"{[b]: b_start<=b<b_end}\",\n \"{[c,idim]: c_start<=c<c_end and 0<=idim<dim}\",\n ],\n \"\"\"\n for a\n <> b_start = 1\n <> b_end = 2\n for b\n <> c_start = 1\n <> c_end = 2\n\n for c\n ... nop\n end\n\n <>t[idim] = 1\n end\n end\n \"\"\",\n \"...\",\n seq_dependencies=True)\n\n loopy_knl = lp.fix_parameters(loopy_knl, dim=3)\n\n with lp.CacheMode(False):\n lp.generate_code_v2(loopy_knl)\n\n\ndef test_independent_multi_domain(ctx_factory):\n dtype = np.dtype(np.float32)\n ctx = ctx_factory()\n queue = cl.CommandQueue(ctx)\n\n knl = lp.make_kernel(\n [\n \"{[i]: 0<=i<n}\",\n \"{[j]: 0<=j<n}\",\n ],\n [\n \"a[i] = 1\",\n \"b[j] = 2\",\n ],\n [\n lp.GlobalArg(\"a\", dtype, shape=(\"n\"), order=\"C\"),\n lp.GlobalArg(\"b\", dtype, shape=(\"n\"), order=\"C\"),\n lp.ValueArg(\"n\", np.int32),\n ],\n name=\"loopy_kernel\")\n\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.0\",\n inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"j\", 16, outer_tag=\"g.0\",\n inner_tag=\"l.0\")\n assert knl[\"loopy_kernel\"].parents_per_domain() == 2*[None]\n\n n = 50\n evt, (a, b) = knl(queue, n=n, out_host=True)\n\n assert a.shape == (50,)\n assert b.shape == (50,)\n assert (a == 1).all()\n assert (b == 2).all()\n\n\ndef test_equality_constraints(ctx_factory):\n dtype = np.float32\n ctx = ctx_factory()\n\n order = \"C\"\n\n n = 10\n\n knl = lp.make_kernel([\n \"[n] -> {[i,j]: 0<=i,j<n }\",\n \"{[k]: k =i+5 and k < n}\",\n ],\n [\n \"a[i,j] = 5 {id=set_all}\",\n \"b[i,k] = 22 {id=set_b, dep=set_all}\",\n ],\n [\n lp.GlobalArg(\"a,b\", dtype, shape=\"n, n\", order=order),\n lp.ValueArg(\"n\", np.int32, approximately=1000),\n ],\n name=\"equality_constraints\", assumptions=\"n>=1\")\n\n seq_knl = knl\n\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"j\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n\n knl = lp.add_inames_to_insn(knl, \"j_inner, j_outer\", \"id:set_b\")\n\n #print(knl)\n #print(knl.domains[0].detect_equalities())\n\n lp.auto_test_vs_ref(seq_knl, ctx, knl,\n parameters=dict(n=n), print_ref_code=True)\n\n\ndef test_stride(ctx_factory):\n dtype = np.float32\n ctx = ctx_factory()\n\n order = \"C\"\n\n n = 10\n\n knl = lp.make_kernel([\n \"{[i]: 0<=i<n and (exists l: i = 2*l)}\",\n ],\n [\n \"a[i] = 5\",\n ],\n [\n lp.GlobalArg(\"a\", dtype, shape=\"n\", order=order),\n lp.ValueArg(\"n\", np.int32, approximately=1000),\n ],\n assumptions=\"n>=1\")\n\n seq_knl = knl\n\n lp.auto_test_vs_ref(seq_knl, ctx, knl,\n parameters=dict(n=n))\n\n\ndef test_domain_dependency_via_existentially_quantified_variable(ctx_factory):\n dtype = np.float32\n ctx = ctx_factory()\n\n order = \"C\"\n\n n = 10\n\n knl = lp.make_kernel([\n \"{[i]: 0<=i<n }\",\n \"{[k]: k=i and (exists l: k = 2*l) }\",\n ],\n [\n \"a[i] = 5 {id=set}\",\n \"b[k] = 6 {dep=set}\",\n ],\n [\n lp.GlobalArg(\"a,b\", dtype, shape=\"n\", order=order),\n lp.ValueArg(\"n\", np.int32, approximately=1000),\n ],\n assumptions=\"n>=1\")\n\n seq_knl = knl\n\n lp.auto_test_vs_ref(seq_knl, ctx, knl,\n parameters=dict(n=n))\n\n\ndef test_triangle_domain(ctx_factory):\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"{[i,j]: 0<=i,j<n and i <= j}\",\n \"a[i,j] = 17\",\n assumptions=\"n>=1\",\n target=lp.PyOpenCLTarget(ctx.devices[0]))\n\n print(knl)\n print(lp.generate_code_v2(knl).device_code())\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n\n# vim: foldmethod=marker\n" ]
[ [ "numpy.dtype" ] ]
SulmanK/Cyberpunk-2077-Twitter-Sentiment-Analysis
[ "eccd0b0cb2ef84808a9639031ce58c41b3c62ca2" ]
[ "App_Deployment/model/data_pull.py" ]
[ "#--------------------- Packages\r\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport psycopg2\r\n#--------------------- Data Gathering\r\n\"\"\" Script to pull the tweets from the PostgreSQL database, retrieves the dataframe and initializes various variables for analysis.\"\"\"\r\n\r\n## Pull the data from the database\r\n### Set up the connection\r\nDATABASE_URL = 'enter'\r\nconn = psycopg2.connect(DATABASE_URL, sslmode = 'require')\r\n\r\n### Store into our dataframe df\r\ndf = pd.read_sql('select * from tweets', con = conn, index_col = 'index')\r\n\r\n### Reindex the values (we will use these for our twitter feed)\r\ndf_1t = df[0:30].reset_index()\r\ndf_2t = df[31:61].reset_index()\r\ndf_3t = df[62:92].reset_index()\r\ndf_4t = df[93:123].reset_index()\r\ndf_5t = df[124:154].reset_index()\r\ndf_6t = df[155:185].reset_index()\r\ndf_7t = df[186:216].reset_index()\r\ndf_8t = df[217:247].reset_index()\r\ndf_9t = df[248:278].reset_index()\r\ndf_10t = df[279:309].reset_index()\r\n\r\n## Dataframe that will contain all the contents and sentiment of the tweets.\r\ntotal_tweets_df = pd.DataFrame(columns = ['Tweets', 'Sentiment'])\r\n\r\n## Vader Sentiment Analyzer\r\nanalyser = SentimentIntensityAnalyzer()\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_sql" ] ]
SamSamhuns/ml_linear_logistic_regression
[ "fb13f96a51d9f838cfecc382667f420d82d8bfaf" ]
[ "src/logistic_regression.py" ]
[ "import numpy as np\nimport logging\nimport sys\nfrom enum import Enum\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom tqdm import tqdm\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.model_selection import train_test_split, KFold\n\nlogging.basicConfig(stream=sys.stderr, level=logging.INFO)\nplt.style.use('seaborn-darkgrid')\n\nmodel_parameter = namedtuple(\n 'model_parameter', ('lambda_ridge, alpha, epochs')\n)\n\n\nclass Preprocessing:\n\n def __init__(self):\n self.mean = None\n self.std = None\n\n @staticmethod\n def standardize(X, mean=None, std=None, inplace=False):\n if mean is None:\n mean = np.mean(X, axis=0)\n if std is None:\n std = np.std(X, axis=0)\n\n std = np.where(std == 0, 1, std)\n if inplace:\n X -= mean\n X /= std\n else:\n X = (X - mean) / std\n return X\n\n @staticmethod\n def insert_bias_term(X):\n bias_arr = np.ones(X.shape[0])\n return np.c_[bias_arr, X]\n\n def standardize_save_state(self, X, mean=None, std=None, inplace=False):\n if mean is None:\n mean = np.mean(X, axis=0)\n if std is None:\n std = np.std(X, axis=0)\n\n std = np.where(std == 0, 1, std)\n self.mean = mean\n self.std = std\n if inplace:\n X -= mean\n X /= std\n else:\n X = (X - mean) / std\n return X\n\n def fit(self, X, inplace=False):\n if self.mean is None or self.std is None:\n raise ValueError(\"Mean or std is not for the preprocessing object\")\n if inplace:\n X -= self.mean\n X /= self.std\n else:\n X = (X - self.mean) / self.std\n return X\n\n\nclass LogisticRegr:\n\n slots = ['theta']\n\n def __init__(self):\n self.theta = None\n\n def __repr__(self):\n return ' '.join([str(val) for val in np.ndarray.flatten(self.theta)])\n\n def fit(self, X, y,\n logging_enabled=True,\n model_params=model_parameter(lambda_ridge=0,\n alpha=0.5,\n epochs=5000)):\n \"\"\"\n WARNING: X must be normalized and have the bias term before training\n Batch Gradient Descent for logistic regression\n \"\"\"\n if X.shape[0] != y.shape[0]:\n raise ValueError(\n f\"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching\")\n\n loss_arr = []\n m = X.shape[0]\n self.theta = np.zeros(X.shape[1])\n lambda_ridge, alpha, epochs = model_params\n\n for epoch in range(epochs):\n gradient_wout_regu = (1 / m) * np.dot(\n np.matrix.transpose(X), LogisticRegr.sigmoid(np.dot(X, self.theta)) - y)\n # 0th parameter/bias is not regularized\n self.theta[0] = self.theta[0] - alpha * gradient_wout_regu[0]\n gradient_with_regu = gradient_wout_regu + \\\n ((lambda_ridge / m) * self.theta)\n # All other parameters regularized\n self.theta[1:] = self.theta[1:] - alpha * gradient_with_regu[1:]\n\n if epoch % 100 == 0:\n current_log_loss = self.loss(X, y, lambda_ridge)\n if logging_enabled:\n print(f\"Loss at epoch {epoch} is {current_log_loss}\")\n loss_arr.append(current_log_loss)\n\n if logging_enabled:\n self.plot_loss_curve(loss_arr, epochs)\n\n def plot_loss_curve(self, loss_arr, epochs, log_scale: bool = False):\n if log_scale:\n plt.semilogx(range(epochs), loss_arr)\n else:\n plt.plot(loss_arr)\n plt.ylabel('log loss')\n plt.xlabel('Epoch (x100)')\n plt.title(\"Loss Overtime\")\n plt.grid(True)\n plt.show()\n\n @staticmethod\n def log_loss(X, y, theta, lambda_ridge: float = 0.0):\n if X.shape[0] != y.shape[0]:\n raise ValueError(\n f\"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching\")\n elif X.shape[1] != theta.shape[0]:\n raise ValueError(\n f\"X shape {X.shape[1]} != theta shape {theta.shape[0]}. Dimensions not matching\")\n\n m = X.shape[0]\n h = LogisticRegr.sigmoid(np.dot(X, theta))\n # loss J(theta) = -(1/m)*(yt*logh + (1-y)t*log(1-h)) + lambda/2m theta_t * theta\n return ((-1 / m) * (\n np.dot(np.matrix.transpose(y), np.log(h))\n + np.dot(np.matrix.transpose(1-y), np.log(1-h)))\n + ((lambda_ridge/(2*m))\n * np.dot(np.matrix.transpose(theta[1:]), theta[1:]))\n )\n\n @staticmethod\n def sigmoid(X):\n return 1 / (1 + np.exp(-X))\n\n @staticmethod\n def predict(X, theta, threshold: float = 0.5):\n prediction = np.dot(X, theta)\n prediction[prediction >= threshold] = 1\n prediction[prediction < threshold] = 0\n return prediction\n\n def loss(self, X, y, lambda_ridge: float = 0.0):\n return LogisticRegr.log_loss(X, y, self.theta, lambda_ridge)\n\n def accuracy(self, X, y, threshold: float = 0.5):\n \"\"\"\n accuracy = (TP+TN) / (TP+FP+TN+FN)\n \"\"\"\n y_pred = LogisticRegr.predict(X, self.theta, threshold)\n return len([1 for y_true, y_hat in zip(y, y_pred) if y_true == y_hat]) / X.shape[0]\n\n def precision(self, X, y, threshold: float = 0.5):\n \"\"\"\n Ratio of correctly predicted positive observations to total positive observations\n precision = TP / (TP+FP)\n \"\"\"\n y_pred = LogisticRegr.predict(X, self.theta, threshold)\n true_positives = len(\n [1 for y_true, y_hat in zip(y, y_pred) if y_true == y_hat == 1])\n total_positives_pred = sum(y_pred)\n return true_positives / total_positives_pred\n\n def recall(self, X, y, threshold: float = 0.5):\n \"\"\"\n Also known as Sensitivity\n Ratio of correctly predicted positive observations to all observations that are actually positive\n recall = TP / (TP+FN)\n \"\"\"\n y_pred = LogisticRegr.predict(X, self.theta, threshold)\n true_positives = len(\n [1 for y_true, y_hat in zip(y, y_pred) if y_true == y_hat == 1])\n true_pos_and_false_neg = len([1 for y_true, y_hat in zip(\n y, y_pred) if y_true == y_hat == 1 or (y_true == 1 and y_hat == 0)])\n return true_positives / true_pos_and_false_neg\n\n def f1_score(self, X, y, threshold: float = 0.5):\n \"\"\"\n Weighted average of precision and recall\n Preferred to accuracy as accuracy is misleading for unbalanced datasets\n f1_score = 2*(Recall*Precision) / (Recall+Precision)\n \"\"\"\n recall = self.recall(X, y, threshold)\n precision = self.precision(X, y, threshold)\n return (2 * recall * precision) / (recall + precision)\n\n def plot_confusion_matrix(self, X, y, threshold: float = 0.5, custom: bool = True):\n y_pred = LogisticRegr.predict(X, self.theta, threshold)\n\n tp = len([1 for y_true, y_hat in zip(\n y, y_pred) if y_true == y_hat == 1])\n tn = len([1 for y_true, y_hat in zip(\n y, y_pred) if y_true == y_hat == 0])\n fp = len([1 for y_true, y_hat in zip(y, y_pred)\n if y_true == 0 and y_hat == 1])\n fn = len([1 for y_true, y_hat in zip(y, y_pred)\n if y_true == 1 and y_hat == 0])\n if custom: # use custom confusion matrix generator\n print(\"\\t\\t\\t Actual values\")\n print(\"\\t\\t\\tPositive(1) Negative(0)\")\n print(f\"Predicted| Positive(1) TP {tp}\\t FP {fp}\")\n print(f\" Values | Negative(0) FN {fn}\\t\\t TN {tn}\")\n else: # use sklearn.metrics.confusion_matrix\n pass\n\n\nclass KFoldCrossValidator:\n\n __slots__ = ['train_loss', 'test_loss',\n 'train_accuracy', 'test_accuracy', 'theta']\n\n def __init__(self):\n self.train_loss = []\n self.test_loss = []\n self.train_accuracy = []\n self.test_accuracy = []\n self.theta = None\n\n def cross_validate(self, model, X, y, k=10,\n logging_enabled=True,\n model_params=model_parameter(\n lambda_ridge=0, alpha=0.5, epochs=5000),\n custom_kfold=False,\n seed=np.random.randint(10000)):\n \"\"\"\n Cross validation function, the theta parameter chosen is from the split with the least test error\n \"\"\"\n\n m = X.shape[0]\n lambda_ridge, alpha, epochs = model_params\n min_test_error = float('inf') # tracks the minimum error with k-folds\n best_fit_theta = None # saves the best theta value with the min_test_error\n preprocessor_object = Preprocessing()\n\n if custom_kfold:\n logging.info(\n f\"Running Custom KFoldCrossValidator with {k} folds and lambda={lambda_ridge}\")\n np.random.seed(seed) # seed random shuffler\n if m < k:\n raise ValueError(\n f\"No of k splits {k} cannot be greater than no. of samples {m}\")\n\n # Randomly shuffle X and y inplace while matching corresponding feat and target\n for i in range(m):\n swap_idx = np.random.randint(i, m)\n # ensures the corresponding feat-target values match\n X[[i, swap_idx]] = X[[swap_idx, i]]\n y[[i, swap_idx]] = y[[swap_idx, i]]\n\n # test start and end idx\n fold_step = m // k\n start = 0\n end = fold_step\n\n for i in range(k):\n end = min(end, m) # prevent array idx out of bounds\n X_train, X_test = np.concatenate(\n [X[0:start], X[end:m]], axis=0), X[start:end]\n y_train, y_test = np.concatenate(\n [y[0:start], y[end:m]], axis=0), y[start:end]\n start += fold_step\n end += fold_step\n\n X_train = preprocessor_object.standardize_save_state(X_train)\n # standardizing X_test with X_train params\n X_test = preprocessor_object.fit(X_test)\n\n X_train = Preprocessing.insert_bias_term(X_train)\n X_test = Preprocessing.insert_bias_term(X_test)\n\n model.fit(X_train, y_train, logging_enabled, model_params)\n cur_train_loss = model.loss(X_train, y_train, lambda_ridge)\n cur_test_loss = model.loss(X_test, y_test, lambda_ridge)\n self.train_loss.append(cur_train_loss)\n self.test_loss.append(cur_test_loss)\n\n if cur_test_loss < min_test_error:\n min_test_error = cur_test_loss\n best_fit_theta = model.theta\n else:\n logging.info(\n f\"Running Sklearn KFoldCrossValidator with {k} folds and lambda {lambda_ridge}\")\n kf = KFold(n_splits=k, random_state=seed, shuffle=True)\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n X_train = preprocessor_object.standardize_save_state(X_train)\n # standardizing X_test with X_train params\n X_test = preprocessor_object.fit(X_test)\n\n X_train = Preprocessing.insert_bias_term(X_train)\n X_test = Preprocessing.insert_bias_term(X_test)\n\n model.fit(X_train, y_train, logging_enabled, model_params)\n cur_train_loss = model.loss(X_train, y_train, lambda_ridge)\n cur_test_loss = model.loss(X_test, y_test, lambda_ridge)\n self.train_loss.append(cur_train_loss)\n self.test_loss.append(cur_test_loss)\n\n if cur_test_loss < min_test_error:\n min_test_error = cur_test_loss\n best_fit_theta = model.theta\n self.theta = best_fit_theta\n\n\nif __name__ == \"__main__\":\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.40, random_state=42)\n\n preprocessing_object = Preprocessing()\n X_train_std = preprocessing_object.standardize_save_state(X_train)\n X_train_std = preprocessing_object.insert_bias_term(X_train_std)\n\n bcw_model = LogisticRegr()\n model_params = model_parameter(lambda_ridge=0, alpha=0.5, epochs=5000)\n\n bcw_model.fit(X_train_std, y_train, model_params)\n" ]
[ [ "numpy.dot", "numpy.exp", "numpy.mean", "numpy.where", "numpy.concatenate", "numpy.log", "numpy.random.randint", "numpy.ndarray.flatten", "numpy.zeros", "matplotlib.pyplot.title", "numpy.std", "matplotlib.pyplot.style.use", "sklearn.model_selection.KFold", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "numpy.matrix.transpose", "sklearn.datasets.load_breast_cancer", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel" ] ]
JunguangJiang/SuperResolution
[ "f11751cffc93b7eed999eba492cd3899783fd442" ]
[ "preprocess/net_interpolation.py" ]
[ "import sys\nimport torch\nfrom collections import OrderedDict\n\"\"\"\npython net_interpolation.py [model1_path] [model2_path] [net_interp_path] [alpha]\ne.g. python net_interpolation.py ../../experiment/baseline_edsr_mse2/model/model_best.pt ../../experiment/esrgan/model/model_best.pt ../../experiment/interpolation/model.pt 0.2\n\n\"\"\"\n\n\ndef net_interpolation(model1_path, model2_path, net_interp_path, alpha):\n \"\"\"\n interpolate the parameters of the model1 and model2\n eg. p1 belongs to model1, p2 belongs to model2\n then (1-alpha)*p1 + alpha*p2 belongs to the interpolated model\n :param model1_path:\n :param model2_path:\n :param net_interp_path:\n :param alpha:\n :return:\n \"\"\"\n net1 = torch.load(model1_path)\n net2 = torch.load(model2_path)\n net_interp = OrderedDict()\n\n for k, v1 in net1.items():\n v2 = net2[k]\n net_interp[k] = (1-alpha) * v1 + alpha * v2\n torch.save(net_interp, net_interp_path)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(__doc__)\n else:\n net_interpolation(model1_path=sys.argv[1], model2_path=sys.argv[2],\n net_interp_path=sys.argv[3], alpha=sys.argv[4])\n" ]
[ [ "torch.save", "torch.load" ] ]
harvineet/gpytorch
[ "8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e" ]
[ "gpytorch/utils/interpolation.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\nfrom .broadcasting import _matmul_broadcast_shape\n\n\nclass Interpolation(object):\n \"\"\"\n \"\"\"\n\n def _cubic_interpolation_kernel(self, scaled_grid_dist):\n \"\"\"\n Computes the interpolation kernel u() for points X given the scaled\n grid distances:\n (X-x_{t})/s\n where s is the distance between neighboring grid points. Note that,\n in this context, the word \"kernel\" is not used to mean a covariance\n function as in the rest of the package. For more details, see the\n original paper Keys et al., 1989, equation (4).\n\n scaled_grid_dist should be an n-by-g matrix of distances, where the\n (ij)th element is the distance between the ith data point in X and the\n jth element in the grid.\n\n Note that, although this method ultimately expects a scaled distance matrix,\n it is only intended to be used on single dimensional data.\n \"\"\"\n U = scaled_grid_dist.abs()\n res = torch.zeros(U.size(), dtype=U.dtype, device=U.device)\n\n U_lt_1 = 1 - U.floor().clamp(0, 1) # U, if U < 1, 0 otherwise\n res = res + (((1.5 * U - 2.5).mul(U)).mul(U) + 1) * U_lt_1\n\n # u(s) = -0.5|s|^3 + 2.5|s|^2 - 4|s| + 2 when 1 < |s| < 2\n U_ge_1_le_2 = 1 - U_lt_1 # U, if U <= 1 <= 2, 0 otherwise\n res = res + (((-0.5 * U + 2.5).mul(U) - 4).mul(U) + 2) * U_ge_1_le_2\n return res\n\n def interpolate(self, x_grid, x_target, interp_points=range(-2, 2)):\n # Do some boundary checking\n grid_mins = x_grid.min(0)[0]\n grid_maxs = x_grid.max(0)[0]\n x_target_min = x_target.min(0)[0]\n x_target_max = x_target.min(0)[0]\n lt_min_mask = (x_target_min - grid_mins).lt(-1e-7)\n gt_max_mask = (x_target_max - grid_maxs).gt(1e-7)\n if lt_min_mask.sum().item():\n first_out_of_range = lt_min_mask.nonzero().squeeze(1)[0].item()\n raise RuntimeError(\n (\n \"Received data that was out of bounds for the specified grid. \"\n \"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, \"\n \"max = {0:.3f}\"\n ).format(\n grid_mins[first_out_of_range].item(),\n grid_maxs[first_out_of_range].item(),\n x_target_min[first_out_of_range].item(),\n x_target_max[first_out_of_range].item(),\n )\n )\n if gt_max_mask.sum().item():\n first_out_of_range = gt_max_mask.nonzero().squeeze(1)[0].item()\n raise RuntimeError(\n (\n \"Received data that was out of bounds for the specified grid. \"\n \"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, \"\n \"max = {0:.3f}\"\n ).format(\n grid_mins[first_out_of_range].item(),\n grid_maxs[first_out_of_range].item(),\n x_target_min[first_out_of_range].item(),\n x_target_max[first_out_of_range].item(),\n )\n )\n\n # Now do interpolation\n interp_points = torch.tensor(interp_points, dtype=x_grid.dtype, device=x_grid.device)\n interp_points_flip = interp_points.flip(0)\n\n num_grid_points = x_grid.size(0)\n num_target_points = x_target.size(0)\n num_dim = x_target.size(-1)\n num_coefficients = len(interp_points)\n\n interp_values = torch.ones(\n num_target_points, num_coefficients ** num_dim, dtype=x_grid.dtype, device=x_grid.device\n )\n interp_indices = torch.zeros(\n num_target_points, num_coefficients ** num_dim, dtype=torch.long, device=x_grid.device\n )\n\n for i in range(num_dim):\n grid_delta = x_grid[1, i] - x_grid[0, i]\n lower_grid_pt_idxs = torch.floor((x_target[:, i] - x_grid[0, i]) / grid_delta).squeeze()\n lower_pt_rel_dists = (x_target[:, i] - x_grid[0, i]) / grid_delta - lower_grid_pt_idxs\n lower_grid_pt_idxs = lower_grid_pt_idxs - interp_points.max()\n lower_grid_pt_idxs.detach_()\n\n if len(lower_grid_pt_idxs.shape) == 0:\n lower_grid_pt_idxs = lower_grid_pt_idxs.unsqueeze(0)\n\n scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(-2)\n dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)\n\n # Find points who's closest lower grid point is the first grid point\n # This corresponds to a boundary condition that we must fix manually.\n left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 1)\n num_left = len(left_boundary_pts)\n\n if num_left > 0:\n left_boundary_pts.squeeze_(1)\n x_grid_first = x_grid[:num_coefficients, i].unsqueeze(1).t().expand(num_left, num_coefficients)\n\n grid_targets = x_target.select(1, i)[left_boundary_pts].unsqueeze(1).expand(num_left, num_coefficients)\n dists = torch.abs(x_grid_first - grid_targets)\n closest_from_first = torch.min(dists, 1)[1]\n\n for j in range(num_left):\n dim_interp_values[left_boundary_pts[j], :] = 0\n dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1\n lower_grid_pt_idxs[left_boundary_pts[j]] = 0\n\n right_boundary_pts = torch.nonzero(lower_grid_pt_idxs > num_grid_points - num_coefficients)\n num_right = len(right_boundary_pts)\n\n if num_right > 0:\n right_boundary_pts.squeeze_(1)\n x_grid_last = x_grid[-num_coefficients:, i].unsqueeze(1).t().expand(num_right, num_coefficients)\n\n grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)\n grid_targets = grid_targets.expand(num_right, num_coefficients)\n dists = torch.abs(x_grid_last - grid_targets)\n closest_from_last = torch.min(dists, 1)[1]\n\n for j in range(num_right):\n dim_interp_values[right_boundary_pts[j], :] = 0\n dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1\n lower_grid_pt_idxs[right_boundary_pts[j]] = num_grid_points - num_coefficients\n\n offset = (interp_points - interp_points.min()).long().unsqueeze(-2)\n dim_interp_indices = lower_grid_pt_idxs.long().unsqueeze(-1) + offset\n\n n_inner_repeat = num_coefficients ** i\n n_outer_repeat = num_coefficients ** (num_dim - i - 1)\n index_coeff = num_grid_points ** (num_dim - i - 1)\n dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(1, n_inner_repeat, n_outer_repeat)\n dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(1, n_inner_repeat, n_outer_repeat)\n interp_indices = interp_indices.add(dim_interp_indices.view(num_target_points, -1).mul(index_coeff))\n interp_values = interp_values.mul(dim_interp_values.view(num_target_points, -1))\n\n return interp_indices, interp_values\n\n\ndef left_interp(interp_indices, interp_values, rhs):\n \"\"\"\n \"\"\"\n is_vector = rhs.ndimension() == 1\n\n if is_vector:\n res = rhs.index_select(0, interp_indices.view(-1)).view(*interp_values.size())\n res = res.mul(interp_values)\n res = res.sum(-1)\n return res\n\n else:\n num_rows, num_interp = interp_indices.shape[-2:]\n num_data, num_columns = rhs.shape[-2:]\n interp_shape = torch.Size((*interp_indices.shape[:-1], num_data))\n output_shape = _matmul_broadcast_shape(interp_shape, rhs.shape)\n batch_shape = output_shape[:-2]\n\n interp_indices_expanded = interp_indices.unsqueeze(-1).expand(*batch_shape, num_rows, num_interp, num_columns)\n interp_values_expanded = interp_values.unsqueeze(-1).expand(*batch_shape, num_rows, num_interp, num_columns)\n rhs_expanded = rhs.unsqueeze(-2).expand(*batch_shape, num_data, num_interp, num_columns)\n res = rhs_expanded.gather(-3, interp_indices_expanded).mul(interp_values_expanded)\n return res.sum(-2)\n\n\ndef left_t_interp(interp_indices, interp_values, rhs, output_dim):\n \"\"\"\n \"\"\"\n from .. import dsmm\n\n is_vector = rhs.ndimension() == 1\n if is_vector:\n rhs = rhs.unsqueeze(-1)\n\n # Multiply the rhs by the interp_values\n # This multiplication here will give us the ability to perform backprop\n values = (rhs.unsqueeze(-2) * interp_values.unsqueeze(-1))\n\n # Define a bunch of sizes\n num_data, num_interp = interp_values.shape[-2:]\n num_cols = rhs.size(-1)\n interp_shape = torch.Size((*interp_indices.shape[:-2], output_dim, num_data))\n output_shape = _matmul_broadcast_shape(interp_shape, rhs.shape)\n batch_shape = output_shape[:-2]\n batch_size = batch_shape.numel()\n\n # Using interp_indices, create a sparse matrix that will sum up the values\n interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:]).contiguous()\n batch_indices = torch.arange(0, batch_size, dtype=torch.long, device=values.device).unsqueeze_(1)\n batch_indices = batch_indices.repeat(1, num_data * num_interp)\n column_indices = torch.arange(0, num_data * num_interp, dtype=torch.long, device=values.device).unsqueeze_(1)\n column_indices = column_indices.repeat(batch_size, 1)\n summing_matrix_indices = torch.stack([batch_indices.view(-1), interp_indices.view(-1), column_indices.view(-1)], 0)\n summing_matrix_values = torch.ones(\n batch_size * num_data * num_interp, dtype=interp_values.dtype, device=interp_values.device\n )\n size = torch.Size((batch_size, output_dim, num_data * num_interp))\n type_name = summing_matrix_values.type().split(\".\")[-1] # e.g. FloatTensor\n if interp_values.is_cuda:\n cls = getattr(torch.cuda.sparse, type_name)\n else:\n cls = getattr(torch.sparse, type_name)\n summing_matrix = cls(summing_matrix_indices, summing_matrix_values, size)\n\n # Sum up the values appropriately by performing sparse matrix multiplication\n values = values.view(batch_size, num_data * num_interp, num_cols)\n res = dsmm(summing_matrix, values)\n\n res = res.view(*batch_shape, *res.shape[-2:])\n if is_vector:\n res = res.squeeze(-1)\n return res\n" ]
[ [ "torch.Size", "torch.zeros", "torch.nonzero", "torch.min", "torch.arange", "torch.ones", "torch.abs", "torch.tensor", "torch.floor" ] ]
gleefe1995/KP2D
[ "adfb5a71e0e894f1b892a76d2720088a6d2e3db4" ]
[ "kp2d/utils/logging.py" ]
[ "# Copyright 2020 Toyota Research Institute. All rights reserved.\n\n\"\"\"Logging utilities for training\n\"\"\"\nimport os\n\nfrom termcolor import colored\nimport horovod.torch as hvd\nimport numpy as np\nimport torch\n\nfrom kp2d.utils.wandb import WandBLogger\n\n\ndef printcolor_single(message, color=\"white\"):\n \"\"\"Print a message in a certain color\"\"\"\n print(colored(message, color))\n\n\ndef printcolor(message, color=\"white\"):\n \"Print a message in a certain color (only rank 0)\"\n if hvd.rank() == 0:\n print(colored(message, color))\n\n\nclass SummaryWriter:\n \"\"\"Wrapper class for tensorboard and WandB logging\"\"\"\n def __init__(self, log_path, params,\n description=None,\n project='monodepth',\n entity='tri',\n mode='run',\n job_type='train',\n log_wb=True):\n self.log_wb = log_wb\n self._global_step = 0 \n if self.log_wb:\n os.environ['WANDB_DIR'] = log_path\n self.wb_logger = WandBLogger(\n params, description=description,\n project=project, entity=entity, mode=mode, job_type=job_type)\n\n @property\n def run_name(self):\n return self.wb_logger.run_name\n\n @property\n def run_url(self):\n return self.wb_logger.run_url\n\n @property\n def global_step(self):\n return self._global_step\n\n def log_wandb(self, value):\n self.log_wb = value\n\n def add_scalar(self, tag, scalar_value):\n if self.log_wb:\n self.wb_logger.log_values(tag, scalar_value, now=False)\n\n def add_image(self, tag, img_tensor):\n assert img_tensor.max() <= 1.0\n assert (isinstance(img_tensor, torch.Tensor) and img_tensor.device == torch.device(\n 'cpu')) or isinstance(img_tensor, np.ndarray)\n if self.log_wb:\n caption = tag\n if isinstance(img_tensor, torch.Tensor):\n # shape: (C, H, W)\n size = tuple(img_tensor.shape[-2:][::-1])\n assert img_tensor.shape[0] == 1 or img_tensor.shape[0] == 3, \\\n 'Expects CHW with C=1 or 3, provided {}'.format(img_tensor.shape)\n self.wb_logger.log_tensor_image(img_tensor * 255, tag, caption, size=size, now=False)\n else:\n # shape: (H, W, C)\n size = tuple(img_tensor.shape[:2][::-1])\n assert img_tensor.shape[-1] == 1 or img_tensor.shape[-1] == 3, \\\n 'Expects HWC with C=1 or 3, provided {}'.format(img_tensor.shape)\n self.wb_logger.log_numpy_image((img_tensor * 255).astype(np.uint8), tag, caption, size=size, now=False)\n\n def commit_log(self):\n if self.log_wb and self._global_step >= 0:\n self.wb_logger.commit_log()\n self._global_step += 1" ]
[ [ "torch.device" ] ]
AleksCipri/IMNN
[ "57dd6225205a65b436931df6cee78cf1de199bcb" ]
[ "IMNN/IMNN.py" ]
[ "\"\"\"Information maximising neural network\nThis module provides the methods necessary to build and train an information\nmaximising neural network to optimally compress data down to the number of\nmodel parameters.\n\nTODO\n____\nStill some docstrings which need finishing\nSequential training for large data\nUse precomputed external covariance and derivatives\n\"\"\"\n\n\n__version__ = '0.2a3'\n__author__ = \"Tom Charnock\"\n\n\nimport tensorflow as tf\nimport tqdm\nfrom IMNN.utils import utils\n\n\nclass IMNN():\n \"\"\"Information maximising neural network\n The information maximising neural network class contains all the functions\n necessary to train a neural network to maximise the Fisher information of\n the summaries of the input data.\n Attributes\n __________\n u : class\n model of functions for parameter error checking\n dtype : TF type\n 32 bit or 64 TensorFlow tensor floats\n itype : TF type\n 32 bit or 64 TensorFlow tensor integers\n save : bool\n whether to save the model\n filename : str\n directory to save the model\n n_params : int\n number of parameters in physical model\n n_summaries : int\n number of summaries to compress data to\n n_s : int\n number of simulations to calculate summary covariance\n n_p : int\n number of derivatives simulations to calculate derivative of mean\n single_dataset : bool\n whether multiple datasets are needed for fiducial and derivative sims\n numerical : bool\n whether numerical derivative is used for training data\n test_numerical : bool {None}\n whether numerical derivative is used for validation data\n use_external : bool\n whether external summaries are used for training data\n test_use_external : bool {None}\n whether external summaries are used for validation data\n dΔμ_dx : TF tensor float (n_s, n_s, n_summaries, n_summaries)\n derivative of the mean training summaries with respect to the summaries\n n_st : TF tensor float (1, )\n number of simulations to calculate summary covariance as tensor float\n n_sm1 : TF tensor float (1, )\n 1 mines number of simulations to calculate unbiased summary covariance\n model : TF model - keras or other\n neural network to do the compression defined using TF or keras\n optimiser : TF optimiser - keras or other\n optimisation operation to do weight updates, defined using TF or keras\n θ_fid : TF tensor float (n_params,)\n fiducial parameter values for training dataset\n test_θ_fid : TF tensor float {None} (n_params,)\n fiducial parameter values for validation dataset\n δθ : TF tensor float {None} (n_params,)\n parameter differences for numerical derivatives\n d2μ_dθdx : TF tensor (n_d, n_params, n_summaries, n_summaries, n_params)\n derivative of mean summaries with respect to the numerical summaries\n test_δθ : TF tensor float {None} (n_params,)\n parameter differences for numerical derivatives of validation dataset\n dataset : TF dataset\n TF dataset for data input\n test_dataset : TF dataset {None}\n TF dataset for validation data input\n derivative_dataset : TF dataset {None}\n TF dataset for numerical derivative simulations\n test_derivative_dataset : TF dataset {None}\n TF dataset for numerical derivative validation simulations\n F : TF tensor float (1 ,)\n collector for determinant of Fisher matrix\n C : TF tensor float (1 ,)\n collector for determinant of covariance of summaries\n Cinv : TF tensor float (1 ,)\n collector for determinant of inverse covariance of summaries\n dμ_dθ : TF tensor float [n_params, n_summaries]\n collector for determinant of derivative of mean summaries wrt params\n reg : TF tensor float (1 ,)\n collector for value of regulariser\n r : TF tensor float (1 ,)\n collector for value of coupling strength of the regulariser\n MLE_F : TF tensor float {None} (n_params, n_params)\n Fisher information matrix for using in ABC\n MLE_Finv : TF tensor float {None} (n_params, n_params)\n inverse Fisher information matrix for calculating MLE\n MLE_Cinv : TF tensor float {None} (n_summaries, n_summaries)\n inverse covariance matrix of summaries for calculating MLE\n MLE_dμ_dθ : TF tensor float {None} (n_params, n_summaries)\n derivative of the mean summaries with respect to parameters for MLE\n MLE_μ : TF tensor float {None} (n_summaries,)\n mean summary value for calculating MLE\n MLE_θ_fid : None (n_params,)\n fiducial parameter values for calculating MLE\n history : dict\n history object for saving training statistics.\n \"\"\"\n def __init__(self, n_params, n_summaries, n_covariance_sims,\n n_derivative_sims, fast_train=True, dtype=tf.float32, \n save=False, filename=None, verbose=True):\n \"\"\"Initialises attributes and calculates useful constants\n\n Parameters\n __________\n n_params : int\n number of parameters in physical model\n n_summaries : int\n number of summaries to compress data to\n n_covariance_sims : int\n number of simulations to calculate summary covariance\n n_derivative_sims : int\n number of derivatives simulations to calculate derivative of mean\n fast_train : bool\n whether to train using entire graph at once or a vectorised loop\n dtype : TF type\n 32 bit or 64 TensorFlow tensor floats (default tf.float32)\n save : bool\n whether to save the model\n filename : str\n name for saving the model\n verbose : bool\n whether to use verbose outputs in error checking module\n\n Calls\n _____\n initialise_attributes(int, int, int, int, tf.dtype, bool, str)\n Initialises all attributes and sets necessary constants\n \"\"\"\n self.u = utils.utils(verbose=verbose)\n self.initialise_attributes(n_params, n_summaries, n_covariance_sims,\n n_derivative_sims, fast_train, dtype, save, \n filename)\n\n def initialise_attributes(self, n_params, n_summaries, n_covariance_sims,\n n_derivative_sims, fast_train, dtype, save, \n filename):\n \"\"\"Initialises all attributes and sets necessary constants\n\n All attributes are set to None before they are loaded when\n necessary. The number of parameters and summaries are set and\n the number of simulations needed for the covariance and derivatives\n of the mean summaries is set.\n\n Parameters\n __________\n n_params : int\n number of parameters in physical model\n n_summaries : int\n number of summaries to compress data to\n n_covariance_sims : int\n number of simulations to calculate summary covariance\n n_covariance_sims : int\n number of derivatives simulations to calculate derivative of mean\n fast_train : bool\n whether to train using entire graph at once or a vectorised loop\n dtype : TF type\n 32 bit or 64 TensorFlow tensor floats (default tf.float32)\n save : bool\n whether to save the model\n filename : str\n name for saving the model\n\n Calls\n _____\n IMNN.utils.utils.positive_integer(int, str) -> int\n checks whether parameter is positive integer and error otherwise\n IMNN.utils.utils.check_num_datasets(int, int) -> bool\n checks whether to use a single dataset for derivatives and data\n initialise_history()\n sets up dictionary of lists for collecting training diagnostics\n load_useful_constants()\n makes TF tensors for necessary objects which can be precomputed\n \"\"\"\n if dtype == tf.float64:\n self.dtype = tf.float32\n self.itype = tf.int64\n else:\n self.dtype = tf.float32\n self.itype = tf.int32\n \n self.save = save\n if self.save:\n if filename is None:\n self.filename = \"model\"\n else:\n self.filename = str(filename)\n else:\n self.filename = None\n\n if fast_train:\n self.trainer = self.fast_train\n else:\n self.trainer = self.loop_train\n self.n_params = self.u.positive_integer(n_params, \"n_params\")\n self.n_summaries = self.u.positive_integer(n_summaries, \"n_summaries\")\n self.n_s = self.u.positive_integer(\n n_covariance_sims, \"n_covariance_sims\")\n self.n_d = self.u.positive_integer(\n n_derivative_sims, \"n_derivative_sims\")\n self.single_dataset = self.u.check_num_datasets(self.n_s, self.n_d)\n\n self.numerical = None\n self.test_numerical = None\n self.use_external = None\n self.test_use_external = None\n # self.sims_at_once = None\n # self.loop_sims = None\n\n self.dΔμ_dx = None\n self.n_st = None\n self.n_sm1 = None\n self.identity = None\n\n self.model = None\n self.optimiser = None\n\n self.θ_fid = None\n self.test_θ_fid = None\n self.δθ = None\n self.d2μ_dθdx = None\n self.test_δθ = None\n self.dataset = None\n self.test_dataset = None\n self.derivative_dataset = None\n self.test_derivative_dataset = None\n # self.indices = None\n # self.derivative_indices = None\n\n self.MLE_F = None\n self.MLE_Finv = None\n self.MLE_Cinv = None\n self.MLE_dμ_dθ = None\n self.MLE_μ = None\n self.MLE_θ_fid = None\n\n self.initialise_history()\n self.load_useful_constants()\n\n def initialise_history(self):\n \"\"\"Sets up dictionary of lists for collecting training diagnostics\n\n Dictionary of all diagnostics which can be collected during training.\n These are:\n det_F - determinant of Fisher information\n val_det_F - determinant of Fisher information from validation\n det_C - determinant of covariance of summaries\n val_det_C - determinant of covariance of validation summaries\n det_Cinv - determinant of inverse covariance of summaries\n val_det_Cinv - det of inverse covariance of validation summaries\n dμ_dθ - derivative of mean summaries wrt model parameters\n val_dμ_dθ - derivative of mean validation summaries wrt parameters\n reg - value of the regularisation term\n val_reg - value of the regularisation term from validation set\n r - value of the coupling strength of the regulariser\n val_r - value of validation coupling strength of the regulariser\n \"\"\"\n self.history = {\n \"det_F\": [],\n \"val_det_F\": [],\n \"det_C\": [],\n \"val_det_C\": [],\n \"det_Cinv\": [],\n \"val_det_Cinv\": [],\n \"dμ_dθ\": [],\n \"val_dμ_dθ\": [],\n \"reg\": [],\n \"val_reg\": [],\n \"r\": [],\n \"val_r\": []\n }\n\n def load_useful_constants(self):\n \"\"\"Makes TF tensors for necessary objects which can be precomputed\n\n Sets up the loop variable tensors for training and validation and\n calculates the derivative of the mean summaries with respect to outputs\n which can be precomputed. Also makes tensor float version of number of\n simulations for summary covariance and this value minus 1 for unbiased\n covariance calculation. The identity matrix is also defined.\n\n Calls\n _____\n get_dΔμ_dx() -> TF tensor float (n_s, n_s, n_summaries, n_summaries)\n derivative of the mean training summaries wrt the summaries\n get_n_s_minus_1() -> TF tensor float (1, )\n subtracts 1 from the value of the number of sims for covariance\n \"\"\"\n self.F = tf.Variable(0., dtype=self.dtype)\n self.C = tf.Variable(0., dtype=self.dtype)\n self.Cinv = tf.Variable(0., dtype=self.dtype)\n self.dμ_dθ = tf.zeros((self.n_params, self.n_summaries),\n dtype=self.dtype)\n self.reg = tf.Variable(0., dtype=self.dtype)\n self.r = tf.Variable(0., dtype=self.dtype)\n\n self.dΔμ_dx = self.get_dΔμ_dx()\n self.n_st = tf.Variable(self.n_s, dtype=self.dtype, name=\"n_s\")\n self.n_sm1 = self.get_n_s_minus_1()\n self.identity = tf.eye(self.n_summaries)\n\n def get_dΔμ_dx(self):\n \"\"\"Builds derivative of the mean training summaries wrt the summaries\n\n The difference between the derivative of the mean of summaries wrt the\n summaries and the derivative of the summaries wrt the summaries can be\n calculated by building the Kronecker delta which is\n $\\\\delta_{ab}\\\\delta_{ij} = \\\\frac{\\\\partial x_a^i}{\\\\partial x_b^j}$\n where a and b label the summary and i and j label the simulation.\n We then take the difference between this and its mean across the i\n simulations. This is needed for calculating the derivative of the\n covariance with respect to the summaries for the backpropagation.\n\n Returns\n _______\n TF tensor float (n_s, n_s, n_summaries, n_summaries)\n derivative of the mean training summaries wrt the summaries\n \"\"\"\n dx_dx = tf.einsum(\n \"ij,kl->ijkl\",\n tf.eye(self.n_s, self.n_s),\n tf.eye(self.n_summaries, self.n_summaries),\n name=\"derivative_of_summaries_wrt_summaries\")\n dμ_dx = tf.reduce_mean(dx_dx, axis=0, keepdims=True,\n name=\"derivative_of_mean_x_wrt_x\")\n return tf.subtract(dx_dx, dμ_dx,\n name=\"derivative_of_diff_mean_x_wrt_x\")\n\n def get_n_s_minus_1(self):\n \"\"\"Subtracts 1 from the value of the number of sims for covariance\n\n Returns\n _______\n TF tensor float (1,)\n (n_s - 1) as a float tensor\n \"\"\"\n return tf.subtract(\n tf.cast(\n self.n_s,\n self.dtype,\n name=\"number_of_simulations_float\"),\n 1.,\n name=\"number_of_simulations_minus_1\")\n\n def get_d2μ_dθdx(self, δθ):\n \"\"\"Calculate derivative of mean summaries wrt the numerical summaries\n\n The derivative of the mean summaries with respect to the summaries of\n the simulations for the numerical derivatives can be calculated\n knowning only the width of the parameter values using\n $\\\\frac{\\\\partial^2\\\\mu_\\\\mathscr{f}}{\\\\partial x\\\\partial\\\\theta} =\n \\\\frac{1}{2\\\\delta\\\\theta_\\\\alpha n_d}\\\\sum_i\\\\delta_{ab}\n \\\\delta_{\\\\alpha\\\\beta}\\\\delta_{ij}$\n where\n $\\\\delta_{ab}\\\\delta_{\\\\alpha\\\\beta}\\\\delta_{ij} =\n \\\\frac{\\\\partial x^{i\\\\alpha}_a}{\\\\partial x^{j\\\\beta}}_b}$\n in which a and b label the summary, $\\\\alpha$ and $\\\\beta$ label the\n which parameter the numerical derivative is with respect to and i and j\n label the simulation.\n\n Parameters\n __________\n δθ : TF tensor float (n_params,)\n parameter differences for numerical derivatives\n\n Returns\n _______\n TF tensor float (n_d, n_params, n_summaries, n_summaries, n_params)\n derivative of mean summaries wrt the numerical summaries\n \"\"\"\n dxa_dxb = tf.einsum(\n \"ij,kl,mn->ijklmn\",\n tf.eye(self.n_d, self.n_d),\n tf.eye(self.n_params, self.n_params),\n tf.eye(self.n_summaries, self.n_summaries),\n name=\"derivative_of_x_wrt_x_for_derivatives\")\n return tf.reduce_mean(\n tf.einsum(\n \"ijklmn,l->ijkmnl\",\n dxa_dxb,\n δθ,\n name=\"derivative_of_x_wrt_x_and_parameters\"),\n axis=0,\n name=\"derivative_of_mean_x_wrt_x_and_parameters\")\n\n def set_model(self, model, optimiser):\n \"\"\"Loads functional neural network and optimiser as attributes\n\n Parameters\n __________\n model : TF model (keras or other)\n neural network to do the compression defined using TF or keras\n optimiser : TF optimiser (keras or other)\n optimisation operation to do weight updates using TF or keras\n\n Calls\n _____\n check_model(int, int)\n prints warning that model should be correct size (might upgrade to\n real model checking)\n \"\"\"\n self.u.check_model(self.n_params, self.n_summaries)\n self.model = model\n self.optimiser = optimiser\n if self.save:\n self.model.save(self.filename)\n \n def load_model(self, optimiser, weights=None):\n \"\"\"Reloads a saved model\n \n Parameters\n __________\n optimiser : TF optimiser (keras or other)\n optimisation operation to do weight updates using TF or keras\n weights : str\n filename for saving weights\n \"\"\"\n self.model = tf.keras.models.load_model(self.filename)\n self.optimiser = optimiser\n if weights is not None:\n self.model.load_weights(self.filename + \"/\" + weights + \".h5\")\n \n\n def load_fiducial(self, θ_fid, train):\n \"\"\"Loads the fiducial parameters into a TF tensor\n\n Checks that the fiducial parameter values are of the correct shape\n and makes a tensor from it. This is then loaded into the training or\n validation attribute.\n\n Parameters\n __________\n θ_fid : np.ndarray (n_params)\n fiducial parameter values\n train : bool\n whether fiducial parameter values are for validation or training\n\n Calls\n _____\n IMNN.utils.utils.fiducial_check(np.ndarray, int)\n checks the size of the fiducial parameters array is correct\n \"\"\"\n self.u.fiducial_check(θ_fid, self.n_params)\n if train:\n self.θ_fid = tf.Variable(θ_fid, dtype=self.dtype, trainable=False,\n name=\"fiducial\")\n else:\n self.test_θ_fid = tf.Variable(θ_fid, dtype=self.dtype,\n trainable=False,\n name=\"test_fiducial\")\n\n def check_derivative(self, dd_dθ, δθ, train):\n \"\"\"Checks whether numerical derivative is used\n\n Checks whether the numerical parameter difference is defined, and if it\n is then sets the attribute to use numerical derivatives. If doing\n numerical derivatives then the parameter widths are loaded as tensors\n and the derivative of the mean of the numerical derivative summaries\n with respect to the summaries is calculated.\n\n TODO\n ____\n Should check that the shape of the derivatives is correct if δθ is\n passed. If δθ is passed then numpy array of derivative simulations\n should be be (n_d, 2, n_params) + input_shape where the first element\n of the second axis is the lower derivatives and the second element is\n the upper derivatives.\n\n Parameters\n __________\n dd_dθ : np.ndarray (n_s, n_params, ...) or (n_d, 2, n_params, ...)\n array of the derivatives of the simulation (could be numerical)\n δθ : np.ndarray (n_params,) {None}\n parameter differences for numerical derivatives if using\n train : bool\n whether fiducial parameter values are for validation or training\n\n Returns\n _______\n bool\n whether numerical derivatives should be done\n\n Calls\n _____\n IMNN.utils.utils.bool_none(any) -> bool\n checks whether input exists\n IMNN.utils.utils.delta_check(np.ndarray, int)\n checks wether parameter differences has correct shape (n_params,)\n get_d2μ_dθdx(TF tensor float) -> TF tensor float\n (n_d, n_params, n_summaries, n_summaries, n_params)\n derivative of mean summaries wrt the numerical summaries\n \"\"\"\n numerical = self.u.bool_none(δθ)\n if numerical:\n self.u.delta_check(δθ, self.n_params)\n δθ_tensor = tf.Variable(\n 1. / (2. * δθ), dtype=self.dtype, trainable=False,\n name=\"delta_theta\")\n if train:\n self.numerical = numerical\n if numerical:\n self.δθ = δθ_tensor\n self.d2μ_dθdx = self.get_d2μ_dθdx(self.δθ)\n else:\n self.test_δθ = δθ_tensor\n self.test_numerical = numerical\n return numerical\n\n # def to_loop_sims(self, sims_at_once, train):\n # loop_sims = bool_none(sims_at_once)\n # if train:\n # self.loop_sims = loop_sims\n # else:\n # self.test_loop_sims = loop_sims\n # return loop_sims\n\n def use_external_summaries(self, external_summaries, train):\n \"\"\"Checks whether external summaries are used\n\n It is possible to use external summaries as informative summaries\n alongside the IMNN summaries. This function checks whether these\n summaries have been passed.\n\n Parameters\n __________\n external_summaries : np.ndarray (n_s, n_external_summaries)\n array of other informative summaries to be included\n train : bool\n whether fiducial parameter values are for validation or training\n\n Returns\n _______\n bool\n whether external summaries should be used\n\n Calls\n _____\n IMNN.utils.utils.bool_none(any) -> bool\n checks whether input exists\n \"\"\"\n use_external = self.u.bool_none(external_summaries)\n if train:\n self.use_external = use_external\n else:\n self.test_use_external = use_external\n return use_external\n\n def build_dataset(self, data, batchsize=None, shufflesize=None):\n \"\"\"Create tensorflow dataset and split into batches and shuffle\n\n Parameters\n __________\n data : np.ndarray or (np.ndarray, ...)\n the data to be placed into the tensorflow dataset\n batchsize : int\n the size of the batch for calculating covariance or mean derivative\n shufflesize : int\n how many simulations should be shuffled (should be size of data[0])\n\n Returns\n _______\n dataset : TF dataset\n the tensorflow dataset containing the data\n \"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(data)\n if batchsize is not None:\n dataset = dataset.batch(batchsize)\n elif shufflesize is not None:\n dataset = dataset.shuffle(shufflesize)\n return dataset\n\n def setup_dataset(self, θ_fid, d, dd_dθ, δθ=None, external_summaries=None,\n external_derivatives=None, # sims_at_once=None,\n train=True):\n \"\"\"Builds TF datasets for training or validation sets\n\n By passing data to the function all options for training and validation\n are set, including batch sizing, whether to do numerical derivatives\n whether external summaries should be included. Once constructed the\n datasets are set as module attributes.\n\n TODO\n ____\n For very large simulations we would need to compute the summaries a\n few at a time and collect the derivatives and summaries to compute the\n Fisher information and backpropagation sequentially. This would take\n a parameter sims_at_once to say how many simulations could be processed\n at once and would set the loop_sims attribute. This is mostly\n implemented but is commented out.\n It would be possible to have external covariances and derivatives\n precomputed and therefore not need to be computed on every iteration.\n This would save computation, especially when the dimension of the\n external summaries is large. In this case we would only need to\n calculate the covariance of the set of external summaries with the IMNN\n summaries Cov[{s}, {x}] and never Cov[{s}, {s}]. The values of\n Cov[{s}, {x}] could just be appended to the outer block of the\n covariance matrix which would be cheaper. The numerical derivative of\n the external summaries could also be precomputed and just appended.\n\n Parameters\n __________\n θ_fid : np.ndarray (n_params,)\n parameter differences for numerical derivatives\n d : np.ndarray (n_s,) + input_shape\n simulations at fiducial parameter values for calculating covariance\n dd_dθ : np.ndarray (n_s, n_params, ...) or (n_d, 2, n_params, ...)\n array of the derivatives of the simulation (could be numerical)\n δθ : np.ndarray (n_params,) {None}\n parameter differences for numerical derivatives if using\n external_summaries : np.ndarray (n_s, n_external_summaries) {None}\n set of informative summaries for each simulation\n external_derivatives : np.ndarray (n_s, n_params, n_external_summaries)\n or (n_s, 2, n_params, n_external_summaries)\n derivative of the informative summaries wrt model parameters\n train : bool\n whether fiducial parameter values are for validation or training\n\n Calls\n _____\n load_fiducial(np.ndarray, bool)\n loads the fiducial parameters into a TF tensor\n use_external_summaries(np.ndarray, np.ndarray, bool) -> bool\n checks whether external summaries are used\n IMNN.utils.utils.batch_warning(int, int, bool) -> int\n checks whether the batchsize is valid given input data\n IMNN.utils.utils.size_check(int, int, str, str)\n checks wether two datasets have compatible sizes\n IMNN.util.utils.numericial_size_check(int, int, bool)\n checks whether the simulations for numercial derivatives is correct\n build_dataset(np.ndarray, {int}, {int}) -> TF dataset\n Create tensorflow dataset and split into batches and shuffle\n \"\"\"\n\n self.load_fiducial(θ_fid, train)\n numerical = self.check_derivative(dd_dθ, δθ, train)\n # loop_sims = self.to_loop_sims(sims_at_once, train)\n use_external = self.use_external_summaries(external_summaries, train)\n n_batch = self.u.batch_warning(d.shape[0], self.n_s, train)\n\n if (not self.single_dataset) and (not use_external):\n data = d\n else:\n data = (d,)\n\n if self.single_dataset:\n self.u.size_check(dd_dθ.shape[0], d.shape[0], \"dd_dθ\", \"d\")\n n_d_batch = n_batch\n data += (dd_dθ,)\n else:\n if numerical:\n n_d_batch = self.u.batch_warning(dd_dθ.shape[0], self.n_d,\n train, derivative=True)\n else:\n self.u.numerical_size_check(dd_dθ.shape[0], d.shape[0],\n numerical)\n n_d_batch = n_batch\n d_data = dd_dθ\n\n if use_external:\n self.u.size_check(external_summaries.shape[0], d.shape[0],\n \"external_summaries\", \"d\")\n data += (external_summaries,)\n if self.single_dataset:\n self.u.size_check(external_derivatives.shape[0], d.shape[0],\n \"external_derivatives\", \"d\")\n data += (external_derivatives,)\n else:\n if numerical:\n self.u.size_check(external_derivatives.shape[0],\n dd_dθ.shape[0], \"external_derivatives\",\n \"dd_dθ\")\n d_data = (d_data, external_derivatives)\n\n dataset = self.build_dataset(data, batchsize=self.n_s,\n shufflesize=self.n_s * n_batch)\n if not self.single_dataset:\n d_dataset = self.build_dataset(d_data, batchsize=self.n_d,\n shufflesize=self.n_d * n_d_batch)\n # if loop_sims:\n # def loop_batch(*x):\n # return self.loop_batch(sims_at_once, *x)\n # ind = tf.expand_dims(tf.range(self.n_s, dtype=self.itype), 1)\n # indices = self.build_dataset(ind, batchsize=sims_at_once)\n # if self.single_dataset:\n # dataset = dataset.map(loop_batch)\n # else:\n # d_ind = tf.expand_dims(tf.range(self.n_d,\n # dtype=self.itype), 1)\n # d_indices = self.build_dataset(d_ind, batchsize=sims_at_once)\n # if use_external:\n # dataset = dataset.map(loop_batch)\n # d_dataset = dataset.map(loop_batch)\n # else:\n # dataset = dataset.map(\n # lambda x: (tf.data.Dataset.from_tensor_slices(x)\n # .batch(sims_at_once).repeat(2)))\n # d_dataset = d_dataset.map(\n # lambda x: (tf.data.Dataset.from_tensor_slices(x)\n # .batch(sims_at_once).repeat(2)))\n\n if train:\n self.dataset = dataset\n # if loop_sims:\n # self.indices = indices\n if not self.single_dataset:\n self.derivative_dataset = d_dataset\n # if loop_sims:\n # self.derivative_indices = d_indices\n else:\n self.test_dataset = dataset\n # if loop_sims:\n # self.test_indices = indices\n if not self.single_dataset:\n self.test_derivative_dataset = d_dataset\n # if loop_sims:\n # self.test_derivative_indices = d_indices\n\n # def loop_batch(self, sims_at_once, *x):\n # new_batch = tuple()\n # for i in range(len(x)):\n # new_batch += (tf.data.Dataset.from_tensor_slices(x[i])\n # .batch(self.sims_at_once)\n # .repeat(2),)\n # return new_batch\n\n def fit(self, n_iterations, reset=False, validate=False, patience=None,\n checkpoint=None, min_iterations=None):\n \"\"\"Fitting routine for IMNN\n\n Can reset model if training goes awry and clear diagnostics.\n Diagnostics are collected after one whole pass through the data.\n Validation can also be done if validation set is defined.\n\n Parameters\n __________\n n_iterations : int\n number of complete passes through the data\n reset : bool\n whether to reset weights of the model and clear diagnostic values\n validate : bool\n whether to validate the model using preloaded dataset (not checked)\n patience : int\n number of iterations to check for early stopping\n checkpoint : int\n number of iterations at which to checkpoint\n min_iterations : int\n number of initial iterations before using patience\n\n Calls\n _____\n initialise_history()\n sets up dictionary of lists for collecting training diagnostics\n IMNN.utils.utils.isnotebook()\n checks whether IMNN being trained in jupyter notebook\n loop_train(tensor, tensor, tensor, tensor, tensor, tensor) ->\n tensor, tensor, tensor, tensor, tensor, tensor\n loop routine through entire training dataset to update weights\n fast_train(tensor, tensor, tensor, tensor, tensor, tensor) ->\n tensor, tensor, tensor, tensor, tensor, tensor\n update weights on entire training dataset\n fast_validate(tensor, tensor, tensor, tensor, tensor, tensor) ->\n tensor, tensor, tensor, tensor, tensor, tensor\n roop routine through entire validation dataset for diagnostics\n \"\"\"\n if reset:\n self.initialise_history()\n self.model.reset_states()\n if n_iterations is None:\n n_iterations = int(1e10)\n if checkpoint is not None:\n if not self.save:\n print(\"Need to save model for checkpointing to work.\\n\" +\n \"Run IMNN.save=True;\\n\" +\n \"IMNN.filename='save-directory-path';\\n\" +\n \"IMNN.model.save(IMNN.filename)\")\n to_checkpoint = True\n self.model.save_weights(self.filename + \"/model_weights.h5\")\n else:\n to_checkpoint = False\n if patience is not None:\n if not self.save:\n print(\"Need to save model for patience to work.\\n\" +\n \"Run IMNN.save=True;\\n\" +\n \"IMNN.filename='save-directory-path';\\n\" +\n \"IMNN.model.save(IMNN.filename)\")\n else:\n print(\"Using patience length of \" + str(patience) + \n \". Maximum number of training iterations is \" + \n str(n_iterations) + \".\")\n print(\"Saving current model in \" + self.filename)\n self.model.save(self.filename)\n self.model.save_weights(self.filename + \"/model_weights.h5\")\n patience_counter = 0\n this_iteration = 0\n calculate_patience = True\n if min_iterations is not None:\n min_reached = False\n else:\n min_reached = True\n if validate:\n patience_criterion = \"val_det_F\"\n else:\n patience_criterion = \"det_F\"\n if checkpoint is None:\n checkpoint = 1\n to_checkpoint = True\n else:\n calculate_patience = False\n \n if self.u.isnotebook():\n bar = tqdm.tnrange(n_iterations, desc=\"Iterations\")\n else:\n bar = tqdm.trange(n_iterations, desc=\"Iterations\")\n for iterations in bar:\n self.F, self.C, self.Cinv, self.dμ_dθ, self.reg, self.r = \\\n self.trainer(self.F, self.C, self.Cinv, self.dμ_dθ,\n self.reg, self.r)\n self.history[\"det_F\"].append(self.F.numpy())\n self.history[\"det_C\"].append(self.C.numpy())\n self.history[\"det_Cinv\"].append(self.Cinv.numpy())\n self.history[\"dμ_dθ\"].append(self.dμ_dθ.numpy())\n self.history[\"reg\"].append(self.reg.numpy())\n self.history[\"r\"].append(self.r.numpy())\n postfix_dictionary = {\n \"det_F\": self.history[\"det_F\"][-1],\n \"det_C\": self.history[\"det_C\"][-1],\n \"det_Cinv\": self.history[\"det_Cinv\"][-1],\n \"r\": self.history[\"r\"][-1]}\n if validate:\n self.F, self.C, self.Cinv, self.dμ_dθ, self.reg, self.r = \\\n self.fast_validate(self.F, self.C, self.Cinv, self.dμ_dθ,\n self.reg, self.r)\n self.history[\"val_det_F\"].append(self.F.numpy())\n self.history[\"val_det_C\"].append(self.C.numpy())\n self.history[\"val_det_Cinv\"].append(self.Cinv.numpy())\n self.history[\"val_dμ_dθ\"].append(self.dμ_dθ.numpy())\n self.history[\"val_reg\"].append(self.reg.numpy())\n self.history[\"val_r\"].append(self.r.numpy())\n postfix_dictionary[\"val_det_F\"] = self.history[\"val_det_F\"][-1]\n postfix_dictionary[\"val_det_C\"] = self.history[\"val_det_C\"][-1]\n postfix_dictionary[\"val_det_Cinv\"] = self.history[\"val_det_Cinv\"][-1]\n postfix_dictionary[\"val_r\"] = self.history[\"val_r\"][-1]\n if to_checkpoint:\n if calculate_patience:\n if min_reached:\n if (self.history[patience_criterion][-1] \n <= self.history[patience_criterion][-2]):\n if patience_counter > patience:\n print(\"Reached \" + str(patience) + \" steps without increasing \" \n + patience_criterion \n + \". Resetting weights to those from iteration \" \n + str(this_iteration) + \".\")\n self.model.load_weights(self.filename + \"/model_weights.h5\")\n break\n else:\n patience_counter += 1\n else:\n patience_counter = 0\n if iterations % checkpoint == 0:\n this_iteration = iterations\n self.model.save_weights(self.filename + \"/model_weights.h5\") \n else:\n if iterations > min_iterations:\n min_reached = True\n if iterations % checkpoint == 0:\n this_iteration = iterations\n self.model.save_weights(self.filename + \"/model_weights.h5\") \n postfix_dictionary[\"patience\"] = patience_counter\n else:\n if iterations % checkpoint == 0:\n this_iteration = iterations\n self.model.save_weights(self.filename + \"/model_weights.h5\") \n bar.set_postfix(postfix_dictionary)\n\n def automatic_train(self, x, dx_dθ, dx_dw, d2x_dwdθ, s=None, ds_dθ=None):\n \"\"\"Automatic calculation of gradients for updating weights\n\n The Fisher information is maximised by automatically calculating the\n derivative of the logarithm of the determinant of the Fisher matrix\n regularised by the Frobenius norm of the elementwise difference of the\n summary covariance and the inverse covariance of the summaries from the\n identity matrix. The regulariser is necessary to set the scale of the\n summaries, i.e. the set of summaries which are preferably orthogonal\n with covariance of I. The strength of the regularisation is smoothly\n reduced as the summaries approach a covariance of I so that preference\n is given to optimising the Fisher information matrix.\n\n We calculate the analytic gradients of the loss function and the\n regularisation with respect to the outputs of the network and then\n update the weights using the optimisation scheme provided where the\n error is calculated using the chain rule\n $\\\\frac{\\\\partial\\\\Lambda}{\\\\partial w_j^l} =\n \\\\frac{\\\\partial\\\\Lambda}{\\\\partial x_i}\n \\\\frac{\\\\partial x_i}{\\\\partial w_j^l}$\n If numerical derivatives are used then we also need to include the\n response of the summaries for the derivatives on the network.\n\n\n TODO\n ____\n - Finish writing this docstring\n \n Parameters\n __________\n\n Returns\n _______\n\n Calls\n _____\n \"\"\"\n with tf.GradientTape() as tape:\n if self.numerical:\n tape.watch([x, dx_dθ])\n else:\n tape.watch(x)\n F, C, Cinv, dμ_dθ, _, _, _, _ = self.get_stats(\n x, dx_dθ, self.numerical, self.use_external, s=s,\n ds_dθ=ds_dθ, δθ=self.δθ)\n reg, _ = self.get_regularisation(C, Cinv)\n r, _ = self.get_r(reg)\n Λ = tf.subtract(tf.multiply(r, reg), tf.linalg.slogdet(F))\n if self.numerical:\n dΛ_dx, d2Λ_dxdθ = tape.gradient(Λ, [x, dx_dθ])\n else:\n dΛ_dx = tape.gradient(Λ, x)\n gradients = []\n for layer in range(len(self.model.variables)):\n gradients.append(\n tf.divide(\n tf.einsum(\n \"ij,ij...->...\",\n dΛ_dx,\n dx_dw[layer]),\n tf.dtypes.cast(\n self.n_s,\n self.dtype)))\n if self.numerical:\n gradients[layer] = tf.add(\n gradients[layer],\n tf.divide(\n tf.einsum(\n \"ijkl,iklj...->...\",\n d2Λ_dxdθ,\n d2x_dwdθ[layer]),\n tf.dtypes.cast(\n self.n_d,\n self.dtype)))\n self.optimiser.apply_gradients(zip(gradients, self.model.variables))\n return F, C, Cinv, dμ_dθ, reg, r\n \n def unpack_data(self, data, use_external):\n \"\"\" Unpacks zipped data and returns in regular format\n\n For generality all data is drawn from zipped datasets. For readability\n the data is then unpacked here to be returned in the same format no\n matter what the zipped datasets are.\n\n Parameters\n __________\n data : tuple of TF tensor floats\n the zipped data to be unpacked\n use_external : bool\n whether external summaries are used\n\n Returns\n _______\n TF tensor float (n_s,) + input_shape\n Unpacked fiducial data simulations\n TF tensor float (n_s, n_params) + input_shape or\n (n_d, 2, n_params) + input_shape\n Unpacked data for derivative or numerical derivative\n TF tensor float (n_s, n_external) {None}\n Unpacked external fiducial summaries if used\n TF tensor float (n_s, n_params, n_external) {None} or\n (n_d, 2, n_params, n_external)\n Unpacked external summaries for derivative or numerical derivative\n \"\"\"\n if self.single_dataset:\n d = data[0]\n dd_dθ = data[1]\n if use_external:\n s = data[2]\n ds_dθ = data[3]\n else:\n s = None\n ds_dθ = None\n else:\n if use_external:\n d = data[0]\n s = data[1]\n dd_dθ = data[2]\n ds_dθ = data[3]\n else:\n d = data[0]\n dd_dθ = data[1]\n s = None\n ds_dθ = None\n return d, dd_dθ, s, ds_dθ\n\n \n @tf.function()\n def fast_train(self, F, C, Cinv, dμ_dθ, reg, r):\n \"\"\"Automatic calculation of gradients for updating weights\n\n The Fisher information is maximised by automatically calculating the\n derivative of the logarithm of the determinant of the Fisher matrix\n regularised by the Frobenius norm of the elementwise difference of the\n summary covariance and the inverse covariance of the summaries from the\n identity matrix. The regulariser is necessary to set the scale of the\n summaries, i.e. the set of summaries which are preferably orthogonal\n with covariance of I. The strength of the regularisation is smoothly\n reduced as the summaries approach a covariance of I so that preference\n is given to optimising the Fisher information matrix.\n\n The gradients are calculated end-to-end in the graph and so it\n necessitates relatively small inputs. If the inputs are too big for\n fast_train(), then loop_train() can be used instead\n\n TODO\n ____\n - Finish writing this docstring\n \n Parameters\n __________\n\n Returns\n _______\n\n Calls\n _____\n \"\"\"\n if self.single_dataset:\n loop = self.dataset\n else:\n loop = zip(self.dataset, self.derivative_dataset)\n for data in loop:\n d, dd_dθ, s, ds_dθ = self.unpack_data(data, self.use_external)\n with tf.GradientTape() as tape:\n F, C, Cinv, dμ_dθ, _, _ = self.get_fisher(\n d, dd_dθ, self.numerical, self.use_external, s=s,\n ds_dθ=ds_dθ, δθ=self.δθ)\n reg, _ = self.get_regularisation(C, Cinv)\n r, _ = self.get_r(reg)\n Λ = tf.subtract(tf.multiply(r, reg), tf.linalg.slogdet(F))\n gradients = tape.gradient(Λ, self.model.variables)\n self.optimiser.apply_gradients(zip(gradients, self.model.variables))\n return tf.linalg.det(F), tf.linalg.det(C), tf.linalg.det(Cinv), \\\n dμ_dθ, reg, r\n \n @tf.function\n def loop_train(self, F, C, Cinv, dμ_dθ, reg, r):\n \"\"\" Tensorflow dataset loop for training IMNN\n\n All data in data set in looped over, summarised (and the jacobian wrt\n the network parameters calculated) to calculate the Fisher information\n and the Jacobian of the regularised ln(det(F)) wrt to the network to\n optimise the weights to maximise the Fisher information.\n\n Note that we make use of a vectorised mapping on the whole batch to\n calculate the summaries and jacobian since we know that each input is\n independent. If we did not use this then the Jacobian calculation would\n massively dominate all the computation time.\n\n Parameters\n __________\n F : TF tensor float (1 ,)\n collector for determinant of Fisher matrix\n C : TF tensor float (1 ,)\n collector for determinant of covariance of summaries\n Cinv : TF tensor float (1 ,)\n collector for determinant of inverse covariance of summaries\n dμ_dθ : TF tensor float [n_params, n_summaries]\n collector for det of derivative of mean summaries wrt params\n reg : TF tensor float (1 ,)\n collector for value of regulariser\n r : TF tensor float (1 ,)\n collector for value of coupling strength of the regulariser\n\n Returns\n _______\n TF tensor float (1 ,)\n determinant of Fisher matrix\n TF tensor float (1 ,)\n determinant of covariance of summaries\n TF tensor float (1 ,)\n determinant of inverse covariance of summaries\n TF tensor float [n_params, n_summaries]\n determinant of derivative of mean summaries wrt params\n TF tensor float (1 ,)\n value of regulariser\n TF tensor float (1 ,)\n value of coupling strength of the regulariser\n\n Calls\n _____\n unpack_data(tuple of TF tensor float, bool)\n -> tensor, tensor, tensor {None}, tensor {None}\n unpacks zipped data and returns in regular format\n get_jacobian(TF tensor float, {bool})\n -> TF tensor float, {TF tensor float}, list of TF tensor\n summarises and calculates jacobian of outputs wrt network\n get_numerical_derivative_mean(TF tensor, TF tensor, bool, {TF tensor})\n -> TF tensor float\n calculates the numerical mean derivative of summaries wrt params\n get_summary_derivative_mean(TF tensor float, TF tensor float)\n -> TF tensor float\n append external summary derivatives to the IMNN summary derivatives\n get_covariance(TF tensor float) -> TF tensor, TF tensor, TF tensor\n calculates covariance, mean and difference of mean from zero for x\n get_score(TF tensor float, TF tensor float) -> TF tensor float\n calculates the product of the inverse covariance and dμ_dθ\n get_fisher_matrix(TF tensor float, TF tensor float) -> TF tensor float\n calculates the symmetric Fisher information matrix\n train(tensor, tensor, tensor, tensor, tensor,\n list, tensor, list, tensor, tensor) -> tensor, tensor\n caculates and applies the maximisation of the Fisher information\n \"\"\"\n if self.single_dataset:\n loop = self.dataset\n else:\n loop = zip(self.dataset, self.derivative_dataset)\n for data in loop:\n d, dd_dθ, s, ds_dθ = self.unpack_data(data, self.use_external)\n x, dx_dw = tf.vectorized_map(\n lambda i: self.get_jacobian(\n tf.expand_dims(i, 0)),\n d)\n if self.numerical:\n dx_dθ, d2x_dwdθ = tf.vectorized_map(\n lambda i: self.get_jacobian(\n tf.expand_dims(i, 0)),\n tf.reshape(dd_dθ, (self.n_d * 2 * self.n_params,) + self.model.input_shape[1:]))\n dx_dθ = tf.reshape(dx_dθ, (self.n_d, 2, self.n_params, self.n_summaries))\n d2x_dwdθ = [\n tf.reshape(\n d2x_dwdθ[i], \n (self.n_d, 2, self.n_params, self.n_summaries) + self.model.variables[i].shape)\n for i in range(len(self.model.variables))]\n else:\n x, dx_dd, dx_dw = tf.vectorized_map(\n lambda i: self.get_jacobian(\n tf.expand_dims(i, 0),\n derivative=True),\n d)\n dx_dθ = None\n d2x_dwdθ = None\n F, C, Cinv, dμ_dθ, reg, r = self.automatic_train(\n x, dx_dθ, dx_dw, d2x_dwdθ, s=s, ds_dθ=ds_dθ)\n return tf.linalg.det(F), tf.linalg.det(C), tf.linalg.det(Cinv), \\\n dμ_dθ, reg, r\n\n @tf.function\n def fast_validate(self, F, C, Cinv, dμ_dθ, reg, r):\n \"\"\" Tensorflow dataset loop for validating IMNN\n\n All data in validation data set in looped over and summarised to\n calculate the Fisher information and regularisation terms.\n\n Parameters\n __________\n F : TF tensor float (1 ,)\n collector for determinant of Fisher matrix\n C : TF tensor float (1 ,)\n collector for determinant of covariance of summaries\n Cinv : TF tensor float (1 ,)\n collector for determinant of inverse covariance of summaries\n dμ_dθ : TF tensor float [n_params, n_summaries]\n collector for det of derivative of mean summaries wrt params\n reg : TF tensor float (1 ,)\n collector for value of regulariser\n r : TF tensor float (1 ,)\n collector for value of coupling strength of the regulariser\n\n Returns\n _______\n TF tensor float (1 ,)\n determinant of Fisher matrix\n TF tensor float (1 ,)\n determinant of covariance of summaries\n TF tensor float (1 ,)\n determinant of inverse covariance of summaries\n TF tensor float [n_params, n_summaries]\n determinant of derivative of mean summaries wrt params\n TF tensor float (1 ,)\n value of regulariser\n TF tensor float (1 ,)\n value of coupling strength of the regulariser\n\n Calls\n _____\n unpack_data(tuple of TF tensor float, bool)\n -> tensor, tensor, tensor {None}, tensor {None}\n unpacks zipped data and returns in regular format\n get_fisher(tensor, tensor, bool, bool, {tensor}, {tensor}, {tensor})\n -> tensor, tensor, tensor, tensor, tensor, tensor\n generic calculation of all necessary components for Fisher matrix\n get_regularisation(tensor, tensor) -> tensor, tensor\n calculates the frobenius norm of |C-I|+|C^{-1}-I|\n get_r(tensor) -> tensor, tensor\n calculates the strength of the coupling of the regulariser\n \"\"\"\n if self.single_dataset:\n loop = self.test_dataset\n else:\n loop = zip(self.test_dataset, self.test_derivative_dataset)\n for data in loop:\n d, dd_dθ, s, ds_dθ = self.unpack_data(data, self.test_use_external)\n F, C, Cinv, dμ_dθ, _, _ = self.get_fisher(\n d, dd_dθ, self.test_numerical, self.test_use_external, s=s,\n ds_dθ=ds_dθ, δθ=self.test_δθ)\n reg, _ = self.get_regularisation(C, Cinv)\n r, _ = self.get_r(reg)\n return tf.linalg.det(F), tf.linalg.det(C), tf.linalg.det(Cinv), \\\n dμ_dθ, reg, r\n\n def get_fisher(self, d, dd_dθ, numerical, use_external, s=None, ds_dθ=None,\n δθ=None):\n \"\"\"Generic calculation of all necessary components for Fisher matrix\n\n Without calculating any gradients, the Fisher information can be\n calculated by passing the data and derivatives of the data through the\n neural network and then computing the covariance of the summaries and\n the numerical derivative of the summaries wrt the model parameters or\n using the chain rule with the Jacobian of the summaries wrt the data.\n\n Parameters\n __________\n d : TF tensor float (n_s,) + input_shape\n fiducial simulations from the dataset\n dd_dθ : TF tensor float (n_s, n_params) + input_shape or\n (n_d, 2, n_params) + input_shape\n derivative of sims wrt params or sims for numerical derivative\n numerical : bool\n whether numerical derivative is used\n use_external : bool\n whether external summaries are used\n s : TF tensor float (n_s, n_external) {None}\n informative summaries at fiducial value\n ds_dθ : TF tensor float (n_s, n_params, n_external) {None} on\n (n_s, 2, n_params, n_external) {None}\n informative summaries for derivative (numerical) wrt parameters\n δθ : TF tensor float {None} (n_params,)\n parameter differences for numerical derivatives\n\n Returns\n _______\n TF tensor float (n_params, n_params)\n Fisher information matrix\n TF tensor float (n_summaries, n_summaries) or\n (n_summaries + n_external, n_summaries + n_external)\n covariance of the summaries\n TF tensor float (n_summaries, n_summaries) or\n (n_summaries + n_external, n_summaries + n_external)\n inverse covariance of the summaries\n TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n derivative of the mean of the summaries with respect to the params\n TF tensor float (1, n_summaries,) or (1, n_summaries + n_external,)\n mean of the summaries\n TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n product of inverse covariance and dμ_dθ\n\n Calls\n _____\n get_numerical_derivative_mean(TF tensor, TF tensor, bool, {TF tensor})\n -> TF tensor float\n calculates the numerical mean derivative of summaries wrt params\n get_summary_derivative_mean(TF tensor float, TF tensor float)\n -> TF tensor float\n append external summary derivatives to the IMNN summary derivatives\n get_covariance(TF tensor float) -> TF tensor, TF tensor, TF tensor\n calculates covariance, mean and difference of mean from summaries\n get_score(TF tensor float, TF tensor float) -> TF tensor float\n calculates the product of the inverse covariance and dμ_dθ\n get_fisher_matrix(TF tensor float, TF tensor float) -> TF tensor float\n calculates the symmetric Fisher information matrix\n \"\"\"\n if numerical:\n x = self.model(d)\n dx_dθ = tf.reshape(\n self.model(\n tf.reshape(\n dd_dθ, \n (self.n_d * 2 * self.n_params,) + self.model.input_shape[1:])), \n (self.n_d, 2, self.n_params, self.n_summaries))\n else:\n with tf.TapeGradient() as tape:\n tape.watch(d)\n x = self.model(d)\n dx_dd = tape.batch_jacobian(x, d)\n F, C, Cinv, dμ_dθ, μ, score, _, _ = self.get_stats(\n x, dx_dθ, numerical, use_external, s=s, ds_dθ=ds_dθ, δθ=δθ)\n return F, C, Cinv, dμ_dθ, μ, score\n\n def get_stats(self, x, dx_dθ, numerical, external, s=None, ds_dθ=None,\n δθ=None):\n \"\"\"\n \"\"\"\n if numerical:\n dμ_dθ = self.get_numerical_derivative_mean(dx_dθ, self.δθ,\n external,\n ds_dθ=ds_dθ)\n else:\n dμ_dθ = tf.divide(\n tf.einsum(\"ij...,ik...->kj\", dx_dd, dd_dθ),\n self.n_st)\n if external:\n dμ_dθ = self.get_summary_derivative_mean(dμ_dθ, ds_dθ)\n C, μ, Δμ = self.get_covariance(x)\n Cinv = tf.linalg.inv(C)\n if external:\n C_n = C\n Cinv_n = Cinv\n C, _, _ = self.get_covariance(tf.concat([s, x], axis=1))\n Cinv = tf.linalg.inv(C)\n else:\n C_n = None\n Cinv_n = None\n score = self.get_score(Cinv, dμ_dθ)\n F = self.get_fisher_matrix(Cinv, dμ_dθ, score)\n return F, C, Cinv, dμ_dθ, μ, score, C_n, Cinv_n\n\n def get_jacobian(self, d, derivative=False):\n \"\"\" Calculates the summaries and Jacobian wrt to network parameters\n\n As part of a vectorised batch calculation the jacobian is calculated\n for the summaries wrt the network parameters per summary. This is much\n cheaper than calculating the whole Jacobian which is not needed. We can\n also calculate the Jacobian of the summaries with respect to the input\n if we are using the chain rule to calculate the derivative of the mean\n summaries with respect to the parameters.\n\n Parameters\n __________\n d : TF tensor float (1,) + input_shape\n fiducial sims or sims for numerical derivative from the dataset\n derivative : bool\n whether to calculate the Jacobian of the summaries wrt the data\n\n Returns\n _______\n TF tensor float (n_summaries,)\n a single element summary of the input data slice\n TF tensor float (n_summaries,) + input_shape {None}\n Jacobian of a summary wrt an input data slice\n list of TF tensor float\n Jacobian of a summary wrt the network parameters\n \"\"\"\n with tf.GradientTape(persistent=True) as tape:\n if derivative:\n tape.watch(d)\n x = self.model(d)\n dx_dw = tape.jacobian(x, self.model.variables)\n if derivative:\n dx_dd = tape.jacobian(x, d)\n return tf.squeeze(x, 0), \\\n tf.squeeze(dx_dd, (0, 2)), \\\n [tf.squeeze(i, 0) for i in dx_dw]\n else:\n return tf.squeeze(x, 0), [tf.squeeze(i, 0) for i in dx_dw]\n\n def get_covariance(self, x):\n \"\"\"Calculates covariance, mean and difference of mean from summaries\n\n Calculates the mean of the summaries and then finds the difference\n between the mean and each summary. This can then be used to calculate\n the covariance of the summaries.\n\n Parameters\n __________\n x : TF tensor float (n_s, n_summaries) or\n (n_s, n_summaries + n_external)\n summaries of the input data (network and informative summaries)\n\n Returns\n _______\n TF tensor float (n_summaries, n_summaries) or\n (n_summaries + n_external, n_summaries + n_external)\n covariance of the summaries\n TF tensor float (1, n_summaries,) or (1, n_summaries + n_external,)\n mean of the summaries\n TF tensor float (n_s, n_summaries,) or (n_s, n_summaries + n_external,)\n difference between mean of the summaries and the summaries\n \"\"\"\n μ = tf.reduce_mean(\n x,\n axis=0,\n keepdims=True,\n name=\"mean\")\n Δμ = tf.subtract(\n x,\n μ,\n name=\"centred_mean\")\n C = tf.divide(\n tf.einsum(\n \"ij,ik->jk\",\n Δμ,\n Δμ,\n name=\"unnormalised_covariance\"),\n self.n_sm1,\n name=\"covariance\")\n return C, μ, Δμ\n\n def get_numerical_derivative_mean(self, dx_dθ, δθ, use_external,\n ds_dθ=None):\n \"\"\" Calculate the mean of the derivative of the summaries\n\n Numerically, we take away the simulations generated above the fiducial\n parameter values from the simulations generated below the fiducial\n parameter values and divide them by 2 times the difference between the\n upper and lower parameter values. If external informative summaries are\n provided then these are concatenated to the network summaries.\n\n Parameters\n _________\n dx_dθ : TF tensor float (n_d, 2, n_params, n_summaries)\n upper and lower parameter value summaries for numerical derivative\n δθ : TF tensor float (n_params,)\n parameter differences for numerical derivatives\n use_external : bool\n whether external summaries are used\n ds_dθ : TF tensor float (n_d, 2, n_params, n_external)\n upper and lower parameter value external summaries for derivative\n\n Returns\n _______\n TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n derivative of the mean of the summaries with respect to the params\n \"\"\"\n if use_external:\n lower = tf.concat([ds_dθ[:, 0, :, :], dx_dθ[:, 0, :, :]], axis=2)\n upper = tf.concat([ds_dθ[:, 1, :, :], dx_dθ[:, 1, :, :]], axis=2)\n else:\n lower = dx_dθ[:, 0, :, :]\n upper = dx_dθ[:, 1, :, :]\n return tf.reduce_mean(\n tf.multiply(\n tf.subtract(\n upper,\n lower),\n tf.expand_dims(tf.expand_dims(δθ, 0), 2)),\n axis=0,\n name=\"numerical_derivative_mean_wrt_parameters\")\n\n def get_summary_derivative_mean(self, dμ_dθ_network, ds_dθ):\n \"\"\" Concatenate external derivative mean to network summary mean\n\n When using external summaries we concatenate the mean of the derivative\n of external summaries with respect to the parameters to the precomputed\n mean of the network summaries with respect to the parameters.\n\n Parameters\n __________\n dμ_dθ_network : TF tensor float (n_params, n_summaries)\n derivative of the mean network summaries wrt the parameters\n ds_dθ : TF tensor float (n_s, n_params, n_external)\n derivative of external summaries wrt the parameters\n\n Returns\n _______\n TF tensor float (n_params, n_summaries + n_external)\n derivative of all summaries wrt the parameters\n \"\"\"\n dμ_dθ_external = tf.reduce_mean(ds_dθ, 0)\n return tf.concat([dμ_dθ_external, dμ_dθ_network], axis=2)\n\n def get_score(self, Cinv, dμ_dθ):\n \"\"\"Product of inverse covariance and derivative mean wrt parameters\n\n This tensor is used multiple times so we calculate it and store it to\n be reused\n\n Parameters\n __________\n Cinv : TF tensor float (n_summaries, n_summaries) or\n (n_summaries + n_external, n_summaries + n_external)\n inverse covariance of the summaries\n dμ_dθ : TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n derivative mean summaries wrt parameters\n\n Returns\n _______\n TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n product of inverse covariance and derivative of mean wrt params\n \"\"\"\n return tf.einsum(\n \"ij,kj->ki\",\n Cinv,\n dμ_dθ,\n name=\"score\")\n\n def get_fisher_matrix(self, Cinv, dμ_dθ, score):\n \"\"\"Calculate Fisher information matrix\n\n We calculate the Fisher information matrix. Since we want this to be\n exactly symmetrical to make the inversion quick then we take the lower\n half of the matrix and add it to its transpose.\n\n Parameters\n __________\n Cinv : TF tensor float (n_summaries, n_summaries) or\n (n_summaries + n_external, n_summaries + n_external)\n inverse covariance of the summaries\n dμ_dθ : TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n derivative mean summaries wrt parameters\n score : TF tensor float (n_params, n_summaries) or\n (n_params, n_summaries + n_external)\n product of inverse covariance and derivative of mean wrt params\n\n Returns\n _______\n TF tensor float (n_params, n_params)\n Fisher information matrix\n \"\"\"\n F = tf.linalg.band_part(\n tf.einsum(\n \"ij,kj->ik\",\n dμ_dθ,\n score,\n name=\"half_fisher\"),\n 0,\n -1,\n name=\"triangle_fisher\")\n return tf.multiply(\n 0.5,\n tf.add(\n F,\n tf.transpose(\n F,\n perm=[1, 0],\n name=\"transposed_fisher\"),\n name=\"double_fisher\"),\n name=\"fisher\")\n\n def get_regularisation(self, C, Cinv):\n CmI = tf.subtract(C, self.identity)\n CinvmI = tf.subtract(Cinv, self.identity)\n regulariser = tf.multiply(\n 0.5,\n tf.add(\n tf.square(\n tf.norm(CmI,\n ord=\"fro\",\n axis=(0, 1))),\n tf.square(\n tf.norm(CinvmI,\n ord=\"fro\",\n axis=(0, 1)))),\n name=\"regulariser\")\n return regulariser, CmI\n\n def get_r(self, regulariser):\n rate = tf.multiply(-self.α, regulariser)\n e_rate = tf.exp(rate)\n r = tf.divide(\n tf.multiply(\n self.λ,\n regulariser),\n tf.add(\n regulariser,\n e_rate))\n return r, e_rate\n\n def set_regularisation_strength(self, ϵ, λ):\n self.λ = tf.Variable(λ, dtype=self.dtype, trainable=False,\n name=\"strength\")\n self.α = -tf.divide(\n tf.math.log(\n tf.add(tf.multiply(tf.subtract(λ, 1.), ϵ),\n tf.divide(tf.square(ϵ), tf.add(1., ϵ)))),\n ϵ)\n\n def setup_MLE(self, dataset=True, θ_fid=None, d=None, dd_dθ=None, δθ=None,\n s=None, ds_dθ=None):\n if dataset:\n self.MLE_θ_fid = self.test_θ_fid\n if self.single_dataset:\n loop = self.dataset\n else:\n loop = zip(self.dataset, self.derivative_dataset)\n for data in loop:\n d, dd_dθ, s, ds_dθ = self.unpack_data(data,\n self.test_use_external)\n self.MLE_F, _, self.MLE_Cinv, self.MLE_dμ_dθ, self.MLE_μ, _ = \\\n self.get_fisher(\n d, dd_dθ, self.test_numerical, self.test_use_external,\n s=s, ds_dθ=ds_dθ, δθ=self.test_δθ)\n else:\n if δθ is not None:\n numerical = True\n δθ = tf.Variable(1. / (2. * δθ), dtype=self.dtype)\n else:\n numerical = False\n if s is not None:\n use_external = True\n else:\n use_external = False\n self.MLE_θ_fid = θ_fid\n self.MLE_F, _, self.MLE_Cinv, self.MLE_dμ_dθ, self.MLE_μ, _ = \\\n self.get_fisher(\n d, dd_dθ, numerical, use_external, s=s, ds_dθ=ds_dθ, δθ=δθ)\n self.MLE_Finv = tf.linalg.inv(self.MLE_F)\n\n def get_MLE(self, d):\n x = self.model(d)\n return tf.add(\n self.MLE_θ_fid,\n tf.einsum(\n \"ij,kj->ki\",\n self.MLE_Finv,\n tf.einsum(\n \"ij,kj->ki\",\n self.MLE_dμ_dθ,\n tf.einsum(\n \"ij,kj->ki\",\n self.MLE_Cinv,\n tf.subtract(x, self.MLE_μ)))))\n" ]
[ [ "tensorflow.exp", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.function", "tensorflow.reshape", "tensorflow.linalg.slogdet", "tensorflow.cast", "tensorflow.einsum", "tensorflow.concat", "tensorflow.GradientTape", "tensorflow.TapeGradient", "tensorflow.subtract", "tensorflow.linalg.inv", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.norm", "tensorflow.squeeze", "tensorflow.add", "tensorflow.zeros", "tensorflow.eye", "tensorflow.expand_dims", "tensorflow.keras.models.load_model", "tensorflow.dtypes.cast", "tensorflow.multiply", "tensorflow.linalg.det", "tensorflow.reduce_mean", "tensorflow.square" ] ]
tehstu/adventofcode
[ "afd2dd4d229606411d57821431e21ab9e30a0ec4" ]
[ "2021/day1.py" ]
[ "# Advent of Code 2021\n# Day 1\n\nimport pandas as pd\n\ndf = pd.read_csv(\"day1_input.txt\", header=None)\n\nincrease = 0\n\nfor i in range(len(df.index) - 1):\n current = df[0][i]\n next = df[0][i+1]\n if next > current:\n increase += 1\n\nprint(\"Number of increases = \", increase)" ]
[ [ "pandas.read_csv" ] ]
jonjon33/sandbox
[ "33aec6dc0bada8d9fe26a6df73d45eaf34e509c6" ]
[ "python/dsbook/1.17-exercises/sudoku.py" ]
[ "'''sudokusolver'''\nfrom __future__ import print_function # want that end=''\nimport numpy as np # using ndarray instead of a traditional array/list\n\nclass Sudoku(object):\n '''sudoku game state and solver class'''\n def __init__(self):\n '''initialize the board and vars'''\n opt = input('choose game method:\\n'+ \\\n ' 1) built-in puzzle\\n'+ \\\n ' 2) rng puzzle\\n'+ \\\n ' 3) user puzzle\\n')\n # switch game modes\n if opt == 1:\n puzzle = input('select puzzle (1-2):')\n # switch puzzles\n if puzzle == 1:\n self.state = np.array([[0,3,0,2,9,7,0,0,4], \\\n [5,0,0,0,3,0,6,2,7], \\\n [0,0,0,0,6,0,0,9,0], \\\n [0,0,8,0,5,0,0,7,1], \\\n [7,9,0,0,0,0,0,4,5], \\\n [4,5,0,0,2,0,8,0,0], \\\n [0,7,0,0,8,0,0,0,0], \\\n [9,1,5,0,7,0,0,0,8], \\\n [2,0,0,5,4,9,0,1,0]])\n elif puzzle == 2:\n self.state = np.array([[0,3,7,0,2,0,0,0,0], \\\n [0,0,0,6,9,0,0,0,3], \\\n [0,0,0,0,0,0,0,9,8], \\\n [0,2,8,0,0,0,0,0,0], \\\n [4,0,0,1,0,3,0,0,7], \\\n [0,0,0,0,0,0,4,8,0], \\\n [7,5,0,0,0,0,0,0,0], \\\n [1,0,0,0,4,9,0,0,0], \\\n [0,0,0,0,6,0,7,4,0]])\n else:\n print('no puzzle at given index!')\n exit()\n else:\n print(\"this option is TODO!\")\n exit()\n\n self.potentials = {}\n self.stepcount = 0\n self.maxdepth = 0\n\n def solve(self):\n '''solver loop that checks, steps, and prints'''\n self.checkboard()\n while not self.solvestep():\n self.show()\n self.checkboard()\n if len(self.potentials) == 0:\n print('solved in '+str(self.stepcount)+' steps!')\n print('max assumption depth was '+str(self.maxdepth)+' levels')\n return\n print(self.potentials) # get info if solvestep() fails\n\n def checkboard(self):\n '''iterate through board writing potentials dict'''\n for i,row in enumerate(self.state):\n for j,item in enumerate(row):\n if self.state[i,j] == 0:\n self.potentials[(i,j)] = self.checkcell((i,j))\n\n def solvestep(self):\n '''single step of solve procedure, non-zero return of errors'''\n hit = False\n self.stepcount = self.stepcount + 1\n for coord in self.potentials.keys():\n if len(self.potentials[coord]) == 1:\n hit = True\n (x,y) = coord\n self.state[x,y] = self.potentials[coord].pop()\n del self.potentials[coord]\n if hit == False:\n print(\"Warning: no 1-len cells\")\n return 1\n return 0\n\n def checkcell(self,coord):\n '''return list of potential legal cell contents'''\n (x,y) = coord\n potlist = range(1,10)\n\n # check the row\n for val in self.state[x]:\n if val in potlist:\n potlist.remove(val)\n\n # check the column\n for row in self.state:\n val = row[y]\n if val in potlist:\n potlist.remove(val)\n\n # check the box\n for i in range(3):\n currow = x + i - (x % 3)\n for j in range(3):\n curcol = y + j - (y % 3)\n val = self.state[currow,curcol]\n if val in potlist:\n potlist.remove(val)\n\n return potlist\n\n\n def show(self):\n '''print the board in a human-readable manner'''\n print()\n for i,row in enumerate(self.state):\n if i % 3 == 0 and i != 0:\n print('---------------------')\n for j in range(9):\n if j % 3 == 0 and j != 0:\n print('| ',end='')\n print(convchar(row[j])+' ',end='')\n print()\n print()\n\ndef convchar(num):\n '''helper to make state values easier to read on board'''\n if num == 0:\n return '.'\n else:\n return str(num)\n\n\ndef main():\n '''main area'''\n game = Sudoku()\n game.show()\n game.solve()\n\nmain()\n" ]
[ [ "numpy.array" ] ]
vuiseng9/BRECQ
[ "e455d62e93c70351961f8991c913b59435bd165f" ]
[ "quant/quant_block.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom quant.quant_layer import QuantModule, UniformAffineQuantizer, StraightThrough\nfrom models.resnet import BasicBlock, Bottleneck\nfrom models.regnet import ResBottleneckBlock\nfrom models.mobilenetv2 import InvertedResidual\n\n\nclass BaseQuantBlock(nn.Module):\n \"\"\"\n Base implementation of block structures for all networks.\n Due to the branch architecture, we have to perform activation function\n and quantization after the elemental-wise add operation, therefore, we\n put this part in this class.\n \"\"\"\n def __init__(self, act_quant_params: dict = {}):\n super().__init__()\n self.use_weight_quant = False\n self.use_act_quant = False\n # initialize quantizer\n\n self.act_quantizer = UniformAffineQuantizer(**act_quant_params)\n self.activation_function = StraightThrough()\n\n self.ignore_reconstruction = False\n\n def set_quant_state(self, weight_quant: bool = False, act_quant: bool = False):\n # setting weight quantization here does not affect actual forward pass\n self.use_weight_quant = weight_quant\n self.use_act_quant = act_quant\n for m in self.modules():\n if isinstance(m, QuantModule):\n m.set_quant_state(weight_quant, act_quant)\n\n\nclass QuantBasicBlock(BaseQuantBlock):\n \"\"\"\n Implementation of Quantized BasicBlock used in ResNet-18 and ResNet-34.\n \"\"\"\n def __init__(self, basic_block: BasicBlock, weight_quant_params: dict = {}, act_quant_params: dict = {}):\n super().__init__(act_quant_params)\n self.conv1 = QuantModule(basic_block.conv1, weight_quant_params, act_quant_params)\n self.conv1.activation_function = basic_block.relu1\n self.conv2 = QuantModule(basic_block.conv2, weight_quant_params, act_quant_params, disable_act_quant=True)\n\n # modify the activation function to ReLU\n self.activation_function = basic_block.relu2\n\n if basic_block.downsample is None:\n self.downsample = None\n else:\n self.downsample = QuantModule(basic_block.downsample[0], weight_quant_params, act_quant_params,\n disable_act_quant=True)\n # copying all attributes in original block\n self.stride = basic_block.stride\n\n def forward(self, x):\n residual = x if self.downsample is None else self.downsample(x)\n out = self.conv1(x)\n out = self.conv2(out)\n out += residual\n out = self.activation_function(out)\n if self.use_act_quant:\n out = self.act_quantizer(out)\n return out\n\n\nclass QuantBottleneck(BaseQuantBlock):\n \"\"\"\n Implementation of Quantized Bottleneck Block used in ResNet-50, -101 and -152.\n \"\"\"\n\n def __init__(self, bottleneck: Bottleneck, weight_quant_params: dict = {}, act_quant_params: dict = {}):\n super().__init__(act_quant_params)\n self.conv1 = QuantModule(bottleneck.conv1, weight_quant_params, act_quant_params)\n self.conv1.activation_function = bottleneck.relu1\n self.conv2 = QuantModule(bottleneck.conv2, weight_quant_params, act_quant_params)\n self.conv2.activation_function = bottleneck.relu2\n self.conv3 = QuantModule(bottleneck.conv3, weight_quant_params, act_quant_params, disable_act_quant=True)\n\n # modify the activation function to ReLU\n self.activation_function = bottleneck.relu3\n\n if bottleneck.downsample is None:\n self.downsample = None\n else:\n self.downsample = QuantModule(bottleneck.downsample[0], weight_quant_params, act_quant_params,\n disable_act_quant=True)\n # copying all attributes in original block\n self.stride = bottleneck.stride\n\n def forward(self, x):\n residual = x if self.downsample is None else self.downsample(x)\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out += residual\n out = self.activation_function(out)\n if self.use_act_quant:\n out = self.act_quantizer(out)\n return out\n\n\nclass QuantResBottleneckBlock(BaseQuantBlock):\n \"\"\"\n Implementation of Quantized Bottleneck Blockused in RegNetX (no SE module).\n \"\"\"\n\n def __init__(self, bottleneck: ResBottleneckBlock, weight_quant_params: dict = {}, act_quant_params: dict = {}):\n super().__init__(act_quant_params)\n self.conv1 = QuantModule(bottleneck.f.a, weight_quant_params, act_quant_params)\n self.conv1.activation_function = bottleneck.f.a_relu\n self.conv2 = QuantModule(bottleneck.f.b, weight_quant_params, act_quant_params)\n self.conv2.activation_function = bottleneck.f.b_relu\n self.conv3 = QuantModule(bottleneck.f.c, weight_quant_params, act_quant_params, disable_act_quant=True)\n\n # modify the activation function to ReLU\n self.activation_function = bottleneck.relu\n\n if bottleneck.proj_block:\n self.downsample = QuantModule(bottleneck.proj, weight_quant_params, act_quant_params,\n disable_act_quant=True)\n else:\n self.downsample = None\n # copying all attributes in original block\n self.proj_block = bottleneck.proj_block\n\n def forward(self, x):\n residual = x if not self.proj_block else self.downsample(x)\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out += residual\n out = self.activation_function(out)\n if self.use_act_quant:\n out = self.act_quantizer(out)\n return out\n\n\nclass QuantInvertedResidual(BaseQuantBlock):\n \"\"\"\n Implementation of Quantized Inverted Residual Block used in MobileNetV2.\n Inverted Residual does not have activation function.\n \"\"\"\n\n def __init__(self, inv_res: InvertedResidual, weight_quant_params: dict = {}, act_quant_params: dict = {}):\n super().__init__(act_quant_params)\n\n self.use_res_connect = inv_res.use_res_connect\n self.expand_ratio = inv_res.expand_ratio\n if self.expand_ratio == 1:\n self.conv = nn.Sequential(\n QuantModule(inv_res.conv[0], weight_quant_params, act_quant_params),\n QuantModule(inv_res.conv[3], weight_quant_params, act_quant_params, disable_act_quant=True),\n )\n self.conv[0].activation_function = nn.ReLU6()\n else:\n self.conv = nn.Sequential(\n QuantModule(inv_res.conv[0], weight_quant_params, act_quant_params),\n QuantModule(inv_res.conv[3], weight_quant_params, act_quant_params),\n QuantModule(inv_res.conv[6], weight_quant_params, act_quant_params, disable_act_quant=True),\n )\n self.conv[0].activation_function = nn.ReLU6()\n self.conv[1].activation_function = nn.ReLU6()\n\n def forward(self, x):\n if self.use_res_connect:\n out = x + self.conv(x)\n else:\n out = self.conv(x)\n out = self.activation_function(out)\n if self.use_act_quant:\n out = self.act_quantizer(out)\n return out\n\n\nspecials = {\n BasicBlock: QuantBasicBlock,\n Bottleneck: QuantBottleneck,\n ResBottleneckBlock: QuantResBottleneckBlock,\n InvertedResidual: QuantInvertedResidual,\n}\n" ]
[ [ "torch.nn.ReLU6" ] ]
xianyuMeng/nvdiffrast
[ "1004b70efbd4b852a2f61117c432b3ef3eadcb93" ]
[ "samples/torch/cube.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport argparse\nimport os\nimport pathlib\nimport sys\nimport numpy as np\nimport torch\nimport imageio\n\nimport util\n\nimport nvdiffrast.torch as dr\n\n# Transform vertex positions to clip space\ndef transform_pos(mtx, pos):\n t_mtx = torch.from_numpy(mtx).cuda() if isinstance(mtx, np.ndarray) else mtx\n # (x,y,z) -> (x,y,z,1)\n posw = torch.cat([pos, torch.ones([pos.shape[0], 1]).cuda()], axis=1)\n return torch.matmul(posw, t_mtx.t())[None, ...]\n\ndef render(glctx, mtx, pos, pos_idx, vtx_col, col_idx, resolution: int):\n pos_clip = transform_pos(mtx, pos)\n rast_out, _ = dr.rasterize(glctx, pos_clip, pos_idx, resolution=[resolution, resolution])\n color, _ = dr.interpolate(vtx_col[None, ...], rast_out, col_idx)\n color = dr.antialias(color, rast_out, pos_clip, pos_idx)\n return color\n\ndef make_grid(arr, ncols=2):\n n, height, width, nc = arr.shape\n nrows = n//ncols\n assert n == nrows*ncols\n return arr.reshape(nrows, ncols, height, width, nc).swapaxes(1,2).reshape(height*nrows, width*ncols, nc)\n\ndef fit_cube(max_iter = 5000,\n resolution = 4,\n discontinuous = False,\n repeats = 1,\n log_interval = 10,\n display_interval = None,\n display_res = 512,\n out_dir = None,\n log_fn = None,\n mp4save_interval = None,\n mp4save_fn = None):\n\n log_file = None\n writer = None\n if out_dir:\n os.makedirs(out_dir, exist_ok=True)\n if log_fn:\n log_file = open(f'{out_dir}/{log_fn}', 'wt')\n if mp4save_interval != 0:\n writer = imageio.get_writer(f'{out_dir}/{mp4save_fn}', mode='I', fps=30, codec='libx264', bitrate='16M')\n else:\n mp4save_interval = None\n\n datadir = f'{pathlib.Path(__file__).absolute().parents[1]}/data'\n fn = 'cube_%s.npz' % ('d' if discontinuous else 'c')\n with np.load(f'{datadir}/{fn}') as f:\n pos_idx, vtxp, col_idx, vtxc = f.values()\n print(\"Mesh has %d triangles and %d vertices.\" % (pos_idx.shape[0], vtxp.shape[0]))\n\n # Create position/triangle index tensors\n pos_idx = torch.from_numpy(pos_idx.astype(np.int32)).cuda()\n col_idx = torch.from_numpy(col_idx.astype(np.int32)).cuda()\n vtx_pos = torch.from_numpy(vtxp.astype(np.float32)).cuda()\n vtx_col = torch.from_numpy(vtxc.astype(np.float32)).cuda()\n\n glctx = dr.RasterizeGLContext()\n\n # Repeats.\n for rep in range(repeats):\n\n ang = 0.0\n gl_avg = []\n\n vtx_pos_rand = np.random.uniform(-0.5, 0.5, size=vtxp.shape) + vtxp\n vtx_col_rand = np.random.uniform(0.0, 1.0, size=vtxc.shape)\n vtx_pos_opt = torch.tensor(vtx_pos_rand, dtype=torch.float32, device='cuda', requires_grad=True)\n vtx_col_opt = torch.tensor(vtx_col_rand, dtype=torch.float32, device='cuda', requires_grad=True)\n\n # Adam optimizer for vertex position and color with a learning rate ramp.\n optimizer = torch.optim.Adam([vtx_pos_opt, vtx_col_opt], lr=1e-2)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: max(0.01, 10**(-x*0.0005)))\n\n for it in range(max_iter + 1):\n # Random rotation/translation matrix for optimization.\n r_rot = util.random_rotation_translation(0.25)\n\n # Smooth rotation for display.\n a_rot = np.matmul(util.rotate_x(-0.4), util.rotate_y(ang))\n\n # Modelview and modelview + projection matrices.\n proj = util.projection(x=0.4)\n r_mv = np.matmul(util.translate(0, 0, -3.5), r_rot)\n r_mvp = np.matmul(proj, r_mv).astype(np.float32)\n a_mv = np.matmul(util.translate(0, 0, -3.5), a_rot)\n a_mvp = np.matmul(proj, a_mv).astype(np.float32)\n\n # Compute geometric error for logging.\n with torch.no_grad():\n geom_loss = torch.mean(torch.sum((torch.abs(vtx_pos_opt) - .5)**2, dim=1)**0.5)\n gl_avg.append(float(geom_loss))\n\n # Print/save log.\n if log_interval and (it % log_interval == 0):\n gl_val = np.mean(np.asarray(gl_avg))\n gl_avg = []\n s = (\"rep=%d,\" % rep) if repeats > 1 else \"\"\n s += \"iter=%d,err=%f\" % (it, gl_val)\n print(s)\n if log_file:\n log_file.write(s + \"\\n\")\n\n color = render(glctx, r_mvp, vtx_pos, pos_idx, vtx_col, col_idx, resolution)\n color_opt = render(glctx, r_mvp, vtx_pos_opt, pos_idx, vtx_col_opt, col_idx, resolution)\n\n # Compute loss and train.\n loss = torch.mean((color - color_opt)**2) # L2 pixel loss.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n # Show/save image.\n display_image = display_interval and (it % display_interval == 0)\n save_mp4 = mp4save_interval and (it % mp4save_interval == 0)\n\n if display_image or save_mp4:\n ang = ang + 0.01\n\n img_b = color[0].cpu().numpy()[::-1]\n img_o = color_opt[0].detach().cpu().numpy()[::-1]\n img_d = render(glctx, a_mvp, vtx_pos_opt, pos_idx, vtx_col_opt, col_idx, display_res)[0]\n img_r = render(glctx, a_mvp, vtx_pos, pos_idx, vtx_col, col_idx, display_res)[0]\n\n scl = display_res // img_o.shape[0]\n img_b = np.repeat(np.repeat(img_b, scl, axis=0), scl, axis=1)\n img_o = np.repeat(np.repeat(img_o, scl, axis=0), scl, axis=1)\n result_image = make_grid(np.stack([img_o, img_b, img_d.detach().cpu().numpy()[::-1], img_r.cpu().numpy()[::-1]]))\n\n if display_image:\n util.display_image(result_image, size=display_res, title='%d / %d' % (it, max_iter))\n if save_mp4:\n writer.append_data(np.clip(np.rint(result_image*255.0), 0, 255).astype(np.uint8))\n\n # Done.\n if writer is not None:\n writer.close()\n if log_file:\n log_file.close()\n\n#----------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description='Cube fit example')\n parser.add_argument('--outdir', help='Specify output directory', default='')\n parser.add_argument('--discontinuous', action='store_true', default=False)\n parser.add_argument('--resolution', type=int, default=0, required=True)\n parser.add_argument('--display-interval', type=int, default=0)\n parser.add_argument('--mp4save-interval', type=int, default=100)\n parser.add_argument('--max-iter', type=int, default=1000)\n args = parser.parse_args()\n\n # Set up logging.\n if args.outdir:\n ds = 'd' if args.discontinuous else 'c'\n out_dir = f'{args.outdir}/cube_{ds}_{args.resolution}'\n print (f'Saving results under {out_dir}')\n else:\n out_dir = None\n print ('No output directory specified, not saving log or images')\n\n # Run.\n fit_cube(\n max_iter=args.max_iter,\n resolution=args.resolution,\n discontinuous=args.discontinuous,\n log_interval=10,\n display_interval=args.display_interval,\n out_dir=out_dir,\n log_fn='log.txt',\n mp4save_interval=args.mp4save_interval,\n mp4save_fn='progress.mp4'\n )\n\n # Done.\n print(\"Done.\")\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.matmul", "numpy.asarray", "torch.no_grad", "torch.optim.Adam", "numpy.load", "numpy.rint", "torch.from_numpy", "torch.ones", "torch.abs", "numpy.random.uniform", "torch.tensor", "numpy.repeat", "torch.mean" ] ]
tripathiaakash/transformers
[ "8e73e56cf648418e6c9701fa64ee0dd56f02cb5f" ]
[ "examples/run_language_modeling.py" ]
[ "import argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n BertConfig,\n BertForMaskedLM,\n BertTokenizer,\n CamembertConfig,\n CamembertForMaskedLM,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertForMaskedLM,\n DistilBertTokenizer,\n GPT2Config,\n GPT2LMHeadModel,\n GPT2Tokenizer,\n OpenAIGPTConfig,\n OpenAIGPTLMHeadModel,\n OpenAIGPTTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n RobertaConfig,\n RobertaForMaskedLM,\n RobertaTokenizer,\n T5Config,\n T5WithLMHeadModel,\n T5Tokenizer,\n get_linear_schedule_with_warmup,\n)\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CLASSES = {\n \"gpt2\": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),\n \"openai-gpt\": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n \"bert\": (BertConfig, BertForMaskedLM, BertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),\n \"camembert\": (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer),\n \"t5\": (T5Config, T5WithLMHeadModel, T5Tokenizer)\n}\n\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n\n block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)\n\n directory, filename = os.path.split(file_path)\n cached_features_file = os.path.join(\n directory, args.model_type + \"_cached_lm_\" + str(block_size) + \"_\" + filename\n )\n\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n with open(cached_features_file, \"rb\") as handle:\n self.examples = pickle.load(handle)\n else:\n logger.info(\"Creating features from dataset file at %s\", directory)\n\n self.examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n text = f.read()\n\n tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))\n\n for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size\n self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]))\n # Note that we are loosing the last truncated example here for the sake of simplicity (no padding)\n # If your dataset is small, first you should loook for a bigger one :-) and second you\n # can change this behavior by adding (model specific) padding.\n\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n with open(cached_features_file, \"wb\") as handle:\n pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n return torch.tensor(self.examples[item], dtype=torch.long)\n\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n # Here, we do not cache the features, operating under the assumption\n # that we will soon use fast multithreaded tokenizers from the\n # `tokenizers` repo everywhere =)\n logger.info(\"Creating features from dataset file at %s\", file_path)\n\n with open(file_path, encoding=\"utf-8\") as f:\n lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]\n\n self.examples = tokenizer.batch_encode_plus(lines, add_special_tokens=True, max_length=block_size)[\"input_ids\"]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n return torch.tensor(self.examples[i], dtype=torch.long)\n\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False):\n if args.seq2seq == 'false':\n file_path = args.eval_data_file if evaluate else args.train_data_file\n if args.line_by_line:\n return (LineByLineTextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size),)\n else:\n return TextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n else:\n file_x_path = args.eval_data_x_file if evaluate else args.train_x_data_file\n file_y_path = args.eval_data_y_file if evaluate else args.train_y_data_file\n\n return (LineByLineTextDataset(tokenizer, args, file_path=file_x_path, block_size=args.block_size),\n LineByLineTextDataset(tokenizer, args, file_path=file_y_path, block_size=args.block_size))\n\n \n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef _sorted_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = glob.glob(os.path.join(args.output_dir, \"{}-*\".format(checkpoint_prefix)))\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(\".*{}-([0-9]+)\".format(checkpoint_prefix), path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n return checkpoints_sorted\n\n\ndef _rotate_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> None:\n if not args.save_total_limit:\n return\n if args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)\n if len(checkpoints_sorted) <= args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n\ndef mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. \"\"\"\n\n if tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, args.mlm_probability)\n special_tokens_mask = [\n tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if tokenizer._pad_token is not None:\n padding_mask = labels.eq(tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\ndef train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]:\n \"\"\" Train the model \"\"\"\n \n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n #train_sampler = RandomSampler(train_dataset[0]) if args.local_rank == -1 else DistributedSampler(train_dataset[0])\n train_dataloader = DataLoader(\n train_dataset[0], batch_size=args.train_batch_size, collate_fn=collate\n )\n\n\n #### If seq2seq \n if len(train_dataset)>1:\n #train_sampler_y = RandomSampler(train_dataset[1]) if args.local_rank == -1 else DistributedSampler(train_dataset[1])\n train_dataloader_y = DataLoader(\n train_dataset[1], batch_size=args.train_batch_size, collate_fn=collate\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if (\n args.model_name_or_path\n and os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name_or_path, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset[0]))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if args.model_name_or_path and os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n\n model_to_resize = model.module if hasattr(model, \"module\") else model # Take care of distributed/parallel training\n model_to_resize.resize_token_embeddings(len(tokenizer))\n\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n # Added here for reproducibility\n set_seed(args)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n epoch_iterator_y = tqdm(train_dataloader_y, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, (batch, batch_y) in enumerate(zip(epoch_iterator,epoch_iterator_y)):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)\n if args.seq2seq == 'true':\n inputs, labels = batch, batch_y\n\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n model.train()\n\n if args.seq2seq == 'false':\n outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)\n else:\n outputs = model(input_ids=inputs, decoder_lm_labels=labels)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else: \n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = \"checkpoint\"\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"{}-{}\".format(checkpoint_prefix, global_step))\n os.makedirs(output_dir, exist_ok=True)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n _rotate_checkpoints(args, checkpoint_prefix)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix=\"\") -> Dict:\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)\n\n if args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir, exist_ok=True)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate\n )\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n\n with torch.no_grad():\n outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)\n lm_loss = outputs[0]\n eval_loss += lm_loss.mean().item()\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n perplexity = torch.exp(torch.tensor(eval_loss))\n\n result = {\"perplexity\": perplexity}\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--train_data_file\", default=None, type=str, help=\"The input training data file (a text file).\"\n )\n\n parser.add_argument(\n \"--train_x_data_file\", default=None, type=str, help=\"The input training data x file (a text file) for seq2seq.\"\n )\n\n parser.add_argument(\n \"--train_y_data_file\", default=None, type=str, help=\"The input training data y file (a text file) for seq2seq.\"\n )\n\n parser.add_argument(\n \"--output_dir\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--model_type\", type=str, required=True, help=\"The model architecture to be trained or fine-tuned.\",\n )\n\n parser.add_argument(\n \"--seq2seq\", type=str, default='false', help=\"true if model is seq2seq\"\n )\n\n # Other parameters\n parser.add_argument(\n \"--eval_data_file\",\n default=None,\n type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\",\n )\n parser.add_argument(\n \"--line_by_line\",\n action=\"store_true\",\n help=\"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\",\n )\n parser.add_argument(\n \"--should_continue\", action=\"store_true\", help=\"Whether to continue from latest checkpoint in output_dir\"\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n\n parser.add_argument(\n \"--mlm\", action=\"store_true\", help=\"Train with masked-language modeling loss instead of language modeling.\"\n )\n parser.add_argument(\n \"--mlm_probability\", type=float, default=0.15, help=\"Ratio of tokens to mask for masked language modeling loss\"\n )\n\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=None,\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\n \"--block_size\",\n default=-1,\n type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=1.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--save_total_limit\",\n type=int,\n default=None,\n help=\"Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default\",\n )\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if args.model_type in [\"bert\", \"roberta\", \"distilbert\", \"camembert\"] and not args.mlm:\n raise ValueError(\n \"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm \"\n \"flag (masked language modeling).\"\n )\n if args.eval_data_file is None and args.do_eval:\n raise ValueError(\n \"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file \"\n \"or remove the --do_eval argument.\"\n )\n if args.should_continue:\n sorted_checkpoints = _sorted_checkpoints(args)\n if len(sorted_checkpoints) == 0:\n raise ValueError(\"Used --should_continue but no checkpoint was found in --output_dir.\")\n else:\n args.model_name_or_path = sorted_checkpoints[-1]\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n\n if args.config_name:\n config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n config = config_class()\n\n if args.tokenizer_name:\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\".format(tokenizer_class.__name__)\n )\n\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n args.block_size = min(args.block_size, tokenizer.max_len)\n\n if args.model_name_or_path:\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = model_class(config=config)\n\n model.to(args.device)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir, exist_ok=True)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.distributed.get_world_size", "torch.cuda.is_available", "torch.nn.DataParallel", "torch.distributed.init_process_group", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "torch.distributed.get_rank", "torch.device", "torch.cuda.manual_seed_all", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.SequentialSampler", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.cuda.set_device", "torch.full", "torch.distributed.barrier", "numpy.random.seed", "torch.no_grad", "torch.bernoulli" ] ]
KwonGihyun/DiagonalGAN
[ "9e401c00e741d700f85df2c715ee11c1e66e1d1c" ]
[ "dataset.py" ]
[ "from io import BytesIO\n\n# import lmdb\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport os\nimport torchvision.transforms as transforms\nimport random\nimport torch\n\nclass MultiResolutionDataset(Dataset):\n def __init__(self, path, resolution=8):\n\n files = os.listdir(path)\n self.imglist =[]\n for fir in files:\n self.imglist.append(os.path.join(path,fir))\n\n self.transform = transforms.Compose(\n [\n transforms.Resize(resolution),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n\n self.resolution = resolution\n\n\n def __len__(self):\n return len(self.imglist)\n\n def __getitem__(self, index):\n\n img = Image.open(self.imglist[index])\n img = self.transform(img)\n\n return img\n\nclass MultiLabelResolutionDataset(Dataset):\n def __init__(self, path, resolution=8):\n folders = path\n\n self.imglist =[]\n self.attributes = []\n for i in range(len(folders)):\n files = os.listdir(folders[i])\n for fir in files:\n self.imglist.append(os.path.join(folders[i],fir))\n self.attributes.append(i)\n\n c = list(zip(self.imglist,self.attributes))\n random.shuffle(c)\n self.imglist2, self.att2 = zip(*c)\n\n self.transform = transforms.Compose(\n [\n transforms.Resize(resolution),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n\n self.resolution = resolution\n\n def __len__(self):\n return len(self.imglist)\n\n def __getitem__(self, index):\n \n img = Image.open(self.imglist[index])\n img = self.transform(img)\n label_org = self.attributes[index]\n label_trg = self.att2[index]\n return img,label_org,label_trg\n\nclass MultiLabelAllDataset(Dataset):\n def __init__(self, path,spath,ppath,resolution=256):\n\n self.slist = []\n self.plist = []\n self.imglist = []\n self.attributes = []\n for i in range(len(spath)):\n style = os.listdir(spath[i])\n pix = os.listdir(ppath[i])\n imgs = os.listdir(path[i])\n style.sort()\n pix.sort()\n imgs.sort()\n for ii in range(len(style)):\n self.slist.append(os.path.join(spath[i],style[ii]))\n self.plist.append(os.path.join(ppath[i],pix[ii]))\n self.imglist.append(os.path.join(path[i],imgs[ii]))\n self.attributes.append(i)\n \n c = list(zip(self.imglist,self.slist,self.plist,self.attributes))\n random.shuffle(c)\n self.imglist2,self.slist2,self.plist2, self.att2 = zip(*c)\n self.transform = transforms.Compose(\n [\n transforms.Resize(resolution),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ])\n def __len__(self):\n return len(self.slist)\n\n def __getitem__(self, index):\n \n img = Image.open(self.imglist[index])\n sty = torch.load(self.slist[index])\n sty.requires_grad=False\n pix = torch.load(self.plist[index])\n pix.requires_grad=False\n \n img = self.transform(img)\n\n label_org = self.attributes[index]\n label_trg = self.att2[index]\n return img,sty,pix,label_org,label_trg" ]
[ [ "torch.load" ] ]
maurov/xraysloth
[ "6f18ddcb02050431574693d46bcf4b89c719c40b" ]
[ "examples/xdata_tests.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Examples for xdata\n=====================\n\"\"\"\nimport numpy as np\n\nfrom sloth.utils.xdata import (ELEMENTS, SHELLS, LINES_DICT,\\\n LINES_K, LINES_L, LINES_M, LINES, TRANSITIONS)\n\nfrom sloth.utils.xdata import (ene_res, fluo_width, find_line, mapLine2Trans,\\\n fluo_spectrum)\n\n\ndef testEresLinesKLM(emin, emax):\n \"\"\"Returns a list of the average energy resolution for K, L and M\n lines for the elements in the given energy range\n\n \"\"\"\n k = ene_res(emin, emax, shells=['K'])\n l1 = ene_res(emin, emax, shells=['L1'])\n l2 = ene_res(emin, emax, shells=['L2'])\n l3 = ene_res(emin, emax, shells=['L3'])\n m1 = ene_res(emin, emax, shells=['M1'])\n m2 = ene_res(emin, emax, shells=['M2'])\n m3 = ene_res(emin, emax, shells=['M3'])\n m4 = ene_res(emin, emax, shells=['M4'])\n m5 = ene_res(emin, emax, shells=['M5'])\n #\n ss = [k, l1, l2, l3, m1, m2, m3, m4, m5]\n ss_n = ['K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5']\n #\n dees = []\n for s in ss:\n dees.append(np.mean(s['dee']))\n #\n return dees\n\n\ndef testFluoWidth(elem='Au', lines=['LB6', 'LB4', 'LB1', 'LB2', 'LB3', 'LB5']):\n \"\"\"returns the line width for the lines of a given element\n this example: Au Lbeta lines\n \"\"\"\n for line in lines:\n print(\"{0} {1} : {2:>.4f} eV\".format(elem, line,\n fluo_width(elem, line)))\n\n\ndef testFluoSulphurK():\n \"\"\"generate the Kalpha1,2 emission spectrum of Sulphur\"\"\"\n elem = 'S'\n x1, y1, i1 = fluo_spectrum(elem, 'KA1')\n x2, y2, i2 = fluo_spectrum(elem, 'KA2')\n x = np.arange(x2.min(), x1.max(), 0.05)\n y1i = np.interp(x, x1, y1)\n y2i = np.interp(x, x2, y2)\n y = y1i+y2i\n from silx.gui.plot import Plot1D\n from silx import sx\n sx.enable_gui()\n p = Plot1D()\n p.addCurve(x, y, legend='sum', color='black')\n p.addCurve(x1, y1, legend='KA1', color='red')\n p.addCurve(x2, y2, legend='KA2', color='green')\n p.show()\n return p\n\n\nif __name__ == '__main__':\n # pass\n # dees = testEresLinesKLM(2000, 5000)\n # find_line(1500., 5500., lines=LINES_DICT['L2'], outDict=False)\n p = testFluoSulphurK()\n input(\"Press enter to quit\")\n" ]
[ [ "numpy.interp", "numpy.mean" ] ]
yoon28/realsr-noise-injection
[ "402679490bf0972d09aaaadee3b5b9850c2a36e4" ]
[ "codes/models/SRGAN_model.py" ]
[ "import logging\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel as P\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nimport models.networks as networks\nimport models.lr_scheduler as lr_scheduler\nfrom .base_model import BaseModel\nfrom models.modules.loss import GANLoss\n\nlogger = logging.getLogger('base')\n\nclass SRGANModel(BaseModel):\n def __init__(self, opt):\n super(SRGANModel, self).__init__(opt)\n if opt['dist']:\n self.rank = torch.distributed.get_rank()\n else:\n self.rank = -1 # non dist training\n train_opt = opt['train']\n\n # define networks and load pretrained models\n self.netG = networks.define_G(opt).to(self.device)\n if opt['dist']:\n self.netG = DistributedDataParallel(self.netG, device_ids=[torch.cuda.current_device()])\n else:\n self.netG = DataParallel(self.netG)\n if self.is_train:\n self.netD = networks.define_D(opt).to(self.device)\n if opt['dist']:\n if opt['network_D']['norm_layer'] == 'batchnorm':\n self.netD = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.netD).to(self.device)\n self.netD = DistributedDataParallel(self.netD,\n device_ids=[torch.cuda.current_device()])\n else:\n self.netD = DataParallel(self.netD)\n\n self.netG.train()\n self.netD.train()\n\n # define losses, optimizer and scheduler\n if self.is_train:\n\n # G pixel loss\n if train_opt['pixel_weight'] > 0:\n l_pix_type = train_opt['pixel_criterion']\n if l_pix_type == 'l1':\n self.cri_pix = nn.L1Loss().to(self.device)\n elif l_pix_type == 'l2':\n self.cri_pix = nn.MSELoss().to(self.device)\n else:\n raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))\n self.l_pix_w = train_opt['pixel_weight']\n else:\n logger.info('Remove pixel loss.')\n self.cri_pix = None\n\n # G feature loss\n if train_opt['feature_weight'] > 0:\n l_fea_type = train_opt['feature_criterion']\n if l_fea_type == 'l1':\n self.cri_fea = nn.L1Loss().to(self.device)\n elif l_fea_type == 'l2':\n self.cri_fea = nn.MSELoss().to(self.device)\n else:\n raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))\n self.l_fea_w = train_opt['feature_weight']\n else:\n logger.info('Remove feature loss.')\n self.cri_fea = None\n if self.cri_fea: # load VGG perceptual loss\n self.netF = networks.define_F(opt, use_bn=False).to(self.device)\n # if opt['dist']:\n # self.netF = DistributedDataParallel(self.netF,\n # device_ids=[torch.cuda.current_device()])\n # else:\n # self.netF = DataParallel(self.netF)\n\n # GD gan loss\n self.cri_gan = GANLoss(train_opt['gan_type'], 1.0, 0.0).to(self.device)\n self.l_gan_w = train_opt['gan_weight']\n # D_update_ratio and D_init_iters\n self.D_update_ratio = train_opt['D_update_ratio'] if train_opt['D_update_ratio'] else 1\n self.D_init_iters = train_opt['D_init_iters'] if train_opt['D_init_iters'] else 0\n\n # optimizers\n # G\n wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0\n optim_params = []\n for k, v in self.netG.named_parameters(): # can optimize for a part of the model\n if v.requires_grad:\n optim_params.append(v)\n else:\n if self.rank <= 0:\n logger.warning('Params [{:s}] will not optimize.'.format(k))\n self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'],\n weight_decay=wd_G,\n betas=(train_opt['beta1_G'], train_opt['beta2_G']))\n self.optimizers.append(self.optimizer_G)\n # D\n wd_D = train_opt['weight_decay_D'] if train_opt['weight_decay_D'] else 0\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=train_opt['lr_D'],\n weight_decay=wd_D,\n betas=(train_opt['beta1_D'], train_opt['beta2_D']))\n self.optimizers.append(self.optimizer_D)\n\n # schedulers\n if train_opt['lr_scheme'] == 'MultiStepLR':\n for optimizer in self.optimizers:\n self.schedulers.append(\n lr_scheduler.MultiStepLR_Restart(optimizer, train_opt['lr_steps'],\n restarts=train_opt['restarts'],\n weights=train_opt['restart_weights'],\n gamma=train_opt['lr_gamma'],\n clear_state=train_opt['clear_state']))\n elif train_opt['lr_scheme'] == 'CosineAnnealingLR_Restart':\n for optimizer in self.optimizers:\n self.schedulers.append(\n lr_scheduler.CosineAnnealingLR_Restart(\n optimizer, train_opt['T_period'], eta_min=train_opt['eta_min'],\n restarts=train_opt['restarts'], weights=train_opt['restart_weights']))\n else:\n raise NotImplementedError('MultiStepLR learning rate scheme is enough.')\n\n self.log_dict = OrderedDict()\n\n self.print_network() # print network\n self.load() # load G and D if needed\n\n def feed_data(self, data, need_GT=True):\n self.var_L = data['LQ'].to(self.device) # LQ\n if need_GT:\n self.var_H = data['GT'].to(self.device) # GT\n input_ref = data['ref'] if 'ref' in data else data['GT']\n self.var_ref = input_ref.to(self.device)\n\n def optimize_parameters(self, step):\n # G\n for p in self.netD.parameters():\n p.requires_grad = False\n\n self.optimizer_G.zero_grad()\n self.fake_H = self.netG(self.var_L.detach())\n\n l_g_total = 0\n if step % self.D_update_ratio == 0 and step > self.D_init_iters:\n if self.cri_pix: # pixel loss\n l_g_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.var_H)\n l_g_total += l_g_pix\n if self.cri_fea: # feature loss\n real_fea = self.netF(self.var_H).detach()\n fake_fea = self.netF(self.fake_H)\n l_g_fea = self.l_fea_w * self.cri_fea(fake_fea, real_fea)\n l_g_total += l_g_fea\n\n pred_g_fake = self.netD(self.fake_H)\n if self.opt['train']['gan_type'] == 'gan':\n l_g_gan = self.l_gan_w * self.cri_gan(pred_g_fake, True)\n elif self.opt['train']['gan_type'] == 'ragan':\n pred_d_real = self.netD(self.var_ref).detach()\n l_g_gan = self.l_gan_w * (\n self.cri_gan(pred_d_real - torch.mean(pred_g_fake), False) +\n self.cri_gan(pred_g_fake - torch.mean(pred_d_real), True)) / 2\n l_g_total += l_g_gan\n\n l_g_total.backward()\n self.optimizer_G.step()\n\n # D\n for p in self.netD.parameters():\n p.requires_grad = True\n\n self.optimizer_D.zero_grad()\n l_d_total = 0\n pred_d_real = self.netD(self.var_ref)\n pred_d_fake = self.netD(self.fake_H.detach()) # detach to avoid BP to G\n if self.opt['train']['gan_type'] == 'gan':\n l_d_real = self.cri_gan(pred_d_real, True)\n l_d_fake = self.cri_gan(pred_d_fake, False)\n l_d_total = l_d_real + l_d_fake\n elif self.opt['train']['gan_type'] == 'ragan':\n l_d_real = self.cri_gan(pred_d_real - torch.mean(pred_d_fake), True)\n l_d_fake = self.cri_gan(pred_d_fake - torch.mean(pred_d_real), False)\n l_d_total = (l_d_real + l_d_fake) / 2\n\n l_d_total.backward()\n self.optimizer_D.step()\n\n # set log\n if step % self.D_update_ratio == 0 and step > self.D_init_iters:\n if self.cri_pix:\n self.log_dict['l_g_pix'] = l_g_pix.item()\n # self.log_dict['l_g_mean_color'] = l_g_mean_color.item()\n if self.cri_fea:\n self.log_dict['l_g_fea'] = l_g_fea.item()\n self.log_dict['l_g_gan'] = l_g_gan.item()\n\n self.log_dict['l_d_real'] = l_d_real.item()\n self.log_dict['l_d_fake'] = l_d_fake.item()\n self.log_dict['D_real'] = torch.mean(pred_d_real.detach())\n self.log_dict['D_fake'] = torch.mean(pred_d_fake.detach())\n\n def test(self):\n self.netG.eval()\n with torch.no_grad():\n self.fake_H = self.netG(self.var_L)\n self.netG.train()\n\n def back_projection(self):\n lr_error = self.var_L - torch.nn.functional.interpolate(self.fake_H,\n scale_factor=1/self.opt['scale'],\n mode='bicubic',\n align_corners=False)\n us_error = torch.nn.functional.interpolate(lr_error,\n scale_factor=self.opt['scale'],\n mode='bicubic',\n align_corners=False)\n self.fake_H += self.opt['back_projection_lamda'] * us_error\n torch.clamp(self.fake_H, 0, 1)\n\n def test_chop(self):\n self.netG.eval()\n with torch.no_grad():\n self.fake_H = self.forward_chop(self.var_L)\n self.netG.train()\n\n def forward_chop(self, *args, shave=10, min_size=160000):\n # scale = 1 if self.input_large else self.scale[self.idx_scale]\n scale = self.opt['scale']\n n_GPUs = min(torch.cuda.device_count(), 4)\n args = [a.squeeze().unsqueeze(0) for a in args]\n\n # height, width\n h, w = args[0].size()[-2:]\n # print('len(args)', len(args))\n # print('args[0].size()', args[0].size())\n\n top = slice(0, h//2 + shave)\n bottom = slice(h - h//2 - shave, h)\n left = slice(0, w//2 + shave)\n right = slice(w - w//2 - shave, w)\n x_chops = [torch.cat([\n a[..., top, left],\n a[..., top, right],\n a[..., bottom, left],\n a[..., bottom, right]\n ]) for a in args]\n # print('len(x_chops)', len(x_chops))\n # print('x_chops[0].size()', x_chops[0].size())\n\n y_chops = []\n if h * w < 4 * min_size:\n for i in range(0, 4, n_GPUs):\n x = [x_chop[i:(i + n_GPUs)] for x_chop in x_chops]\n # print(len(x))\n # print(x[0].size())\n y = P.data_parallel(self.netG, *x, range(n_GPUs))\n if not isinstance(y, list): y = [y]\n if not y_chops:\n y_chops = [[c for c in _y.chunk(n_GPUs, dim=0)] for _y in y]\n else:\n for y_chop, _y in zip(y_chops, y):\n y_chop.extend(_y.chunk(n_GPUs, dim=0))\n else:\n\n # print(x_chops[0].size())\n for p in zip(*x_chops):\n # print('len(p)', len(p))\n # print('p[0].size()', p[0].size())\n y = self.forward_chop(*p, shave=shave, min_size=min_size)\n if not isinstance(y, list): y = [y]\n if not y_chops:\n y_chops = [[_y] for _y in y]\n else:\n for y_chop, _y in zip(y_chops, y): y_chop.append(_y)\n\n h *= scale\n w *= scale\n top = slice(0, h//2)\n bottom = slice(h - h//2, h)\n bottom_r = slice(h//2 - h, None)\n left = slice(0, w//2)\n right = slice(w - w//2, w)\n right_r = slice(w//2 - w, None)\n\n # batch size, number of color channels\n b, c = y_chops[0][0].size()[:-2]\n y = [y_chop[0].new(b, c, h, w) for y_chop in y_chops]\n for y_chop, _y in zip(y_chops, y):\n _y[..., top, left] = y_chop[0][..., top, left]\n _y[..., top, right] = y_chop[1][..., top, right_r]\n _y[..., bottom, left] = y_chop[2][..., bottom_r, left]\n _y[..., bottom, right] = y_chop[3][..., bottom_r, right_r]\n\n if len(y) == 1:\n y = y[0]\n\n return y\n\n def get_current_log(self):\n return self.log_dict\n\n def get_current_visuals(self, need_GT=True):\n out_dict = OrderedDict()\n out_dict['LQ'] = self.var_L.detach()[0].float().cpu()\n out_dict['SR'] = self.fake_H.detach()[0].float().cpu()\n if need_GT:\n out_dict['GT'] = self.var_H.detach()[0].float().cpu()\n return out_dict\n\n def print_network(self):\n # Generator\n s, n = self.get_network_description(self.netG)\n if isinstance(self.netG, nn.DataParallel) or isinstance(self.netG, DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,\n self.netG.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netG.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))\n logger.info(s)\n if self.is_train:\n # Discriminator\n s, n = self.get_network_description(self.netD)\n if isinstance(self.netD, nn.DataParallel) or isinstance(self.netD,\n DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netD.__class__.__name__,\n self.netD.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netD.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network D structure: {}, with parameters: {:,d}'.format(\n net_struc_str, n))\n logger.info(s)\n\n if self.cri_fea: # F, Perceptual Network\n s, n = self.get_network_description(self.netF)\n if isinstance(self.netF, nn.DataParallel) or isinstance(\n self.netF, DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netF.__class__.__name__,\n self.netF.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netF.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network F structure: {}, with parameters: {:,d}'.format(\n net_struc_str, n))\n logger.info(s)\n\n def load(self):\n load_path_G = self.opt['path']['pretrain_model_G']\n if load_path_G is not None:\n logger.info('Loading model for G [{:s}] ...'.format(load_path_G))\n self.load_network(load_path_G, self.netG, self.opt['path']['strict_load'])\n load_path_D = self.opt['path']['pretrain_model_D']\n if self.opt['is_train'] and load_path_D is not None:\n logger.info('Loading model for D [{:s}] ...'.format(load_path_D))\n self.load_network(load_path_D, self.netD, self.opt['path']['strict_load'])\n\n def save(self, iter_step):\n self.save_network(self.netG, 'G', iter_step)\n self.save_network(self.netD, 'D', iter_step)\n" ]
[ [ "torch.cat", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.nn.MSELoss", "torch.nn.functional.interpolate", "torch.optim.Adam", "torch.no_grad", "torch.clamp", "torch.cuda.device_count", "torch.cuda.current_device", "torch.nn.L1Loss", "torch.nn.parallel.DataParallel", "torch.distributed.get_rank", "torch.mean" ] ]
AurelienNioche/ToyPulseRecommender
[ "561c24ed7e350de90cbd3babb785ceb2eb49ebc7" ]
[ "main.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom stable_baselines3.common.env_checker import check_env\n\nfrom stable_baselines3 import PPO\nfrom stable_baselines3.ppo import MlpPolicy\n\nimport gym\nfrom gym import spaces\n\nimport serial\nimport json\n\nimport os\nimport time\n\n\nclass CustomEnv(gym.Env):\n \"\"\"Custom Environment that follows gym interface\"\"\"\n metadata = {'render.modes': ['human']}\n\n def __init__(self, obs_min=40, obs_max=140,\n stim_duration=2.0,\n init_pos=90.0,\n continuous_action_space=True):\n\n super(CustomEnv, self).__init__()\n\n if continuous_action_space:\n self.action_space = spaces.Box(low=-1, high=1, shape=(1, ))\n else:\n self.action_space = spaces.Discrete(2)\n\n self.observation_space = spaces.Box(low=obs_min, high=obs_max, shape=(1, ))\n\n self.obs_min = obs_min\n self.obs_max = obs_max\n self.init_pos = init_pos\n\n self.stim_duration = stim_duration\n\n self.pos = np.array([init_pos])\n\n def step(self, action):\n\n if isinstance(self.action_space, spaces.Box):\n self.pos[:] += action[:] * 10\n else:\n self.pos[:] += (action - 0.5) * 10\n\n self.pos[self.pos > self.obs_max] = self.obs_max\n self.pos[self.pos < self.obs_min] = self.obs_min\n\n observation = self.pos\n\n frequency = self.pos[0] # Hertz\n\n print(\"choose frequency\", frequency)\n\n os.system(f'play -v 2.0 -n synth {self.stim_duration} sin {frequency}')\n\n start_time = time.time()\n\n with serial.Serial('/dev/cu.usbmodem112201', 115200, timeout=.1) as arduino:\n # need to drop first data\n while True:\n data = arduino.readline().strip()\n if data:\n break\n\n # timestamp = []\n bpm = []\n spO2 = []\n while time.time() - start_time < self.stim_duration:\n data = arduino.readline().strip()\n if data:\n data = data.decode()\n try:\n dic_data = json.loads(data)\n # timestamp.append(dic_data[\"timestamp\"])\n except (UnicodeDecodeError, json.decoder.JSONDecodeError):\n print(\"Not data\", data)\n continue\n\n bpm.append(dic_data[\"bpm\"])\n spO2.append(dic_data[\"spO2\"])\n mean_bpm = np.mean(bpm)\n reward = - mean_bpm / 100\n\n\n\n done = False\n info = {}\n return observation, reward, done, info\n\n def reset(self):\n\n self.pos = np.array([self.init_pos, ])\n observation = self.pos\n print(observation)\n return observation # reward, done, info can't be included\n\n def render(self, mode='human'):\n pass\n\n def close(self):\n pass\n\n\ndef main():\n print(\"Initializing...\")\n with serial.Serial('/dev/cu.usbmodem112201', 115200, timeout=.1) as arduino:\n # need to drop first data\n while True:\n data = arduino.readline().strip()\n\n if data:\n data = data.decode()\n try:\n dic_data = json.loads(data)\n # timestamp.append(dic_data[\"timestamp\"])\n if dic_data[\"bpm\"] > 40:\n break\n\n except (UnicodeDecodeError, json.decoder.JSONDecodeError):\n continue\n\n print(\"ready!\")\n env = CustomEnv()\n check_env(env=env)\n\n expert = PPO(\n policy=MlpPolicy,\n env=env,\n # seed=0,\n # batch_size=64,\n # ent_coef=0.0,\n # learning_rate=0.0003,\n # n_epochs=10,\n # n_steps=64,\n )\n\n # If discrete action, needs 50000\n expert.learn(50000)\n\n # x = np.linspace(0, 10, 100)\n # y = np.zeros_like(x)\n # for i in range(x.shape[0]):\n # obs = np.array([x[i], ])\n # action, _ = expert.predict(obs)\n # try:\n # y[i] = action[0]\n # except:\n # y[i] = action\n\n\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.mean" ] ]
Bob620/superman-web
[ "e13ae7305962cd348c2af74485ffa3e0b6855c02" ]
[ "backend/handlers/upload.py" ]
[ "from __future__ import absolute_import, print_function, division\nimport h5py\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport time\nimport yaml\nfrom io import BytesIO, StringIO\nfrom six.moves import xrange\nfrom superman.file_io import parse_spectrum\nfrom threading import Thread\nfrom tornado import gen\nfrom tornado.escape import url_escape\nfrom zipfile import is_zipfile, ZipFile\n\nfrom .common import BaseHandler\nfrom ..web_datasets import (\n UploadedSpectrumDataset,\n WebTrajDataset, WebVectorDataset, WebLIBSDataset, DATASETS,\n PrimaryKeyMetadata, NumericMetadata, BooleanMetadata, LookupMetadata)\n\n\nclass SpectrumUploadHandler(BaseHandler):\n def post(self):\n fig_data = self.get_fig_data()\n if fig_data is None:\n return self.visible_error(403, 'Broken connection to server.')\n\n if not self.request.files:\n return self.visible_error(403, 'No file uploaded.')\n\n f = self.request.files['query'][0]\n fname = f['filename']\n logging.info('Parsing file: %s', fname)\n fh = BytesIO(f['body'])\n\n try:\n query = parse_spectrum(fh)\n except Exception:\n try:\n fh = StringIO(f['body'].decode('utf-8', 'ignore'), newline=None)\n query = parse_spectrum(fh)\n except Exception:\n logging.exception('Spectrum parse failed.')\n # XXX: save failed uploads for debugging purposes\n open('logs/badupload-' + fname, 'w').write(f['body'])\n return self.visible_error(415, 'Spectrum upload failed.')\n\n ds = UploadedSpectrumDataset(fname, query)\n fig_data.set_selected(ds.view(), title=fname)\n axlimits = fig_data.plot()\n return self.write_json(axlimits)\n\n\nclass DatasetUploadHandler(BaseHandler):\n @gen.coroutine\n def post(self):\n ds_name = self.get_argument('ds_name')\n ds_kind = self.get_argument('ds_kind')\n description = self.get_argument('desc')\n\n resample = (self.get_argument('lb', ''), self.get_argument('ub', ''), self.get_argument('step', ''))\n if not any(resample):\n resample = None\n\n if ds_kind not in DATASETS:\n self.visible_error(400, 'Invalid dataset kind.', 'Invalid ds_kind: %r', ds_kind)\n return\n\n if ds_name in DATASETS[ds_kind]:\n self.visible_error(403, 'Dataset already exists.',\n 'ds import would clobber existing: %s [%s]', ds_name, ds_kind)\n return\n\n if not self.request.files or 'spectra' not in self.request.files:\n self.visible_error(400, 'No spectrum data uploaded.')\n return\n\n meta_file, = self.request.files.get('metadata', [None])\n spectra_file, = self.request.files['spectra']\n\n # TODO: gen.Task is removed from modern Tornado implementation, fix this\n err = yield gen.Task(_async_ds_upload, meta_file, spectra_file, ds_name, ds_kind, resample, description)\n if err:\n self.visible_error(*err)\n return\n\n # Return a link to the new dataset to signal the upload succeeded.\n self.write('/explorer?ds_kind=%s&ds_name=%s' % (\n ds_kind, url_escape(ds_name, plus=False)))\n\n # Kick off a background thread to save this new dataset to disk.\n t = Thread(target=_save_ds, args=(ds_kind, ds_name))\n t.daemon = True\n t.start()\n\n\ndef _async_ds_upload(meta_file, spectra_file, ds_name, ds_kind, resample, description, callback=None):\n def helper():\n meta_kwargs, meta_pkeys, err = _load_metadata_csv(meta_file)\n\n if err is None:\n fh = BytesIO(spectra_file['body'])\n if is_zipfile(fh):\n # interpret this as a ZIP of csv files\n fh.seek(0)\n err = _traj_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample, description)\n else:\n # this is one single csv file with all spectra in it\n fh.seek(0)\n err = _vector_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample, description)\n\n callback(err)\n\n t = Thread(target=helper)\n t.daemon = True\n t.start()\n\n\ndef _load_metadata_csv(f=None):\n # metadata is optional\n if f is None:\n return {}, [], None\n\n fh = BytesIO(f['body'])\n try:\n meta = pd.read_csv(fh)\n except Exception:\n logging.exception('Bad metadata file')\n return None, None, (415, 'Unable to parse metadata CSV.')\n\n if meta.columns[0] != 'pkey':\n return None, None, (415, 'Metadata CSV must start with \"pkey\" column.')\n\n meta_kwargs = {}\n for i, name in enumerate(meta.columns[1:]):\n x = meta[name].values\n\n if np.issubdtype(x.dtype, np.bool_):\n m = BooleanMetadata(x, display_name=name)\n elif np.issubdtype(x.dtype, np.number):\n m = NumericMetadata(x, display_name=name)\n else:\n m = LookupMetadata(x, display_name=name)\n\n # use a JS-friendly string key\n meta_kwargs['k%d' % i] = m\n\n # make sure there's no whitespace sticking to the pkeys\n meta_pkeys = np.array(meta.pkey.values, dtype='U', copy=False)\n meta_pkeys = np.char.strip(meta_pkeys)\n return meta_kwargs, meta_pkeys, None\n\n\ndef _traj_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample,\n description):\n # sanity check before doing the hard work\n if resample is None and ds_kind == 'LIBS':\n return 415, 'Failed: LIBS data must be sampled on a common x-axis'\n\n zf = ZipFile(fh)\n traj_data = {}\n for subfile in zf.infolist():\n if subfile.file_size <= 0:\n continue\n # ignore directory prefixes\n fname = os.path.basename(subfile.filename)\n # ignore hidden files\n if fname.startswith('.'):\n continue\n # read and wrap, because the ZipExtFile object isn't seekable\n sub_fh = BytesIO(zf.open(subfile).read())\n try:\n # TODO: ensure each traj has wavelengths in increasing order\n traj_data[fname] = parse_spectrum(sub_fh)\n except Exception:\n logging.exception('bad spectrum subfile: ' + fname)\n return 415, 'Unable to parse spectrum file: %s' % fname\n\n num_meta = len(meta_pkeys)\n num_traj = len(traj_data)\n\n if num_meta == 0:\n meta_pkeys = traj_data.keys()\n elif num_meta != num_traj:\n return (415, 'Failed: %d metadata entries for %d spectra' % (num_meta,\n num_traj))\n else:\n for pkey in meta_pkeys:\n if pkey not in traj_data:\n return 415, 'Failed: %r not in spectra.' % pkey\n\n if resample is None:\n _load = _make_loader_function(description, meta_pkeys, traj_data,\n **meta_kwargs)\n WebTrajDataset(ds_name, ds_kind, _load)\n else:\n lb, ub, step = map(_maybe_float, resample)\n waves = [t[:, 0] for t in traj_data.values()]\n if lb is None:\n lb = max(w[0] for w in waves)\n if ub is None:\n ub = min(w[-1] for w in waves)\n if step is None:\n step = min(np.diff(w).min() for w in waves)\n\n wave = np.arange(lb, ub + step / 2, step, dtype=waves[0].dtype)\n spectra = np.zeros((len(waves), len(wave)), dtype=wave.dtype)\n for i, key in enumerate(meta_pkeys):\n traj = traj_data[key]\n spectra[i] = np.interp(wave, traj[:, 0], traj[:, 1])\n pkey = PrimaryKeyMetadata(meta_pkeys)\n\n _load = _make_loader_function(description, wave, spectra, pkey=pkey,\n **meta_kwargs)\n WebVectorDataset(ds_name, ds_kind, _load)\n\n return None\n\n\ndef _vector_ds(fh, ds_name, ds_kind, meta_kwargs, meta_pkeys, resample, description):\n # I'm not 100% sure what is happening here, but I assume we want to check to make sure we can properly import the\n # data in the correct order\n try:\n pkey = np.array(next(fh).strip().split(b',')[1:])\n data = np.genfromtxt(fh, dtype=np.float32, delimiter=b',', unpack=True)\n wave = data[0]\n spectra = data[1:]\n except Exception:\n logging.exception('Bad spectra file.')\n return 415, 'Unable to parse spectrum data CSV.'\n\n # cut out empty columns (where pkey is '')\n mask = pkey != b''\n if not mask.all():\n pkey = pkey[mask]\n spectra = spectra[mask]\n\n # cut out empty rows (where wave is NaN)\n mask = np.isfinite(wave)\n if not mask.all():\n wave = wave[mask]\n spectra = spectra[:, mask]\n\n if ds_kind == 'LIBS' and wave.shape[0] not in (6144, 6143, 5485):\n return 415, 'Wrong number of channels for LIBS data: %d.' % wave.shape[0]\n\n # make sure there's no whitespace sticking to the pkeys\n pkey = np.char.strip(pkey)\n\n # Check length and see if the original arrays are the same\n if len(meta_pkeys) > 0 and not np.array_equal(meta_pkeys, pkey):\n if len(meta_pkeys) != len(pkey):\n return 415, 'Spectrum and metadata names mismatch.', 'wrong number of meta_pkeys for vector data'\n\n # Order arrays and check those\n if not np.array_equal(np.argsort(meta_pkeys), np.argsort(pkey)):\n return 415, 'Spectrum and metadata names mismatch.'\n\n try:\n pkey = PrimaryKeyMetadata(pkey)\n except AssertionError: # XXX: convert this to a real error\n return 415, 'Primary keys not unique.'\n\n # make sure wave is in increasing order\n order = np.argsort(wave)\n if not np.array_equal(order, np.arange(len(wave))):\n wave = wave[order]\n spectra = spectra[:, order]\n\n if resample is not None:\n lb, ub, step = resample\n lb = _maybe_float(lb, wave[0])\n ub = _maybe_float(ub, wave[-1])\n step = _maybe_float(step)\n if step is not None:\n new_wave = np.arange(lb, ub + step / 2, step, dtype=wave.dtype)\n new_spectra = np.zeros((len(spectra), len(new_wave)),\n dtype=spectra.dtype)\n for i, y in enumerate(spectra):\n new_spectra[i] = np.interp(new_wave, wave, y)\n wave = new_wave\n spectra = new_spectra\n else:\n lb_idx = np.searchsorted(wave, lb)\n ub_idx = np.searchsorted(wave, ub, side='right')\n spectra = spectra[:, lb_idx:ub_idx]\n wave = wave[lb_idx:ub_idx]\n\n # async loading machinery automatically registers us with DATASETS\n _load = _make_loader_function(description, wave, spectra, pkey=pkey, **meta_kwargs)\n\n if ds_kind == 'LIBS':\n WebLIBSDataset(ds_name, _load)\n else:\n WebVectorDataset(ds_name, ds_kind, _load)\n return None\n\n\ndef _maybe_float(x, default=None):\n try:\n return float(x)\n except ValueError:\n return default\n\n\ndef _make_loader_function(desc, *args, **kwargs):\n def _load(ds):\n ds.set_data(*args, **kwargs)\n ds.is_public = False\n ds.user_added = True\n ds.description = desc\n return True\n\n return _load\n\n\ndef _save_ds(ds_kind, ds_name):\n # Wait for the new dataset to finish registering.\n time.sleep(1)\n for _ in xrange(60):\n if ds_name in DATASETS[ds_kind]:\n break\n logging.info('Waiting for %s [%s] to register...', ds_name, ds_kind)\n time.sleep(1)\n\n # Save the new dataset to disk as a canonical hdf5.\n ds = DATASETS[ds_kind][ds_name]\n # XXX: this path manipulation is pretty hacky\n outdir = os.path.normpath(os.path.join(os.path.dirname(__file__),\n '../../uploads'))\n outname = os.path.join(outdir, '%s_%s.hdf5' % (ds_kind,\n ds_name.replace(' ', '_')))\n logging.info('Writing %s to disk: %s', ds, outname)\n\n # Set up the config entry.\n entry = dict(\n vector=(not isinstance(ds, WebTrajDataset)),\n file=os.path.abspath(outname),\n description=ds.description,\n public=ds.is_public,\n metadata=[])\n # TODO: move this logic to superman.dataset\n with h5py.File(outname, 'w') as fh:\n if entry['vector']:\n fh['/spectra'] = ds.intensities\n fh['/meta/waves'] = ds.bands\n else:\n for key, traj in ds.traj.items():\n fh['/spectra/' + key] = traj\n if ds.pkey is not None:\n fh['/meta/pkey'] = np.char.encode(ds.pkey.keys, 'utf8')\n entry['metadata'].append(('pkey', 'PrimaryKeyMetadata', None))\n for key, m in ds.metadata.items():\n try:\n arr = np.array(m.get_array())\n except:\n logging.exception('Failed to get_array for %s /meta/%s', ds, key)\n continue\n if arr.dtype.char == 'U':\n arr = np.char.encode(arr, 'utf8')\n fh['/meta/' + key] = arr\n entry['metadata'].append([key, type(m).__name__, m.display_name(key)])\n # Clean up if no metadata was added.\n if not entry['metadata']:\n del entry['metadata']\n\n # Update the user-uploaded dataset config with the new dataset.\n config_path = os.path.join(outdir, 'user_data.yml')\n if os.path.exists(config_path):\n config = yaml.safe_load(open(config_path))\n else:\n config = {}\n if ds_kind not in config:\n config[ds_kind] = {ds_name: entry}\n else:\n config[ds_kind][ds_name] = entry\n with open(config_path, 'w') as fh:\n yaml.safe_dump(config, fh, allow_unicode=True)\n\n\nroutes = [\n (r'/_upload_spectrum', SpectrumUploadHandler),\n (r'/_upload_dataset', DatasetUploadHandler),\n]\n" ]
[ [ "numpy.char.strip", "numpy.array", "numpy.array_equal", "numpy.genfromtxt", "numpy.interp", "numpy.diff", "numpy.arange", "numpy.argsort", "numpy.isfinite", "numpy.searchsorted", "pandas.read_csv", "numpy.issubdtype", "numpy.char.encode" ] ]
vishnubk/ml_tutorial_pulsars
[ "1a1b1eabbce43c39222b32974e29dfff5a722601" ]
[ "extract_pfd_features_gbncc_data3.py" ]
[ "import sys, os, glob\nsys.path.append('/home/psr/software/psrchive/install/lib/python2.7/site-packages')\nsys.path.append('/home/psr')\nimport numpy as np\nfrom ubc_AI.training import pfddata\nimport math\nimport time\nt0 = time.time()\n#pfd_files_pulsars = glob.glob('/beegfs/vishnu/scripts/neural_network/test/pulsars/*.pfd')\npfd_files_nonpulsars = sorted(glob.glob('/beegfs/vishnu/scripts/neural_network/test/nonpulsars/*.pfd'))\n\nfraction = 4\ncurrent_segment = 2\nmax_value = int(math.ceil(len(pfd_files_nonpulsars)/fraction))\nprint(max_value)\ndouble_max_value = 2 * max_value\ntriple_max_value = 3 * max_value\nprint(double_max_value)\n# Initialise data objects from getdata class\n#data_object_pulsars = [pfddata(f) for f in pfd_files_pulsars] \ndata_object_nonpulsars = [pfddata(f) for f in pfd_files_nonpulsars[double_max_value:triple_max_value]] \nprint('loaded data into memory')\n# Extract 4 features based on Zhu et.al 2014\n\n#1 time vs phase plot\n#time_phase_plots_pulsars = [f.getdata(intervals=48) for f in data_object_pulsars]\ntime_phase_plots_nonpulsars = [f.getdata(intervals=48) for f in data_object_nonpulsars]\nprint('time phase done')\n#2 freq vs phase plot\n#freq_phase_plots_pulsars = [f.getdata(subbands=48) for f in data_object_pulsars]\nfreq_phase_plots_nonpulsars = [f.getdata(subbands=48) for f in data_object_nonpulsars]\n\nprint('freq phase done')\n#3 Pulse Profile\n#pulse_profile_pulsars = [f.getdata(phasebins=64) for f in data_object_pulsars]\npulse_profile_nonpulsars = [f.getdata(phasebins=64) for f in data_object_nonpulsars]\n\nprint('pulse profile done')\n#4 DM Curve\n\n#dm_curve_pulsars = [f.getdata(DMbins=60) for f in data_object_pulsars]\ndm_curve_nonpulsars = [f.getdata(DMbins=60) for f in data_object_nonpulsars]\n\nprint('dm curve done')\n#Save all features as numpy array files\n\n#np.save('time_phase_gbncc_test_data_pulsars.npy', time_phase_plots_pulsars) \nnp.save('time_phase_gbncc_test_data_nonpulsars_part3.npy', time_phase_plots_nonpulsars) \n\n#np.save('freq_phase_gbncc_test_data_pulsars.npy', freq_phase_plots_pulsars) \nnp.save('freq_phase_gbncc_test_data_nonpulsars_part3.npy', freq_phase_plots_nonpulsars) \n\n#np.save('pulse_profile_gbncc_test_data_pulsars.npy', pulse_profile_pulsars) \nnp.save('pulse_profile_gbncc_test_data_nonpulsars_part3.npy', pulse_profile_nonpulsars) \n\n#np.save('dm_curve_gbncc_test_data_pulsars.npy', dm_curve_pulsars) \nnp.save('dm_curve_gbncc_test_data_nonpulsars_part3.npy', dm_curve_nonpulsars) \nt1 = time.time()\nprint('Total time taken for the code to execute is %s seconds' %str(t1-t0))\n" ]
[ [ "numpy.save" ] ]
mgoldchild/FastMOT
[ "090c8ae357f143658fc81b1059060263105734e8" ]
[ "fastmot/flow.py" ]
[ "import logging\nimport itertools\nimport numpy as np\nimport numba as nb\nimport cv2\n\nfrom .utils.rect import to_tlbr, get_size, get_center\nfrom .utils.rect import mask_area, intersection, crop, transform\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Flow:\n \"\"\"\n A KLT tracker based on optical flow feature point matching.\n Camera motion is simultaneously estimated by tracking feature points\n on the background.\n Parameters\n ----------\n size : (int, int)\n Width and height of each frame.\n config : Dict\n KLT hyperparameters.\n \"\"\"\n\n def __init__(self, size, config):\n self.size = size\n self.bg_feat_scale_factor = config['bg_feat_scale_factor']\n self.opt_flow_scale_factor = config['opt_flow_scale_factor']\n self.feature_density = config['feature_density']\n self.feat_dist_factor = config['feat_dist_factor']\n self.ransac_max_iter = config['ransac_max_iter']\n self.ransac_conf = config['ransac_conf']\n self.max_error = config['max_error']\n self.inlier_thresh = config['inlier_thresh']\n\n self.bg_feat_thresh = config['bg_feat_thresh']\n self.target_feat_params = config['target_feat_params']\n self.opt_flow_params = config['opt_flow_params']\n\n self.bg_feat_detector = cv2.FastFeatureDetector_create(threshold=self.bg_feat_thresh)\n\n # background feature points for visualization\n self.bg_keypoints = np.empty((0, 2), np.float32)\n self.prev_bg_keypoints = np.empty((0, 2), np.float32)\n\n # previous frames\n self.prev_frame_gray = None\n self.prev_frame_small = None\n\n # preallocate\n self.ones = np.full(self.size[::-1], 255, np.uint8)\n self.fg_mask = np.empty_like(self.ones)\n self.frame_rect = to_tlbr((0, 0, *self.size))\n\n def initiate(self, frame):\n \"\"\"\n Preprocesses the first frame to prepare for subsequent optical\n flow computations.\n Parameters\n ----------\n frame : ndarray\n Initial frame.\n \"\"\"\n self.prev_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n self.prev_frame_small = cv2.resize(self.prev_frame_gray, None,\n fx=self.opt_flow_scale_factor[0],\n fy=self.opt_flow_scale_factor[1])\n\n def predict(self, frame, tracks):\n \"\"\"\n Predicts tracklet positions in the next frame and estimates camera motion.\n Parameters\n ----------\n frame : ndarray\n The next frame.\n tracks : List[Track]\n List of tracks to predict.\n Feature points of each track are updated in place.\n Returns\n -------\n Dict[int, ndarray], ndarray\n Returns a dictionary with track IDs as keys and predicted bounding\n boxes of [x1, x2, y1, y2] as values, and a 3x3 homography matrix.\n \"\"\"\n # preprocess frame\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_small = cv2.resize(frame_gray, None, fx=self.opt_flow_scale_factor[0],\n fy=self.opt_flow_scale_factor[1])\n # order tracks from closest to farthest\n tracks.sort(reverse=True)\n\n # detect target feature points\n all_prev_pts = []\n np.copyto(self.fg_mask, self.ones)\n for track in tracks:\n inside_tlbr = intersection(track.tlbr, self.frame_rect)\n target_mask = crop(self.fg_mask, inside_tlbr)\n target_area = mask_area(target_mask)\n keypoints = self._rect_filter(track.keypoints, inside_tlbr, self.fg_mask)\n # only detect new keypoints when too few are propagated\n if len(keypoints) < self.feature_density * target_area:\n img = crop(self.prev_frame_gray, inside_tlbr)\n feature_dist = self._estimate_feature_dist(target_area, self.feat_dist_factor)\n keypoints = cv2.goodFeaturesToTrack(img, mask=target_mask,\n minDistance=feature_dist,\n **self.target_feat_params)\n if keypoints is None:\n keypoints = np.empty((0, 2), np.float32)\n else:\n keypoints = self._ellipse_filter(keypoints, track.tlbr, inside_tlbr[:2])\n # batch keypoints\n all_prev_pts.append(keypoints)\n # zero out target in foreground mask\n target_mask[:] = 0\n target_ends = list(itertools.accumulate(len(pts) for pts in\n all_prev_pts)) if all_prev_pts else [0]\n target_begins = itertools.chain([0], target_ends[:-1])\n\n # detect background feature points\n prev_frame_small_bg = cv2.resize(self.prev_frame_gray, None,\n fx=self.bg_feat_scale_factor[0],\n fy=self.bg_feat_scale_factor[1])\n bg_mask_small = cv2.resize(self.fg_mask, None, fx=self.bg_feat_scale_factor[0],\n fy=self.bg_feat_scale_factor[1], interpolation=cv2.INTER_NEAREST)\n keypoints = self.bg_feat_detector.detect(prev_frame_small_bg, mask=bg_mask_small)\n if len(keypoints) == 0:\n self.bg_keypoints = np.empty((0, 2), np.float32)\n LOGGER.warning('Camera motion estimation failed')\n return {}, None\n keypoints = np.float32([kp.pt for kp in keypoints])\n keypoints = self._unscale_pts(keypoints, self.bg_feat_scale_factor, None)\n bg_begin = target_ends[-1]\n all_prev_pts.append(keypoints)\n\n # match features using optical flow\n all_prev_pts = np.concatenate(all_prev_pts)\n scaled_prev_pts = self._scale_pts(all_prev_pts, self.opt_flow_scale_factor)\n all_cur_pts, status, err = cv2.calcOpticalFlowPyrLK(self.prev_frame_small, frame_small,\n scaled_prev_pts, None,\n **self.opt_flow_params)\n status = self._get_status(status, err, self.max_error)\n all_cur_pts = self._unscale_pts(all_cur_pts, self.opt_flow_scale_factor, status)\n\n # reuse preprocessed frame for next prediction\n self.prev_frame_gray = frame_gray\n self.prev_frame_small = frame_small\n\n # estimate camera motion\n homography = None\n prev_bg_pts, matched_bg_pts = self._get_good_match(all_prev_pts, all_cur_pts,\n status, bg_begin, -1)\n if len(matched_bg_pts) == 0:\n self.bg_keypoints = np.empty((0, 2), np.float32)\n LOGGER.warning('Camera motion estimation failed')\n return {}, None\n homography, inlier_mask = cv2.findHomography(prev_bg_pts, matched_bg_pts,\n method=cv2.RANSAC,\n maxIters=self.ransac_max_iter,\n confidence=self.ransac_conf)\n self.prev_bg_keypoints, self.bg_keypoints = self._get_inliers(prev_bg_pts, matched_bg_pts,\n inlier_mask)\n if homography is None or len(self.bg_keypoints) < self.inlier_thresh:\n self.bg_keypoints = np.empty((0, 2), np.float32)\n LOGGER.warning('Camera motion estimation failed')\n return {}, None\n\n # estimate target bounding boxes\n next_bboxes = {}\n np.copyto(self.fg_mask, self.ones)\n for begin, end, track in zip(target_begins, target_ends, tracks):\n prev_pts, matched_pts = self._get_good_match(all_prev_pts, all_cur_pts,\n status, begin, end)\n prev_pts, matched_pts = self._fg_filter(prev_pts, matched_pts, self.fg_mask, self.size)\n if len(matched_pts) == 0:\n track.keypoints = np.empty((0, 2), np.float32)\n continue\n # model motion as partial affine\n affine_mat, inlier_mask = cv2.estimateAffinePartial2D(prev_pts, matched_pts,\n method=cv2.RANSAC,\n maxIters=self.ransac_max_iter,\n confidence=self.ransac_conf)\n if affine_mat is None:\n track.keypoints = np.empty((0, 2), np.float32)\n continue\n est_tlbr = self._estimate_bbox(track.tlbr, affine_mat)\n track.prev_keypoints, track.keypoints = self._get_inliers(prev_pts, matched_pts,\n inlier_mask)\n if (intersection(est_tlbr, self.frame_rect) is None or\n len(track.keypoints) < self.inlier_thresh):\n track.keypoints = np.empty((0, 2), np.float32)\n continue\n next_bboxes[track.trk_id] = est_tlbr\n track.inlier_ratio = len(track.keypoints) / len(matched_pts)\n # zero out predicted target in foreground mask\n target_mask = crop(self.fg_mask, est_tlbr)\n target_mask[:] = 0\n return next_bboxes, homography\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _estimate_feature_dist(target_area, feat_dist_factor):\n est_feat_dist = round(np.sqrt(target_area) * feat_dist_factor)\n return max(est_feat_dist, 1)\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _estimate_bbox(tlbr, affine_mat):\n tl = transform(tlbr[:2], affine_mat).ravel()\n scale = np.linalg.norm(affine_mat[:2, 0])\n scale = 1. if scale < 0.9 or scale > 1.1 else scale\n size = scale * get_size(tlbr)\n return to_tlbr(np.append(tl, size))\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _rect_filter(pts, tlbr, fg_mask):\n if len(pts) == 0:\n return np.empty((0, 2), np.float32)\n tl, br = tlbr[:2], tlbr[2:]\n pts2i = np.rint(pts).astype(np.int32)\n # filter out points outside the rectangle\n ge_le = (pts2i >= tl) & (pts2i <= br)\n inside = np.where(ge_le[:, 0] & ge_le[:, 1])\n pts, pts2i = pts[inside], pts2i[inside]\n # keep points inside the foreground area\n keep = np.array([i for i in range(len(pts2i)) if\n fg_mask[pts2i[i][1], pts2i[i][0]] == 255])\n return pts[keep]\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _ellipse_filter(pts, tlbr, offset):\n offset = np.asarray(offset, np.float32)\n pts = pts.reshape(-1, 2)\n pts = pts + offset\n center = get_center(tlbr)\n semi_axes = get_size(tlbr) * 0.5\n # filter out points outside the ellipse\n keep = np.sum(((pts - center) / semi_axes)**2, axis=1) <= 1.\n return pts[keep]\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _fg_filter(prev_pts, cur_pts, fg_mask, frame_size):\n if len(cur_pts) == 0:\n return prev_pts, cur_pts\n size = np.asarray(frame_size)\n pts2i = np.rint(cur_pts).astype(np.int32)\n # filter out points outside the frame\n ge_lt = (pts2i >= 0) & (pts2i < size)\n inside = ge_lt[:, 0] & ge_lt[:, 1]\n prev_pts, cur_pts = prev_pts[inside], cur_pts[inside]\n pts2i = pts2i[inside]\n # keep points inside the foreground area\n keep = np.array([i for i in range(len(pts2i)) if\n fg_mask[pts2i[i][1], pts2i[i][0]] == 255])\n return prev_pts[keep], cur_pts[keep]\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _scale_pts(pts, scale_factor):\n scale_factor = np.asarray(scale_factor, np.float32)\n pts = pts * scale_factor\n pts = pts.reshape(-1, 1, 2)\n return pts\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _unscale_pts(pts, scale_factor, mask):\n scale_factor = np.asarray(scale_factor, np.float32)\n pts = pts.reshape(-1, 2)\n if mask is None:\n pts = pts / scale_factor\n else:\n idx = np.where(mask)\n pts[idx] = pts[idx] / scale_factor\n return pts\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _get_status(status, err, max_err):\n return status.ravel().astype(np.bool_) & (err.ravel() < max_err)\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _get_good_match(prev_pts, cur_pts, status, begin, end):\n keep = np.where(status[begin:end])\n prev_pts = prev_pts[begin:end][keep]\n cur_pts = cur_pts[begin:end][keep]\n return prev_pts, cur_pts\n\n @staticmethod\n @nb.njit(fastmath=True, cache=True)\n def _get_inliers(prev_pts, cur_pts, inlier_mask):\n keep = np.where(inlier_mask.ravel())\n return prev_pts[keep], cur_pts[keep]\n" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.copyto", "numpy.empty", "numpy.linalg.norm", "numpy.asarray", "numpy.sum", "numpy.rint", "numpy.where", "numpy.float32", "numpy.sqrt", "numpy.append", "numpy.empty_like" ] ]
dmilios/dirichletGPC
[ "7e460ca07005a5aed97937d2bf2a8a47b6f8051e" ]
[ "results/plot_monitoring_result.py" ]
[ "#!/usr/bin/python3\n# Copyright 2018 Dimitrios Milios, Raffaello Camoriano, \n# Pietro Michiardi,Lorenzo Rosasco, Maurizio Filippone\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# \n# Script that produces a loglog plot of the MNLL progression \n# recorded my a monitoring experiment.\n\nimport os\nimport sys\nimport pickle\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib as mpl\n\nsys.path.insert(0,'src') # our imports\nsys.path.insert(0,'../src') # our imports\nimport utilities\n\n\n\n## binary datasets: EEG, HTRU2, MAGIC, CovertypeBinary, MiniBoo, SUSY\n## multiclass datasets: letter, Drive, MoCap\ndataset = ''\nsplit_idx = ''\n\n\n## cmd arguments to specify:\n## dataset (optional) \n## split_idx (optional)\nif len(sys.argv) > 1:\n dataset = str(sys.argv[1])\nif len(sys.argv) > 2:\n split_idx = str(sys.argv[2])\n\nif split_idx == '':\n split_idx = '01'\n print('Default split_idx: 01')\nif dataset == '':\n print('')\n print('Script that produces a loglog plot of the MNLL progression')\n print('recorded my a monitoring experiment.')\n utilities.print_usage(os.path.basename(__file__))\n exit()\n\n\n\n\ndef plot__gpd_vs_gpc(report, metric):\n fig = plt.figure()\n method_labels = {'gpd': 'GPD', 'gpc': 'GPC'}\n metric_labels = {'err': 'Error rate', 'mnll': 'MNLL'}\n\n method0 = 'gpd' # no batches for method0\n method1 = 'gpc' # lookup batches for method1\n training_size = report['training_size']\n\n method1_batch_sizes = [training_size]\n all_keys = report.keys()\n method1_keys = list(filter(lambda s: s.startswith(method1), all_keys))\n method1mb_keys = list(filter(lambda s: s.find('mb')>=0, method1_keys))\n for s in method1mb_keys:\n idx = s.find('mb')\n s = s[idx+len('mb'):]\n idx = s.find('_')\n s = s[:idx]\n if not s.isnumeric():\n raise Error('Incompatible report structure')\n s = int(s)\n if s not in method1_batch_sizes:\n method1_batch_sizes.append(s)\n method1_batch_sizes.sort(reverse=True)\n\n batch_sizes = {}\n batch_sizes[method1] = method1_batch_sizes\n batch_sizes[method0] = [training_size]\n\n the_colours_to_use = ['C0', 'C3', 'C1', 'C8']\n colours = []\n last_values = []\n cardinalies = []\n for method in [method0, method1]:\n for bsize in batch_sizes[method]:\n prefix = method\n if bsize < training_size:\n prefix += '_' + 'mb' + str(bsize)\n values = report[prefix + '_' + metric + '_values']\n nvalues_full = len(report[method+'_'+metric+'_values'])\n # 'values' is thinned, if much has length larger than nvalues_full\n # so, the function admits both thinned and non-thinned 'values'\n if len(values) > nvalues_full * 1.1:\n iter_per_epoch = training_size / bsize\n values = values[::int(iter_per_epoch)]\n if bsize < training_size:\n bsize_str = ' batch size: ' + str(bsize)\n else:\n bsize_str = ' full'\n label = method_labels[method] + bsize_str\n xx = np.arange(len(values))+1\n line = plt.loglog(xx, values, label=label, linewidth=2,\n color=the_colours_to_use[len(colours)])[0]\n c = line.get_color()\n colours.append(c)\n cardinalies.append(len(values))\n last_values.append(values[-1])\n\n for i in range(len(cardinalies)):\n colour = colours[i]\n xx = np.arange(cardinalies[i]-1, max(cardinalies))\n fx = last_values[i] * np.ones(len(xx))\n if len(xx) > 0:\n plt.plot(xx, fx, '--', color=colour)\n\n #plt.yticks(rotation=45)\n axes = fig.get_axes()\n for ax in axes:\n ax.set_yticklabels('')\n plt.ylabel(metric_labels[metric])\n plt.xlabel('Iteration/Epoch')\n y1 = plt.ylim()[1]\n plt.ylim(top=y1*1.2)\n plt.legend()\n plt.tight_layout()\n\n\n\npath = os.path.dirname(__file__)\nreport_filename = dataset + '_iso_report' + split_idx + '.dat'\npath = os.path.join(path, 'monitoring', report_filename)\nreport = pickle.load(open(path, \"rb\" ))\n\n\nplot__gpd_vs_gpc(report, 'mnll')\nplt.title(dataset)\nplt.show()\n" ]
[ [ "matplotlib.pylab.ylabel", "matplotlib.pylab.legend", "matplotlib.pylab.show", "matplotlib.pylab.figure", "matplotlib.pylab.xlabel", "matplotlib.pylab.ylim", "matplotlib.pylab.tight_layout", "matplotlib.pylab.title", "matplotlib.pylab.plot" ] ]
ravi-teja-mullapudi/ViZDoom
[ "7aa037fc9b25aac200f4b1496f91c8d10b30703c" ]
[ "examples/python/train_sptm/src/common/util.py" ]
[ "#!/usr/bin/env python\nimport cPickle\nimport cv2\nimport numpy as np\nimport h5py\nfrom vizdoom import *\nimport math\nimport os\nimport os.path\nimport sys\nimport random\nimport scipy.misc\n\nfrom constants import *\nfrom video_writer import *\n\nimport cv2\nimport os\nimport cPickle\nimport numpy as np\nnp.random.seed(DEFAULT_RANDOM_SEED)\nimport keras\nimport random\nrandom.seed(DEFAULT_RANDOM_SEED)\n\ndef mean(numbers):\n return float(sum(numbers)) / max(len(numbers), 1)\n\ndef wait_idle(game, wait_idle_tics=WAIT_IDLE_TICS):\n if wait_idle_tics > 0:\n game.make_action(STAY_IDLE, wait_idle_tics)\n\ndef game_make_action_wrapper(game, action, repeat):\n game.make_action(action, repeat)\n wait_idle(game)\n return None\n\ndef save_list_of_arrays_to_hdf5(input, prefix):\n stacked = np.array(input)\n h5f = h5py.File(prefix + HDF5_NAME, 'w')\n h5f.create_dataset('dataset', data=stacked)\n h5f.close()\n\ndef load_array_from_hdf5(prefix):\n h5f = h5py.File(prefix + HDF5_NAME,'r')\n data = h5f['dataset'][:]\n h5f.close()\n return data\n\nclass StateRecorder():\n def __init__(self, game):\n self.game = game\n self.game_variables = []\n self.actions = []\n self.rewards = []\n self.screen_buffers = []\n\n def record_buffers(self, state):\n self.screen_buffers.append(state.screen_buffer.transpose(VIZDOOM_TO_TF))\n\n '''records current state, then makes the provided action'''\n def record(self, action_index, repeat):\n state = self.game.get_state()\n self.record_buffers(state)\n self.game_variables.append(state.game_variables)\n r = game_make_action_wrapper(self.game, ACTIONS_LIST[action_index], repeat)\n self.actions.append(action_index)\n self.rewards.append(r)\n\n def save_recorded_buffers(self):\n save_list_of_arrays_to_hdf5(self.screen_buffers, SCREEN_BUFFERS_PATH)\n\n def save_recorded(self):\n self.save_recorded_buffers()\n data = (self.game_variables,\n self.actions,\n self.rewards)\n with open(NAVIGATION_RECORDING_PATH, 'wb') as output_file: \n cPickle.dump(data, output_file)\n\ndef downsample(input, factor):\n for _ in xrange(factor):\n input = cv2.pyrDown(input)\n return input\n\ndef double_downsampling(input):\n return cv2.pyrDown(cv2.pyrDown(input))\n\ndef double_upsampling(input):\n return cv2.pyrUp(cv2.pyrUp(input))\n\ndef color2gray(input):\n return cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)\n\ndef doom_navigation_setup(seed, wad):\n game = DoomGame()\n game.load_config(DEFAULT_CONFIG)\n game.set_doom_scenario_path(wad)\n game.set_seed(seed)\n game.add_game_args(\"+sv_cheats 1\")\n game.init()\n\n return game\n\ndef calculate_distance_angle(start_coordinates, current_coordinates):\n distance = math.sqrt((start_coordinates[0] - current_coordinates[0]) ** 2 +\n (start_coordinates[1] - current_coordinates[1]) ** 2 + \n (start_coordinates[2] - current_coordinates[2]) ** 2)\n abs_angle_difference = math.fabs(start_coordinates[3] - current_coordinates[3])\n angle = min(abs_angle_difference, 360.0 - abs_angle_difference)\n return distance, angle\n\ndef generator(x, y, batch_size, max_action_distance):\n while True:\n number_of_samples = x.shape[0]\n x_list = []\n y_list = []\n for index in xrange(batch_size):\n choice = random.randint(0, number_of_samples - max_action_distance - 1)\n distance = random.randint(1, max_action_distance)\n current_x = x[choice]\n current_y = y[choice]\n future_x = x[choice + distance]\n x_list.append(np.concatenate((current_x, future_x), axis=2))\n y_list.append(current_y)\n yield np.array(x_list), np.array(y_list)\n\ndef vertically_stack_image_list(input_image_list):\n image_list = []\n for image in input_image_list:\n image_list.append(image)\n image_list.append(np.zeros([SHOW_BORDER, image.shape[1], SHOW_CHANNELS], dtype=np.uint8))\n return np.concatenate(image_list, axis=0)\n\ndef save_np_array_as_png(input, path):\n scipy.misc.toimage(input, cmin=0.0, cmax=255.0).save(path)\n\nclass NavigationVideoWriter():\n def __init__(self, save_path, nonstop=False):\n self.nonstop = nonstop\n self.video_writer = VideoWriter(save_path,\n (2 * SHOW_WIDTH + SHOW_BORDER, SHOW_HEIGHT),\n mode='replace',\n framerate=FPS)\n\n def side_by_side(self, first, second):\n if not HIGH_RESOLUTION_VIDEO:\n first = double_upsampling(first)\n second = double_upsampling(second)\n return np.concatenate((first,\n np.zeros([SHOW_HEIGHT, SHOW_BORDER, SHOW_CHANNELS], dtype=np.uint8),\n second), axis=1)\n\n def write(self, left, right, counter, deep_net_actions):\n side_by_side_screen = self.side_by_side(left, right)\n if not self.nonstop:\n if counter == 0:\n for _ in xrange(START_PAUSE_FRAMES):\n self.video_writer.add_frame(side_by_side_screen)\n elif counter + 1 < deep_net_actions:\n self.video_writer.add_frame(side_by_side_screen)\n else:\n for _ in xrange(END_PAUSE_FRAMES):\n self.video_writer.add_frame(side_by_side_screen)\n for _ in xrange(DELIMITER_FRAMES):\n self.video_writer.add_frame(np.zeros_like(side_by_side_screen))\n else:\n self.video_writer.add_frame(side_by_side_screen)\n\n def close(self):\n self.video_writer.close()\n\ndef make_deep_action(current_screen, goal_screen, model, game, repeat, randomized):\n x = np.expand_dims(np.concatenate((current_screen,\n goal_screen), axis=2), axis=0)\n action_probabilities = np.squeeze(model.predict(x,\n batch_size=1))\n action_index = None\n if randomized:\n action_index = np.random.choice(len(ACTIONS_LIST), p=action_probabilities)\n else:\n action_index = np.argmax(action_probabilities)\n game_make_action_wrapper(game, ACTIONS_LIST[action_index], repeat)\n return action_index, action_probabilities, current_screen\n\ndef current_make_deep_action(goal_screen, model, game, repeat, randomized):\n state = game.get_state()\n current_screen = state.screen_buffer.transpose(VIZDOOM_TO_TF)\n return make_deep_action(current_screen, goal_screen, model, game, repeat, randomized)\n\ndef get_deep_prediction(current_screen, goal_screen, model):\n x = np.expand_dims(np.concatenate((current_screen,\n goal_screen), axis=2), axis=0)\n return np.squeeze(model.predict(x, batch_size=1))\n\ndef current_get_deep_prediction(goal_screen, model, game):\n state = game.get_state()\n current_screen = state.screen_buffer.transpose(VIZDOOM_TO_TF)\n return get_deep_prediction(current_screen, goal_screen, model)\n\ndef explore(game, number_of_actions):\n is_left = random.random() > 0.5\n start_moving_straight = random.randint(0, number_of_actions)\n for counter in xrange(number_of_actions):\n if counter >= start_moving_straight:\n action_index = INVERSE_ACTION_NAMES_INDEX['MOVE_FORWARD']\n else:\n if is_left:\n action_index = INVERSE_ACTION_NAMES_INDEX['TURN_LEFT']\n else:\n action_index = INVERSE_ACTION_NAMES_INDEX['TURN_RIGHT']\n game_make_action_wrapper(game, ACTIONS_LIST[action_index], TEST_REPEAT)\n\ndef get_distance(first_point, second_point):\n return math.sqrt((first_point[0] - second_point[0]) ** 2 +\n (first_point[1] - second_point[1]) ** 2)\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros_like", "numpy.zeros", "numpy.random.seed", "numpy.argmax" ] ]
cjg91/tinygrad
[ "32d42c2667ce7e7231fc9197f2975eafb2663935" ]
[ "models/vit.py" ]
[ "import numpy as np\nfrom tinygrad.tensor import Tensor\nfrom models.transformer import TransformerBlock\n\nclass ViT:\n def __init__(self, layers=12, embed_dim=192, num_heads=3):\n self.embedding = (Tensor.uniform(embed_dim, 3, 16, 16), Tensor.zeros(embed_dim))\n self.cls = Tensor.ones(1, 1, embed_dim)\n self.pos_embedding = Tensor.ones(1, 197, embed_dim)\n self.tbs = [\n TransformerBlock(embed_dim=embed_dim, num_heads=num_heads, ff_dim=embed_dim*4,\n prenorm=True, act=lambda x: x.gelu())\n for i in range(layers)]\n self.encoder_norm = (Tensor.uniform(embed_dim), Tensor.zeros(embed_dim))\n self.head = (Tensor.uniform(embed_dim, 1000), Tensor.zeros(1000))\n\n def patch_embed(self, x):\n x = x.conv2d(*self.embedding, stride=16)\n x = x.reshape(shape=(x.shape[0], x.shape[1], -1)).transpose(order=(0,2,1))\n return x\n\n def forward(self, x):\n ce = self.cls.add(Tensor.zeros(x.shape[0],1,1))\n pe = self.patch_embed(x)\n x = ce.cat(pe, dim=1)\n x = x.add(self.pos_embedding).sequential(self.tbs)\n x = x.layernorm().linear(*self.encoder_norm)\n return x[:, 0].linear(*self.head)\n\n def load_from_pretrained(m):\n import io\n from extra.utils import fetch\n\n # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n url = \"https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz\"\n dat = np.load(io.BytesIO(fetch(url)))\n\n #for x in dat.keys():\n # print(x, dat[x].shape, dat[x].dtype)\n\n m.embedding[0].assign(np.transpose(dat['embedding/kernel'], (3,2,0,1)))\n m.embedding[1].assign(dat['embedding/bias'])\n\n m.cls.assign(dat['cls'])\n\n m.head[0].assign(dat['head/kernel'])\n m.head[1].assign(dat['head/bias'])\n\n m.pos_embedding.assign(dat['Transformer/posembed_input/pos_embedding'])\n m.encoder_norm[0].assign(dat['Transformer/encoder_norm/scale'])\n m.encoder_norm[1].assign(dat['Transformer/encoder_norm/bias'])\n\n for i in range(12):\n m.tbs[i].query[0].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/kernel'].reshape(192, 192))\n m.tbs[i].query[1].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/query/bias'].reshape(192))\n m.tbs[i].key[0].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/kernel'].reshape(192, 192))\n m.tbs[i].key[1].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/key/bias'].reshape(192))\n m.tbs[i].value[0].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/kernel'].reshape(192, 192))\n m.tbs[i].value[1].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/value/bias'].reshape(192))\n m.tbs[i].out[0].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/kernel'].reshape(192, 192))\n m.tbs[i].out[1].assign(dat[f'Transformer/encoderblock_{i}/MultiHeadDotProductAttention_1/out/bias'].reshape(192))\n m.tbs[i].ff1[0].assign(dat[f'Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/kernel'])\n m.tbs[i].ff1[1].assign(dat[f'Transformer/encoderblock_{i}/MlpBlock_3/Dense_0/bias'])\n m.tbs[i].ff2[0].assign(dat[f'Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/kernel'])\n m.tbs[i].ff2[1].assign(dat[f'Transformer/encoderblock_{i}/MlpBlock_3/Dense_1/bias'])\n m.tbs[i].ln1[0].assign(dat[f'Transformer/encoderblock_{i}/LayerNorm_0/scale'])\n m.tbs[i].ln1[1].assign(dat[f'Transformer/encoderblock_{i}/LayerNorm_0/bias'])\n m.tbs[i].ln2[0].assign(dat[f'Transformer/encoderblock_{i}/LayerNorm_2/scale'])\n m.tbs[i].ln2[1].assign(dat[f'Transformer/encoderblock_{i}/LayerNorm_2/bias'])\n" ]
[ [ "numpy.transpose" ] ]