Columns: content (string, 39–9.28k chars), sha1 (string, 40 chars), id (int64, 8–710k).
def parse_clan_file(clan_list):
    """
    Parses a list of Rfam clan accessions

    clan_list: A plain .txt file containing a list of Rfam Clan Accessions

    return: A list of clan accessions
    """
    with open(clan_list, 'r') as fp:
        clan_accessions = [x.strip() for x in fp]
    return clan_accessions
c5869235750902876f10408e73bbf675316d130c
49,312
def get_column(su, i):
    """Get the ith column of the sudoku."""
    return [su[j][i] for j in range(len(su))]
ff35098f14bed4cef938a4a4e6af3d60fb4e7be3
38,674
def compute_age(date, dob):
    """
    Compute a victim's age.

    :param datetime.date date: crash date
    :param datetime.date dob: date of birth
    :return: the victim's age.
    :rtype: int
    """
    DAYS_IN_YEAR = 365
    # Compute the age (integer division truncates).
    return (date - dob).days // DAYS_IN_YEAR
39a88d3f780f46b50d4d64aa0f05f54d20389396
680,992
def is_unique(l):
    """Check if all the elements in list l are unique."""
    assert type(l) is list, "Type %s is not list!" % type(l)
    return len(l) == len(set(l))
b39f70c2a9ccf0c862a57775c2090e4d7ebe46a2
358,922
def get_unchanged(src_list, npred_dict_new, npred_dict_old, npred_threshold=1e4, frac_threshold=0.9):
    """Compare two dictionaries of npreds and get the list of sources that
    have changed less than the set thresholds

    Parameters
    ----------
    src_list : list
        List of sources to examine
    npred_dict_new : dict
        Dictionary mapping source name to npred for the current weights file
    npred_dict_old : dict
        Dictionary mapping source name to npred for the previous weights file
    npred_threshold : float
        Minimum value of npred above which to consider sources changed
    frac_threshold : float
        Value of npred_old / npred_new above which to consider sources unchanged

    Returns
    -------
    l : list
        Names of 'unchanged' sources
    """
    o = []
    for s in src_list:
        npred_new = npred_dict_new[s]
        if npred_new < npred_threshold:
            o += [s]
            continue
        if npred_dict_old is None:
            npred_old = 0.
        else:
            npred_old = npred_dict_old[s]
        frac = npred_old / npred_new
        if frac > frac_threshold:
            o += [s]
    return o
d344c4aa34ceff2003b4350d31436c03656d4bfb
202,459
def _get_hidden_node_location(flattened_index, num_rows, num_columns):
    """Converts the flattened index of a hidden node to its index in the 3D array.

    Converts the index of a hidden node in the first convolution layer
    (flattened) into its location - row, column, and channel - in the 3D
    activation map. The 3D activation map has dimensions:
    (num_channels, num_rows, num_columns).

    Args:
        flattened_index: int, index of a hidden node in the first convolution
            layer after it is flattened.
        num_rows: int, number of rows in the activation map produced by each
            kernel.
        num_columns: int, number of columns in the activation map produced by
            each kernel.

    Returns:
        channel: int, channel number of the activation map to which the hidden
            node belongs.
        row: int, row number of the hidden node in the activation map.
        column: int, column number of the hidden node in the activation map.
    """
    total = num_rows * num_columns
    output_activation_map_row = (flattened_index % total) // num_columns
    output_activation_map_column = (flattened_index % total) % num_columns
    return (flattened_index // total,
            output_activation_map_row,
            output_activation_map_column)
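A quick worked example (values are illustrative): for a 4x5 activation map, flattened index 27 lands in channel 1, row 1, column 2.

# Worked example for the function above (hypothetical sizes):
# 27 // 20 = 1 (channel); 27 % 20 = 7; 7 // 5 = 1 (row); 7 % 5 = 2 (column)
assert _get_hidden_node_location(27, num_rows=4, num_columns=5) == (1, 1, 2)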
34eaae4183f234ee7bdf176740621ad465cf7006
559,578
def get_filt_raref_suffix(p_filt_threshs: str, raref: bool) -> str:
    """Create a suffix based on the passed config to denote whether the run
    is for filtered and/or rarefied data.

    Parameters
    ----------
    p_filt_threshs : str
        Minimum sample read abundance to be kept in the sample
    raref : bool
        Whether to repeat all analyses on rarefied datasets (must be set
        for a config passed to `-r` to be used)

    Returns
    -------
    filt_raref : str
        Suffix for output scripts denoting whether the run is for
        filtered and/or rarefied data
    """
    filt_raref = ''
    if p_filt_threshs:
        filt_raref += '_flt'
    if raref:
        filt_raref += '_rrf'
    return filt_raref
274add1a8b771013c8e3b4d473a3afbeec0db3a8
411,756
def square_boxes(boxes):
    """
    Takes bounding boxes that are almost square and makes them exactly
    square to deal with rounding errors.

    Parameters
    ----------
    boxes : list of tuple of tuple of int
        The bounding boxes to be squared.

    Returns
    -------
    boxes : list of tuple of tuple of int
        The squared version of the boxes.
    """
    for i, box in enumerate(boxes):
        # Ignore missing boxes
        if box is None:
            continue
        x = box[1][0] - box[0][0]
        y = box[1][1] - box[0][1]
        if x == y:
            # Box is already square
            continue
        elif x < y:
            # x was rounded down
            boxes[i] = (box[0], (box[1][0] + 1, box[1][1]))
        else:
            # y was rounded down
            boxes[i] = (box[0], (box[1][0], box[1][1] + 1))
    return boxes
4e772a5c1d01cd2850c717dc0a14ef68c4d04569
381,115
def read_file(infile):
    """Read an ASCII file and return the contents as a list of lines."""
    with open(infile) as f_in:
        contents = f_in.readlines()
    return contents
70c2153335e8a8cb834e8602f90ee6815d111efc
191,279
import sys


def parse_sources(sources_filename):
    """Parse a 'sources' file to get a list of files that need to be
    downloaded for a package.

    Args:
        param1 (str) sources_filename: the full path to a valid sources file

    Returns:
        list of [str:md5sum, str:url, str_or_None:override_filename]
    """
    with open(sources_filename, "r") as fd:
        fbuff = fd.readlines()

    sources = []
    for line in fbuff:
        split_line = line.rstrip().split(" ")
        line_length = len(split_line)
        if line_length not in (2, 3):
            print("{} appears to have a mangled line...".format(sources_filename))
            sys.exit(1)
        md5 = split_line[0]
        url = split_line[1]
        if line_length == 3:
            filename = split_line[2]
        else:
            filename = None
        sources.append([md5, url, filename])
    return sources
faff9bc3ee2ff634d899219620d60dd93bd23615
283,784
def getArch(rec):
    """Return arch type (intel/amd/arm)."""
    info = rec["product"]["attributes"]
    proc = info["physicalProcessor"]
    if proc.startswith("Intel "):
        return "intel"
    if proc.startswith("High Frequency Intel "):
        return "intel"
    if proc.startswith("AMD EPYC "):
        return "amd"
    if proc.startswith("AWS Graviton"):
        return "arm"
    if proc.startswith("Variable"):
        return "intel"
    raise Exception("unknown cpu: %s" % proc)
cc75766c49c748e9be8cbda3ca12d957777092c4
201,971
from pathlib import Path
from typing import Dict


def license_destination(
    destination: Path, libname: str, filename: str, license_directories: Dict[str, str]
) -> Path:
    """Given the (reconstructed) library name, find the appropriate destination."""
    normal = destination / libname
    if normal.is_dir():
        return normal / filename

    lowercase = destination / libname.lower()
    if lowercase.is_dir():
        return lowercase / filename

    if libname in license_directories:
        return destination / license_directories[libname] / filename

    # Fall back to libname.LICENSE (used for non-directories)
    return destination / "{}.{}".format(libname, filename)
c10397d1e7f1664251edf085e78bdd4728b3e846
362,463
def dumb_unix2dos(in_str):
    """Inefficient but simple unix2dos string conversion.

    Converts '\\x0A' --> '\\x0D\\x0A'

    Note: assumes pure Unix line endings; input that already contains
    '\\x0D\\x0A' would come out as '\\x0D\\x0D\\x0A'.
    """
    return in_str.replace('\x0A', '\x0D\x0A')
dfe5e8ad2f58b6440cb38c9b7e86dc8d1559346d
323,638
def mk_time_info_extractor(spec):
    """
    Returns a function that will extract information from timestamps in a dict format.
    The specification should be a list of timetuple attributes
    (see https://docs.python.org/2/library/time.html#time.struct_time) to extract,
    or a {k: v, ...} dict where the values v are the timetuple attributes, and
    the keys k are what you want to call them in the output.

    Example:
        fun = mk_time_info_extractor({'day_of_week': 'tm_wday', 'hour_of_day': 'tm_hour'})
        # assuming you defined some timestamp called t...
        print(t)       # 2015-06-02 20:46:16.629000
        print(fun(t))  # {'day_of_week': 1, 'hour_of_day': 20}
    """
    if not isinstance(spec, dict):  # if spec is not a dict, make it so
        spec = {x: x for x in spec}

    def extractor(timestamp):
        time_struct = timestamp.timetuple()
        return {k: getattr(time_struct, v) for k, v in spec.items()}

    return extractor
808382a459064a31e95c481b16c66f92a0e70a16
687,579
def get_duplicates(items):
    """
    Gets a list of duplicate items in the given list of items.

    :param list | tuple | set items: items to be checked for duplications.

    :rtype: list
    """
    unique_set = set(items)
    if len(unique_set) == len(items):
        return []

    duplicates = []
    for item in unique_set:
        if items.count(item) > 1:
            duplicates.append(item)
    return duplicates
36e936d029567df40919e4ed6676de0a5eca89a9
218,608
def restaurant_bouncer(age: int, fully_vaxed: bool = False) -> bool:
    """
    Checks if the person is allowed to enter the restaurant or not.

    Raises AssertionError if age is negative or not an int.
    Raises AssertionError if fully_vaxed is not a bool.

    :param age: (int) age of the individual
    :param fully_vaxed: (bool) vaccination status, default is False
    :return: (bool) True if allowed else False
    """
    assert isinstance(age, int), 'age must be an int'
    assert isinstance(fully_vaxed, bool), 'fully_vaxed must be a bool'
    assert age >= 0, 'age cannot be negative'

    if fully_vaxed or age < 18:
        print('Allowed to enter restaurant')
        return True
    else:
        print('Sorry, not allowed in')
        return False
e7cafd0b7bff12ce73828d56df49a085a1c21d8a
196,841
def is_odd(x):
    """Judge whether the parameter is odd."""
    return x % 2 != 0
7b99fceaf587cdacae853f19fe08a274e746ef0b
279,367
def is_number(s):
    """Check if `s` is a number.

    Parameters
    ----------
    s : str, int, float, list
        Variable for which the function checks, if it is a number.

    Returns
    -------
    bool
        True, if the variable is a number. For example 1, 1.0.
        False, if the variable is not a pure number. For example "a", "1a".
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-castable types such as lists.
        return False
129933b7e28bbfacbe042cef53ac6d3d60354a38
448,218
def idxd_scan_accel_engine(client, config_number=None, config_kernel_mode=None):
    """Scan and enable the IDXD accel engine.

    Args:
        config_number: Pre-defined configuration number, see docs.
        config_kernel_mode: Use kernel IDXD driver. (optional)
    """
    params = {}
    params['config_number'] = config_number
    if config_kernel_mode is not None:
        params['config_kernel_mode'] = config_kernel_mode
    return client.call('idxd_scan_accel_engine', params)
0a0871357c06eeb09ca6af72532b88c262398d82
323,981
def _build_rules_helper(search):
    """Helper function for build_rules().

    A branch node like:

        ["and", [node1, node2]]

    will be transformed, recursively, into:

        {
            "condition": "AND",
            "rules": [_build_rules_helper(node1), _build_rules_helper(node2)]
        }

    A leaf node like:

        ["nm", "contains", "paracetamol"]

    will be transformed into:

        {"id": "nm", "operator": "contains", "value": "paracetamol"}
    """
    assert len(search) in [2, 3]
    if len(search) == 2:
        # branch node
        return {
            "condition": search[0].upper(),
            "rules": list(map(_build_rules_helper, search[1])),
        }
    else:
        # leaf node
        value = search[2]
        if value is True:
            value = 1
        elif value is False:
            value = 0
        return {"id": search[0], "operator": search[1], "value": value}
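A usage sketch with a hypothetical search expression; note how the boolean leaf value is coerced to an int:

# Branch node with two leaves (illustrative input):
search = ["and", [["nm", "contains", "paracetamol"], ["is_generic", "equal", True]]]
# _build_rules_helper(search) evaluates to:
# {"condition": "AND",
#  "rules": [{"id": "nm", "operator": "contains", "value": "paracetamol"},
#            {"id": "is_generic", "operator": "equal", "value": 1}]}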
7414ad3b7801291e09a0ec505ab81502187f273d
374,361
def postproc(maps):
    """Generate PD, R1, R2* (and MTsat) volumes from log-parameters

    Parameters
    ----------
    maps : ParameterMaps

    Returns
    -------
    pd : ParameterMap
    r1 : ParameterMap
    r2s : ParameterMap
    mt : ParameterMap, optional
    """
    maps.r1.volume = maps.r1.fdata().exp_()
    maps.r1.name = 'R1'
    maps.r1.unit = '1/s'

    maps.r2s.volume = maps.r2s.fdata().exp_()
    maps.r2s.name = 'R2*'
    maps.r2s.unit = '1/s'

    maps.pd.volume = maps.pd.fdata().exp_()
    # Fixed: these attributes belong to the PD map, not the R2* map.
    maps.pd.name = 'PD'
    maps.pd.unit = 'a.u.'

    if hasattr(maps, 'mt'):
        maps.mt.volume = maps.mt.fdata().neg_().exp_()
        maps.mt.volume += 1
        maps.mt.volume = maps.mt.fdata().reciprocal_()
        maps.mt.volume *= 100
        maps.mt.name = 'MTsat'
        maps.mt.unit = 'p.u.'
        return maps.pd, maps.r1, maps.r2s, maps.mt

    return maps.pd, maps.r1, maps.r2s
db16ec87e2400e7a627f23cc3f89a982c6a3ba66
42,910
import hashlib
import json


def get_config_tag(config):
    """Get configuration tag.

    Whenever the configuration changes in a way that makes the intermediate
    representation incompatible, the tag value will change as well.
    """
    # Configuration attributes that affect the representation value
    config_attributes = dict(frame_sampling=config.proc.frame_sampling)
    sha256 = hashlib.sha256()
    sha256.update(json.dumps(config_attributes).encode("utf-8"))
    return sha256.hexdigest()[:40]
2cab6e9473822d0176e878114ceb3fda94d1e0f7
1,510
import math


def is_hexagonal_number(number: int) -> bool:
    """Check if a given number `number` is a hexagonal number of the
    form n * (2*n - 1)."""
    return ((math.sqrt(8 * number + 1) + 1) / 4.0).is_integer()
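A quick sanity check: 45 = 5 * (2*5 - 1) is the fifth hexagonal number, while 44 is not hexagonal.

assert is_hexagonal_number(45) and not is_hexagonal_number(44)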
bfa54070f199af7341bd7092b8006f770cad7f9a
506,554
def relative_abundance(data, percent=False):
    """
    Compute the relative abundance values for a set of data.

    Args:
        data (list of lists): Each list in data represents a row of data.
        percent (bool): Abundance is a percent of 100 (30 for 30% instead of 0.3)

    Requires:
        None

    Returns:
        (list of lists): Each list in data represents a row of data with
        relative abundance values.

    Example:
        relative_abundance([[1,2,3],[4,5,6]])
    """
    # compute the sum for each column
    sums = [0.0] * len(data[0])
    for i in range(len(data[0])):
        for row in data:
            sums[i] += float(row[i])

    relab = []
    for row in data:
        new_row = []
        for i, value in enumerate(row):
            try:
                new_value = value / sums[i]
            except ZeroDivisionError:
                new_value = 0
            new_row.append(new_value)
        if percent:
            # Use a list comprehension: under Python 3, map() would leave a
            # lazy iterator in the returned rows instead of a list.
            new_row = [x * 100.0 for x in new_row]
        relab.append(new_row)

    return relab
74809214376b0741468a999270e4b921cb0afbae
252,152
import re


def clean_tag(tag):
    """Clean image tags before logging to TensorBoard."""
    invalid_characters = re.compile(r'[^-/\w\.]')
    return invalid_characters.sub('_', tag)
e5b200564966a5832cd0ec5be14b42c77fe21f32
114,096
import hashlib


def checksum(file_path, hash_type=hashlib.sha256, chunk_num_blocks=128):
    """
    Compute a hash checksum of the given file. The default hash algorithm is SHA-256.

    :param file_path: Path of the file.
    :param hash_type: Hash constructor to use (hashlib.md5, hashlib.sha256, etc).
    :param chunk_num_blocks: Number of hash blocks to read per chunk.
    :return: Hexadecimal checksum of the file.
    """
    # Instantiate the hash object; the original assigned the constructor
    # itself, which has no block_size or update().
    hash_to_use = hash_type()
    with open(file_path, "rb") as file:
        # Read and iterate over the data a chunk at a time until an empty read.
        for chunk in iter(lambda: file.read(chunk_num_blocks * hash_to_use.block_size), b""):
            hash_to_use.update(chunk)
    return hash_to_use.hexdigest()
a2c9210e6b908f69f8a4ac8e476156c31d9b8687
481,392
import re


def is_dashed_words(token):
    """Check whether the string is dash-separated, like "Galatasaray-Besiktas".

    :param token: string
    :return: Boolean
    """
    regexp = re.compile(r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$')
    return regexp.search(token) is not None
c7bf01603aae5f7b8517d2f33d79346dac0940e9
535,980
def read_labeled_image_list(image_list_file):
    """Reads a .csv file containing paths and labels; it should be in the format:

        image_file_location1,valence_value1,arousal_value1
        image_file_location2,valence_value2,arousal_value2
        ...

    Images should be jpgs.

    Returns:
        a list with all filenames in file image_list_file and a list
        containing lists of the 2 respective labels
    """
    filenames = []
    labels_val = []
    labels_ar = []
    with open(image_list_file, 'r') as f:
        for line in f:
            inputs = line.rstrip().split(',')
            filenames.append(inputs[0])
            labels_val.append(float(inputs[1]))
            labels_ar.append(float(inputs[2]))
    labels = [list(a) for a in zip(labels_val, labels_ar)]
    return filenames, labels
15b86403c2eaf9ddc358a2c49a5d53a87e90a4d7
360,268
def merge_dict(*data) -> dict:
    """Merges any number of dictionaries together.

    Args:
        data: any number of dictionaries

    Returns:
        result: merged dictionary
    """
    result = {}
    for d in data:
        result.update(d)
    return result
d5a77e8742bfae6e6846ae05be80e51e6e45e849
490,905
def read_clique_file(clique_ases_filename):
    """
    Format of clique file:
    !XXXXX <- These lines correspond to blacklisted guard ASes
    XXXXX  <- These lines correspond to client ASes in the clique
    One AS per line.
    """
    blacklisted_guard_asns = []
    clique_asns = []
    with open(clique_ases_filename, 'r') as clique_file:
        for line in clique_file:
            line = line.strip('\n')
            # startswith() also safely handles blank lines.
            if line.startswith('!'):
                blacklisted_guard_asns.append(line[1:])
            else:
                clique_asns.append(line)
    return clique_asns, blacklisted_guard_asns
9b1caee9a01ee74fe79bfcd3e9573cb0c33acc58
403,433
def find_person_id(source_table):
    """
    Convenience function to return the person_id for a source table.

    Args:
        source_table (ScanReportTable)

    Returns:
        person_id (ScanReportField)
    """
    return source_table.person_id
533fdcd7206b8af9ff35d5120af943e5380e246a
197,132
def first_matching(iterable, predicate):
    """The first item matching a predicate.

    Args:
        iterable: An iterable series of items to be searched.
        predicate: A callable to which each item will be passed in turn.

    Returns:
        The first item for which the predicate returns True.

    Raises:
        ValueError: If there are no matching items.
    """
    for item in iterable:
        if predicate(item):
            return item
    raise ValueError("No matching items")
8fe4a07e8794847b7e3fb6488ffdf5216e3b261f
446,886
def _chk(resval):
    """Check the result of a command."""
    if resval[0] != 'OK':
        raise Exception(resval[1])
    return resval[1]
9ba56f760a4b25874c62f44301856d77b786c778
74,514
def upload_metadata_fixture() -> dict:
    """Fixture to return an example metadata dict for creating a dataset.

    Returns:
        dict: JSON formatted metadata dict for a Dataset
    """
    metadata = {
        "@context": ["metadata-v1"],
        "@type": "dcat:Dataset",
        "dafni_version_note": "Initial Dataset version",
        "dcat:contactPoint": {
            "@type": "vcard:Organization",
            "vcard:fn": "Tester 1",
            "vcard:hasEmail": "[email protected]",
        },
        "dcat:keyword": ["Test"],
        "dcat:theme": ["Utility and governmental services"],
        "dct:PeriodOfTime": {
            "type": "dct:PeriodOfTime",
            "time:hasBeginning": None,
            "time:hasEnd": None,
        },
        "dct:accrualPeriodicity": None,
        "dct:conformsTo": {"@id": None, "@type": "dct:Standard", "label": None},
        "dct:created": "2021-03-29",
        "dct:creator": [
            {
                "@type": "foaf:Organization",
                "@id": "https://testing.com",
                "foaf:name": "Testing",
                "internalID": None,
            }
        ],
        "dct:description": "Some data for testing",
        "dct:identifier": [],
        "dct:language": "en",
        "dct:license": {
            "@type": "LicenseDocument",
            "@id": "https://creativecommons.org/licences/by/4.0/",
            "rdfs:label": None,
        },
        "dct:publisher": {
            "@id": None,
            "@type": "foaf:Organization",
            "foaf:name": None,
            "internalID": None,
        },
        "dct:rights": None,
        "dct:spatial": {"@id": None, "@type": "dct:Location", "rdfs:label": None},
        "dct:subject": "Utilities / Communication",
        "dct:title": "Jamie test data",
        "geojson": {},
    }
    return metadata
fdd924e1c91331425cb67b694050fa8e7eedea1a
349,260
def _app_complete_on_provider(application, provider):
    """
    Determines if an Application is completely available on a Provider,
    i.e. all active ApplicationVersions have a ProviderMachine + InstanceSource
    on the given Provider.

    Args:
        application: Application object
        provider: Provider object

    Returns:
        boolean
    """
    for av in application.active_versions():
        av_on_prov = False
        for prov_machine in av.machines.all():
            if prov_machine.instance_source.provider == provider:
                av_on_prov = True
                break
        if not av_on_prov:
            return False
    return True
9fb3d4671a9cfd0b6207306b48314a0b303a6b52
640,216
def group_split(items, group_size):
    """Split a list into groups of a given size.

    Note: trailing items that do not fill a complete group are dropped.
    """
    it = iter(items)
    return list(zip(*[it] * group_size))
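A short example showing the truncating behaviour:

# The trailing 5 does not fill a complete pair, so it is dropped.
assert group_split([1, 2, 3, 4, 5], 2) == [(1, 2), (3, 4)]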
0fdb7eeaf29513b9c331aa19015a47e964ad19fc
590,023
def get_main_app_sub_parser(parent_parser):
    """
    Creates and adds a sub_parser to parent_parser. Returns the sub_parser.

    :param parent_parser: Parser to add the sub_parser to
    :type parent_parser: argparse.ArgumentParser
    :return: Sub Parser
    :rtype: argparse.ArgumentParser
    """
    # Define the sub_parser object; its dest is cmd
    sub_parser = parent_parser.add_subparsers(
        title="subcommands",
        description="one of these subcommands must be provided",
        metavar="",
        dest="cmd"
    )
    return sub_parser
3cb0e8d61473785d7cdfb5fdfcab5f040cc1e8e1
191,797
def AcPathProg(context, program, selection=None, value_if_not_found=None,
               path=None, pathext=None, reject=[], prog_str=None):
    """Corresponds to the AC_PATH_PROG_ autoconf macro.

    :Parameters:
        context
            SCons configuration context.
        program
            Name of the program to be checked.
        selection
            If ``None`` (default), the program will be found automatically,
            otherwise the method will return the value of **selection**.
        value_if_not_found
            Value to be returned, when the program is not found.
        path
            Search path.
        pathext
            Extensions used for executable files.
        reject
            List of file names to be rejected if found.
        prog_str
            Used to display 'Checking for <prog_str>...' message.

    .. _AC_PATH_PROG: http://www.gnu.org/software/autoconf/manual/autoconf.html#index-AC_005fPATH_005fPROG-318
    """
    if prog_str is None:
        prog_str = program
    context.Display("Checking for %s... " % prog_str)
    if not selection:
        progpath = context.env.WhereIs(program, path, pathext, reject)
        if progpath:
            context.Result(progpath)
        else:
            if value_if_not_found:
                progpath = value_if_not_found
                context.Result("not found, using '%s'" % progpath)
            else:
                context.Result('no')
        return progpath
    else:
        context.Result(str(selection))
        return selection
f4de015d78e5df4781123dbe1d491c33aaea739b
452,052
def cluster_rating(df, movie_id, cluster):
    """Return the average rating for a movie, based on the cluster."""
    # Renamed the local from cluster_rating, which shadowed the function name.
    rows = df[(df['movie_id'] == movie_id) & (df['cluster'] == cluster)]
    return rows['rating'].mean()
f3fdf129efef25669b7f8b34b61c7cb7a6ce35ef
405,369
def z_denorm(data, mean, std):
    """
    Denormalize data that was normalized using z-norm.

    :param data: 1d array of power consumption
    :param mean: the mean of the power consumption
    :param std: the std of the power consumption
    :return: denormalized power consumption
    """
    return data * std + mean
00bfb9178ff6c5b0160613ac90e19b2722165037
285,280
def to_from_idx(iterable, start_idx=0):
    """Return mappings of items in iterable to and from their index.

    >>> char2idx, idx2char = to_from_idx("abcdefg")
    >>> char2idx
    {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
    >>> idx2char
    {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g'}
    """
    return map(dict, zip(*(
        ((item, i), (i, item))
        for i, item in enumerate(iterable, start=start_idx))))
8e62ea0222848a17deff5bef6942e71e71fef00f
231,603
def check_list_date(date_list):
    """
    Check that the date is a list of 3 ints (day, month, yr).

    :param date_list: list of length 3
    :return: boolean
    """
    return len(date_list) == 3 and all(isinstance(item, int) for item in date_list)
680775a1d85dadc8c937f39b93c2d9c4e9cb18dd
267,248
def int_to_16bit(i):
    """Return an int as a pair of bytes (high byte, low byte)."""
    return ((i >> 8) & 0xff, i & 0xff)
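For example, splitting a 16-bit value into its two bytes:

assert int_to_16bit(0x1234) == (0x12, 0x34)  # high byte, low byte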
4432252b485bf9cfd8cd4d42df533348d1d35200
611,063
def _filter_resources(prefix, resources):
    """Return the list of resources that start with the given prefix."""
    return list(filter(lambda r: r.startswith(prefix), resources))
8f14e5cf4bf083499c75b3c5e7d29bb323958712
552,819
import torch


def abs_loss(labels, predictions):
    """
    Absolute (L1) loss function; the original docstring said "square loss",
    but the code computes the mean absolute difference.

    Args:
        labels (array[float]): 1-d array of labels
        predictions (array[float]): 1-d array of predictions

    Returns:
        float: mean absolute loss
    """
    # In Deep Q-Learning:
    #   labels = target_action_value_Q
    #   predictions = action_value_Q
    output = torch.abs(predictions - labels)
    output = torch.sum(output) / len(labels)
    return output
f371a972180960a089e720b0d4e6b08da602f7ae
110,396
def jsonrpc_result(id, result):
    """Create a JSON-RPC result response."""
    return {
        'jsonrpc': '2.0',
        'result': result,
        'id': id,
    }
a318b85ac2a9727a3b6c02b1dde499bc4c3a505d
339,947
def batchify(data, bsz, device):
    """Divides the data into bsz separate sequences, removing extra elements
    that wouldn't cleanly fit.

    Args:
        data: Tensor, shape [N]
        bsz: int, batch size
        device: device to move the batched tensor to

    Returns:
        Tensor of shape [N // bsz, bsz]
    """
    seq_len = data.size(0) // bsz
    data = data[:seq_len * bsz]
    data = data.view(bsz, seq_len).t().contiguous()
    return data.to(device)
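A usage sketch (assumes PyTorch is installed; values are illustrative):

import torch

data = torch.arange(26)
batches = batchify(data, bsz=4, device='cpu')
assert batches.shape == (6, 4)  # the 2 leftover elements are dropped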
9529f65a7029fc49cf11c19dbfd6b46f7c32e1f9
496,586
from typing import Dict
from typing import Optional


def remove_none(d: Optional[Dict] = None) -> Dict:
    """Remove None values from a dict.

    Args:
        d: Dict to remove None values from.

    Returns:
        dict: Dict without None values.
    """
    if not d:
        return {}
    return {k: v for k, v in d.items() if v is not None}
8dc75a41a45cf39fa1a978e142ffc2b41375fdb4
469,964
def retrieve_from_cosmos_db(db_handler, vault_id):
    """
    Retrieve a vault from the CosmosDB defined in db_handler.

    :param db_handler: handler for CosmosDB
    :param vault_id: vault ID to be retrieved
    :return: Vault
    """
    vault_ret = db_handler.find_fuzzy_vault(vault_id)
    return vault_ret
c69302240556980a35811777e9bbd81fca19310d
98,972
def find_policy(boto_iam, policy_name):
    """
    Find an AWS policy by name and return a dict of information about it.

    AWS's existing `list_policies()` API doesn't seem to let you search by
    name, so I iterate over all of them and match on our side.
    """
    paginator = boto_iam.get_paginator('list_policies')
    response_iterator = paginator.paginate()
    for policy_resp in response_iterator:
        for policy in policy_resp['Policies']:
            if policy['PolicyName'] == policy_name:
                return policy
    return None
870b240c001c83be8016059dffe704c71786b485
137,595
def get_all_with_given_response(rdd, response='404'):
    """
    Return an rdd only with those requests that received the response code
    entered. Default set to '404'.

    return type: pyspark.rdd.PipelinedRDD
    """
    def status_iterator(ln):
        try:
            status = ln.split(' ')[-2]
            return status == response
        except Exception:
            # Malformed lines are filtered out (None is falsy).
            return None

    return rdd.filter(status_iterator)
8268095938bbc35a6418f557af033a458f041c89
4,881
def mirror(arr, axes=None):
    """
    Reverse array over many axes. Generalization of arr[::-1] for many dimensions.

    Adapted from scikit-ued: https://github.com/LaurentRDC/scikit-ued

    Parameters
    ----------
    arr : `~numpy.ndarray`
        Array to be reversed
    axes : int or tuple or None, optional
        Axes to be reversed. Default is to reverse all axes.

    Returns
    -------
    out : `~numpy.ndarray`
        Mirrored array.
    """
    if axes is None:
        reverse = [slice(None, None, -1)] * arr.ndim
    else:
        reverse = [slice(None, None, None)] * arr.ndim
        if isinstance(axes, int):
            axes = (axes,)
        for axis in axes:
            reverse[axis] = slice(None, None, -1)
    return arr[tuple(reverse)]
eddb83ff410f8d32a1d615cba53412b927d69fd3
184,594
def pretty_byte_count(num):
    """Converts an integer into a human friendly count of bytes, eg: 12.243 MB"""
    if num == 1:
        return "1 byte"
    elif num < 1024:
        return "%s bytes" % num
    elif num < 1048576:
        return "%.2f KB" % (num / 1024.0)
    elif num < 1073741824:
        return "%.3f MB" % (num / 1048576.0)
    elif num < 1099511627776:
        return "%.3f GB" % (num / 1073741824.0)
    else:
        return "%.3f TB" % (num / 1099511627776.0)
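A few illustrative inputs and outputs:

# pretty_byte_count(1)        -> '1 byte'
# pretty_byte_count(999)      -> '999 bytes'
# pretty_byte_count(1536)     -> '1.50 KB'
# pretty_byte_count(12837917) -> '12.243 MB'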
f0bac2c6ab126b4cfefeee1ff6522b85234e0a1b
555,338
import operator


def upper_bound(x, values, cmp=operator.lt):
    """Find the last position in values where x could be inserted
    without changing the ordering.

    >>> values = [1, 2, 3, 3]
    >>> upper_bound(0, values)
    0
    >>> upper_bound(3, values)
    4
    >>> upper_bound(4, values)
    4
    """
    first, count = 0, len(values)
    while count > 0:
        # Integer division: the original Python 2 `count/2` would
        # produce a float index under Python 3.
        half = count // 2
        middle = first + half
        if cmp(x, values[middle]):
            count = half
        else:
            first = middle + 1
            count = count - half - 1
    return first
ab12fadda504b982aca146a7fae682dd163dc392
551,150
import socket


def is_hostname_valid(host: str) -> bool:
    """Test if a given hostname can be resolved."""
    try:
        socket.gethostbyname(host)
        return True
    except socket.gaierror:
        return False
99210e9a1c1eebb2a360e022a0d8dfa3c36418e9
538,439
def filter_and_rename_dict(indict, filterdict):
    """
    Renames the keys of the input dict using the filterdict. Keys not present
    in the filterdict will be removed.

    Example:
        >>> import pprint
        >>> outdict = filter_and_rename_dict({'a': 1, 'b': [2, 2]}, {'a': 'c', 'b': 'd', 'e': 'f'})
        >>> pprint.pprint(outdict)
        {'c': 1, 'd': [2, 2]}
    """
    outdict = {
        filterdict[k]: v
        for k, v in indict.items()
        if ((v is not None) and (k in filterdict))
    }
    return outdict
26fd12002aeee455645333748b2a4f9d09642d3a
489,126
from typing import Optional

import json
import requests


def authenticate_username(
        token: str,
        github_api: str = 'https://api.github.com/graphql') -> Optional[str]:
    """Check that the token corresponds to a valid GitHub username.

    Uses the `GitHub GraphQL API v4 <https://developer.github.com/v4/>`_.

    Parameters
    ----------
    token
        GitHub token that gives read only authorization
    github_api
        URL of GitHub's API

    Return
    ------
    GitHub's username or None
    """
    headers = {'Authorization': f'bearer {token}'}
    query = "query { viewer { login }}"
    reply = requests.post(github_api, json={'query': query}, headers=headers)

    status = reply.status_code
    if status != 200:
        return None

    data = json.loads(reply.text)['data']
    return data['viewer']['login']
b8ba4b3fb2e42c27b474d364f37c6c7086450401
75,177
def _get_connection_params(resource):
    """Extract connection and params from `resource`."""
    args = resource.split(";")
    if len(args) > 1:
        return args[0], args[1:]
    else:
        return args[0], []
87cdb607027774d58d1c3bf97ac164c48c32395c
1,630
def ig_unfold_tree_with_attr(g, sources, mode):
    """Call igraph.Graph.unfold_tree while preserving vertex and edge attributes."""
    g_unfold, g_map = g.unfold_tree(sources, mode=mode)
    g_eids = g.get_eids([(g_map[e.source], g_map[e.target]) for e in g_unfold.es])
    for attr in g.edge_attributes():
        g_unfold.es[attr] = g.es[g_eids][attr]
    for attr in g.vertex_attributes():
        g_unfold.vs[attr] = g.vs[g_map][attr]
    return g_unfold
c451318ba382164ab73e35d2094737e94189453b
499,172
def check_sig_name(sig_name, sigs, errors):
    """
    Check sig name.

    :param sig_name: name of sig in sig-info.yaml
    :param sigs: content of all sigs
    :param errors: errors count
    :return: errors
    """
    if sig_name not in [x['name'] for x in sigs]:
        print('ERROR! sig named {} does not exist in sigs.yaml.'.format(sig_name))
        errors += 1
    return errors
08b750a71a67fa6926e9625db3be47f9281b72c6
472,975
def seq_max_split(seq, max_entries):
    """Given a seq, split into a list of lists of length max_entries each."""
    ret = []
    num = len(seq)
    seq = list(seq)  # Trying to use a set/etc. here is bad
    beg = 0
    while num > max_entries:
        end = beg + max_entries
        ret.append(seq[beg:end])
        beg += max_entries
        num -= max_entries
    ret.append(seq[beg:])
    return ret
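For example:

assert seq_max_split(range(7), 3) == [[0, 1, 2], [3, 4, 5], [6]]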
5f17c521abf0279267711811a6cbf54f8029f78b
533,189
def _no_common_member(a, b):
    """
    Validates that there is no common member in the two lists a and b.
    Returns False if there is a common member.
    """
    a_set = set(a)
    b_set = set(b)
    return not (a_set & b_set)
da514ff4be2ed0cfd02d9266e33851dd41d37b02
526,115
def div_growth_rateYr(t, dt, d0):
    """
    Calculates the growth rate of a dividend using the dividend growth rate
    valuation model, where the dividend is paid yearly.

    parameters:
    -----------
    t = time in years (must be > 1, since the first year is the base year)
    dt = current price of dividend
    d0 = base year dividend price
    """
    t = t - 1
    growth_rate = (((dt / d0) ** (1 / t)) - 1) * 100
    return round(growth_rate, 4)
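A worked example with hypothetical prices: a dividend that grew from 2.00 to 2.43 over t = 5 years implies ((2.43 / 2.00) ** (1 / 4) - 1) * 100, roughly 4.99% annual growth.

# div_growth_rateYr(5, 2.43, 2.00) -> approximately 4.99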
a6e497c426ed212d1caa2d241bacb8a0aa07b3bd
190,817
from typing import List


def rotated_array(A: List[int], x: int) -> int:
    """We can use a binary search to get the time less than O(n). Instead of
    looking for a particular element, we first search for an index `i` such
    that A[i - 1] > A[i] to find the actual starting index of the array.

    Once we have the actual starting point of the array, we can break it into
    two halves. Each half of the array is ascending within some range, and we
    can use a simple comparison to check which half to search for `x`. We then
    perform another binary search in that range. If the start index is 0, the
    array was rotated 0 times.

    param A: The array to search
    param x: The element to find
    returns: The index of the element in the array, or -1 if not found
    """

    def find_start_idx() -> int:
        """Find the starting index of the array given that there's some
        arbitrary rotation.

        Note: the original updates `left = mid` / `right = mid` could loop
        forever (e.g. on a two-element window); the half-open updates below
        always shrink the search range.
        """
        left = 0
        right = len(A) - 1
        while left < right:
            mid = (left + right) // 2
            if A[mid] > A[right]:
                # The rotation point is to the right of mid.
                left = mid + 1
            else:
                right = mid
        return left

    def binary_search(left: int, right: int) -> int:
        # Inclusive bounds with +/- 1 steps guarantee termination.
        while left <= right:
            mid = (left + right) // 2
            if A[mid] == x:
                return mid
            elif A[mid] < x:
                left = mid + 1
            else:
                right = mid - 1
        return -1

    start_idx = find_start_idx()

    # Check which half `x` is in, then set the bounds for the search.
    if A[start_idx] <= x <= A[-1]:
        return binary_search(start_idx, len(A) - 1)
    return binary_search(0, start_idx - 1)
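A usage sketch: the array below was rotated three times, so the element 2 sits at index 4.

assert rotated_array([5, 6, 7, 1, 2, 3, 4], 2) == 4
assert rotated_array([1, 2, 3, 4], 3) == 2  # rotated 0 times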
ff652bdcdac7b409bf4e1c4bd10e5fff6e9c67c4
54,530
import io
import tarfile


def extract_tarbytes(file_bytes, path):
    """Extract a gzipped tarball from in-memory bytes.

    Args:
        file_bytes (bytearray): Bytes of the file to extract
        path (str): Path to extract it to

    Returns:
        path: destination path
    """
    tar_bytes_obj = io.BytesIO(file_bytes)
    with tarfile.open(fileobj=tar_bytes_obj, mode="r:gz") as tar:
        tar.extractall(path)
    return path
6c4d77d362d8bb0755cca00a505c1a40b2db2cb9
267,568
from typing import AnyStr

import json


def load_json_dict(text: AnyStr) -> dict:
    """Loads from JSON and checks that the result is a dict.

    Raises
    ------
    ValueError
        if `text` is not valid JSON or is valid JSON but not a dict
    """
    ans = json.loads(text)
    if not isinstance(ans, dict):
        raise ValueError('not a dict')
    return ans
3f35c6eed694f8b8087a7ee1252ef9fa99864280
696,138
import random


def fisher_yates_shuffle(sequence):
    """Shuffles the sequence in-place randomly and returns it."""
    if len(sequence) < 1:
        return sequence
    index_last = len(sequence) - 1
    for index_curr in range(index_last):
        index_random = random.randint(index_curr, index_last)
        if index_random != index_curr:
            sequence[index_curr], sequence[index_random] = \
                sequence[index_random], sequence[index_curr]
    return sequence
4c0b55a23ce7d8c580e477fd94274d5b8b872ad5
570,442
def format_number(value):
    """Format a number with commas as thousands separators."""
    return "{:,}".format(value)
2f198c08beee40f3e128992c5c9b9de9a300bbdf
492,080
def EscapePath(path):
    """Returns a path with spaces escaped."""
    return path.replace(" ", "\\ ")
b07b0b5148eb95ce1a94363e045fb4468b172aa0
139,006
def _array(fn, cls, genelist, **kwargs):
    """
    Returns a "meta-feature" array, with len(genelist) rows and `bins` cols.
    Each row contains the number of reads falling in each bin of that row's
    modified feature.
    """
    reader = cls(fn)
    _local_coverage_func = cls.local_coverage
    biglist = []
    if 'bins' in kwargs:
        if isinstance(kwargs['bins'], int):
            kwargs['bins'] = [kwargs['bins']]

    for gene in genelist:
        if not isinstance(gene, (list, tuple)):
            gene = [gene]
        coverage_x, coverage_y = _local_coverage_func(reader, gene, **kwargs)
        biglist.append(coverage_y)
    return biglist
7e7dae118a04ab787ad65a29f9ef4af851f25258
490,969
import re


def rm_stress(word_list):
    """
    Takes a list of strings in IPA that contain prosodic stress marks
    and removes the stress marks (') to clean the data.

    :word_list: list of strings
    :returns: list of strings without prosodic stress marks
    :rtype: list of strings
    """
    new_list = []
    for word in word_list:
        new_word = re.compile(r"'").sub("", word)
        new_list.append(new_word)
    return new_list
a05b24a552380d5a2af2abe219a89b3496351f42
572,871
def stdDevOfLengths(L):
    """
    L: a list of strings

    returns: float, the standard deviation of the lengths of the strings,
        or NaN if L is empty.
    """
    if not L:
        return float('NaN')
    mean = sum([len(t) for t in L]) / float(len(L))
    quantities = [(len(t) - mean) ** 2 for t in L]
    stdDev = (sum(quantities) / len(L)) ** 0.5
    return stdDev
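For instance, the lengths of ['a', 'bb', 'ccc'] are 1, 2, 3 with mean 2, so the standard deviation is sqrt(2/3):

# stdDevOfLengths(['a', 'bb', 'ccc']) -> 0.816496... (sqrt(2/3))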
62621be9ae0ad79ae523dc014a0b3e8bdbfbb19e
108,974
def zipdict(keys, vals):
    """Creates a dict with keys mapped to the corresponding vals."""
    return dict(zip(keys, vals))
15cb1a2c5936f973703ea14126056ff076160f70
72,870
def _sanitize_text(text):
    """Cleans up line and paragraph breaks in `text`."""
    text = ' '.join(text.replace('\n', ' ').split())
    return text.replace('<br> ', '\n\n') \
               .replace('<p>', '\n')
845b41bf4de770422d0ad227bd08cffe02255822
138,555
def get_snapshots(cluster_config, repository):
    """
    Get list of snapshots.

    Args:
        cluster_config: cluster specific config
        repository: repository name

    Returns:
        Returns list of snapshots from the given cluster:repo
    """
    es = cluster_config['es']
    snapshots = es.cat.snapshots(
        repository=repository,
        format='json',
        request_timeout=300
    )
    return snapshots
679f530eb83f51577e4a8bf3d39544a341f1d5bd
202,167
import re


def get_region_from_image_uri(image_uri):
    """
    Find the region where the image is located.

    :param image_uri: <str> ECR image URI
    :return: <str> AWS Region Name
    """
    region_pattern = r"(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d+"
    region_search = re.search(region_pattern, image_uri)
    assert region_search, f"{image_uri} must have region that matches {region_pattern}"
    return region_search.group()
e9051bcd53b44fd8ff09ab6beebd861994a195af
149,380
import re


def split_program_by_processing_element_label(program_string):
    """
    Split a program string into a list of individual processing element
    programs based on the location of <*> labels.

    :param program_string: multiline string containing an assembly program with <*> labels
    :return: the corresponding list of label-string tuples
    """
    # Find the labels.
    labels = re.findall(r"<(.*?)>", program_string)

    # Replace all instances of the processing element delimiter with just the
    # bookends "<>" as a sigil.
    program_string = re.sub(r"<.*>", "<>", program_string)

    # Split the program string into a list of processing element programs and
    # delete the leading null field.
    processing_element_program_strings = program_string.split("<>")
    del processing_element_program_strings[0]

    # Return the resulting list.
    return list(zip(labels, processing_element_program_strings))
409ec401352bb7284f17fb49a9076b5cdd0359b6
617,595
def standard_arguments(parser):
    """Add required and optional arguments common to all scripts."""
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    required.add_argument('-s', dest='server', action='store', required=True,
                          help="DHIS2 server URL")
    required.add_argument('-u', dest='username', action='store', required=True,
                          help='DHIS2 username')
    optional.add_argument('-p', dest='password', action='store', required=False,
                          help='DHIS2 password')
    return required, optional
ab51507481cc34666acfa95f987aa36b39c9fdbf
203,799
def new_vars(n):
    """Output a list of n characters starting with 'a'.

    Args:
        n (int): Number of characters to output

    Returns:
        list[str]: A list ['a', 'b', 'c', ...] of length n.
    """
    return [chr(ord('a') + i) for i in range(n)]
c9f17330dc5ca141b87c1a7cd365c3c09a005941
420,656
def compute_lambda_tilde(m1, m2, l1, l2):
    """
    Compute Lambda Tilde from masses and tides components.

    --------
    m1 = primary mass component [solar masses]
    m2 = secondary mass component [solar masses]
    l1 = primary tidal component [dimensionless]
    l2 = secondary tidal component [dimensionless]
    """
    M = m1 + m2
    m1_4 = m1 ** 4.
    m2_4 = m2 ** 4.
    M5 = M ** 5.
    comb1 = m1 + 12. * m2
    comb2 = m2 + 12. * m1
    return (16. / 13.) * (comb1 * m1_4 * l1 + comb2 * m2_4 * l2) / M5
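A quick consistency check (illustrative values): for an equal-mass binary with equal tidal components, the combination reduces to the common tidal value, since (16/13) * (13*m * m**4 * l * 2) / (2*m)**5 = l.

# compute_lambda_tilde(1.4, 1.4, 400.0, 400.0) -> 400.0 (up to floating point)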
620ae66de9e06cdbf08561fd1bf2514b4cc6ba63
603,038
def rax_clb_node_to_dict(obj):
    """Function to convert a CLB Node object to a dict."""
    if not obj:
        return {}
    node = obj.to_dict()
    node['id'] = obj.id
    node['weight'] = obj.weight
    return node
14526744ef608f06534011d1dcdd5db62ac8b702
675,212
def distinct(l):
    """
    Given an iterable, return a list of all distinct values.

    :param l: an iterable
    :return: the list of distinct values
    """
    return list(set(l))
8051823ad033bbe978204dfee104aca7348af038
546,964
def transpose(grid):
    """
    Switches rows and columns.

    >>> transpose([[1, 2, 3], [4, 5, 6]])
    [[1, 4], [2, 5], [3, 6]]
    """
    C = len(grid[0])
    inverted = []
    for r in range(C):
        row = [c[r] for c in grid]
        inverted.append(row)
    return inverted
b0b28a44784cfb7e212d48d5eb6b76d8e7f36904
212,160
def chunk(text: str, size: int) -> list:
    """Split text into chunks of a given size."""
    return [text[i:i + size] for i in range(0, len(text), size)]
b6f063fcc1d678d7590b9a59eed580f7643cabbd
298,397
def stmts_to_json(stmts_in, use_sbo=False, matches_fun=None):
    """Return the JSON-serialized form of one or more INDRA Statements.

    Parameters
    ----------
    stmts_in : Statement or list[Statement]
        A Statement or list of Statement objects to serialize into JSON.
    use_sbo : Optional[bool]
        If True, SBO annotations are added to each applicable element of
        the JSON. Default: False
    matches_fun : Optional[function]
        A custom function which, if provided, is used to construct the
        matches key which is then hashed and put into the return value.
        Default: None

    Returns
    -------
    json_dict : dict
        JSON-serialized INDRA Statements.
    """
    if not isinstance(stmts_in, list):
        json_dict = stmts_in.to_json(use_sbo=use_sbo)
        return json_dict
    else:
        json_dict = [st.to_json(use_sbo=use_sbo, matches_fun=matches_fun)
                     for st in stmts_in]
    return json_dict
274a3507ff72432cf93171a7785ab5c05c00322c
468,312
def construct_patch(self, path, data='', content_type='application/octet-stream', **extra):
    """Construct a PATCH request."""
    return self.generic('PATCH', path, data, content_type, **extra)
d063eef4ce62c8d764766a281bea48b0be15095c
523,231
import hashlib


def get_checksum(data):
    """Return the MD5 hash for the given data."""
    m = hashlib.md5()
    m.update(data)
    return m.hexdigest()
6693610f0bbacfc5ee91f327b1deb377c00445fe
332,164
import re


def order_files_numerically(file_list):
    """
    If there is a list of files, order them numerically, not alphabetically,
    and return the sorted list of files.

    Parameters
    ----------
    file_list : list
        A list of strings that contain one or more numerical values. The
        strings will be sorted by only the numerical values within them.

    Returns
    -------
    sorted_file_list : list
        A new list of strings sorted numerically
    """
    sorted_file_list = []
    numerical_dict = {}
    for i, file_name in enumerate(file_list):
        numbers = re.findall(r"\d+", file_name)
        numbers = tuple([int(j) for j in numbers])
        numerical_dict[numbers] = i
    numerical_list = sorted(numerical_dict.keys())
    for number in numerical_list:
        index = numerical_dict[number]
        sorted_file_list.append(file_list[index])
    return sorted_file_list
ec602520005c470c2b2d977a4d4ea54827a6acc0
533,173
def enrich_ioc_dict_with_ids(ioc_dict):
    """
    Enriches the provided IOC entries with an IOC ID.

    :param ioc_dict: iterable of IOC dicts transformed using the SEARCH_IOC_KEY_MAP
    :return: ioc_dict with the ID key:value of each entry updated
    """
    for ioc in ioc_dict:
        ioc['ID'] = '{type}:{val}'.format(type=ioc.get('Type'), val=ioc.get('Value'))
    return ioc_dict
34c471337a4e4c520dd8598e85c4cbd25ce56b93
600,868
def encrypt(public_key, message):
    """
    Takes a non-negative integer message and encrypts it using the public key.
    """
    if message >= public_key["n"]:
        raise ValueError("Message is too long. Consider generating larger keys.")
    return pow(message, public_key["e"], public_key["n"])
6a1be5b987c2d4bec644df8fee76762235cedb44
237,015
def monomial_gcd(a, b):
    """Greatest common divisor of tuples representing monomials.

    Let's compute the GCD of x**3*y**4*z and x*y**2:

    >>> monomial_gcd((3, 4, 1), (1, 2, 0))
    (1, 2, 0)

    which gives x*y**2.
    """
    return tuple([min(x, y) for x, y in zip(a, b)])
3e00732514415b21fbf105ee42486aef608d08c7
353,971
from typing import Any
from typing import Optional
from typing import Type

import hashlib
import json


def gethash(
    value: "Any",
    hashtype: str = "sha256",
    encoding: str = "utf-8",
    json_encoder: "Optional[Type[json.JSONEncoder]]" = None,
) -> str:
    """Return a hash of `value`.

    Can hash most Python objects. Bytes and bytearrays are hashed directly.
    Strings are converted to bytes with the given encoding.
    All other objects are first serialised using json.

    Args:
        value: Value to hash.
        hashtype: Any of the hash algorithms supported by hashlib.
        encoding: Encoding used to convert strings to bytes before
            calculating the hash.
        json_encoder: Customised json encoder for complex Python objects.

    Returns:
        A hash of the input `value`.
    """
    if isinstance(value, (bytes, bytearray)):
        data = value
    elif isinstance(value, str):
        data = value.encode(encoding)
    else:
        # Try to serialise using json
        data = json.dumps(
            value,
            ensure_ascii=False,
            cls=json_encoder,
            sort_keys=True,
        ).encode(encoding)

    hash_ = hashlib.new(hashtype)
    hash_.update(data)
    return hash_.hexdigest()
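A usage sketch: thanks to sort_keys=True, logically identical dicts hash identically, and a str hashes the same as its encoded bytes.

assert gethash({"a": 1, "b": 2}) == gethash({"b": 2, "a": 1})
assert gethash("abc") == gethash(b"abc")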
fab7801a8cc6bf5a23d3ba681a0a5d0fb2a81a0e
477,162
def join_lemmas(doc):
    """Return joined lemmas with appropriate whitespace."""
    return "".join(token.lemma_ + token.whitespace_ for token in doc)
87e6f5b1d826dd9d97519dfe105d9020260d8626
71,434
def truncate_string(string, max_len=100):
    """Shorten `string` to at most `max_len` characters, appending "..." if necessary."""
    if string is None:
        return None
    if len(string) > max_len:
        string = string[:max_len - 3] + '...'
    return string
0b2ef8974e3509dd051c10fb5c3059a366e218a5
201,677
def contains(value, arg):
    """
    Case-insensitively checks whether a substring is found inside a string.

    Args:
        value (str): The string
        arg (str): The substring

    Returns:
        bool: True if string contains substring, False otherwise
    """
    value = value.upper()
    arg = arg.upper()
    return arg in value
7da05cf849c3e0bce88d039852175eee6b35c221
22,817
import math


def get_line_length(start_x, start_y, end_x, end_y):
    """
    Takes x and y of the start and end points and
    returns the distance between those points.
    """
    a = end_x - start_x
    g = end_y - start_y
    length = math.sqrt(a ** 2 + g ** 2)
    return length
404075b6f387280ca906c16441da305e59cfbcf0
353,527
def min_list(lst):
    """
    A helper function for finding the minimum of a list of integers
    where some of the entries might be None.
    """
    if len(lst) == 0:
        return None
    elif len(lst) == 1:
        return lst[0]
    elif all([entry is None for entry in lst]):
        return None
    return min([entry for entry in lst if entry is not None])
5287286c843086c271f6fc709b275796cc68f314
675,056
def d8n_get_all_images_topic_bag(bag):
    """Returns the (name, type) of all topics that look like images."""
    tat = bag.get_type_and_topic_info()
    consider_images = [
        'sensor_msgs/Image',
        'sensor_msgs/CompressedImage',
    ]
    all_types = set()
    found = []
    topics = tat.topics
    for t, v in topics.items():
        msg_type = v.msg_type
        all_types.add(msg_type)
        _message_count = v.message_count
        if msg_type in consider_images:
            # quick fix: ignore image_raw if we have the image_compressed version
            if 'raw' in t:
                other = t.replace('raw', 'compressed')
                if other in topics:
                    continue
            found.append((t, msg_type))
    return found
1093cb8bb13946a170acf2333bcae1617db163d9
657,652
def validate_password(data):
    """
    Validates the password in the given request data.
    First checks that it exists, then checks that it has the necessary length.

    :param data: Dict containing all the request data
    :return: Boolean determining whether the request has a valid password
    """
    if "password" not in data:
        return False
    password = data["password"]
    if len(password) < 4 or len(password) > 16:
        return False
    return True
7b739842f1a584195e96993e968d2bd54cb3a5fa
499,903
import re


def strip_concat(
    sequence: str,
) -> str:
    """
    Cleans up concat sequences (peptide_charge) and removes modifications
    to return the peptide string for labeling site calculations.

    :param sequence: concat sequence containing charge and modifications
    :return: cleaned peptide string
    """
    # 2021-05-18 strip all N-terminal n from Comet
    sequence = re.sub('^n', '', sequence)
    # Strip all modifications
    sequence = re.sub('\\[.*?\\]', '', sequence)
    # Strip the underscore and charge
    sequence = re.sub('_[0-9]+', '', sequence)
    return sequence
c646f90a68873661049dd39b7cc4c9bcf47134b5
590,251