content: string (39 – 9.28k chars) · sha1: string (40 chars) · id: int64 (8 – 710k)
import numbers
import warnings


def create_plist_from_par_opt_to_par_sim(mapping_par_opt_to_par_sim):
    """
    Create a list of parameter indices for which sensitivities are to be
    computed, based on the mapping `mapping_par_opt_to_par_sim`.

    Parameters
    ----------
    mapping_par_opt_to_par_sim: array-like of str
        len == n_par_sim, the entries are either numeric, or
        optimization parameter ids.

    Returns
    -------
    plist: array-like of int
        List of parameter indices for which the sensitivity needs to be
        computed.
    """
    warnings.warn(
        "This function will be removed in future releases.",
        DeprecationWarning,
    )
    plist = []
    # iterate over simulation parameter indices
    for j_par_sim, val in enumerate(mapping_par_opt_to_par_sim):
        if not isinstance(val, numbers.Number):
            plist.append(j_par_sim)
    # return the created simulation parameter index list
    return plist
1df8d0a9814a99b1ba083fbe40ad71a4d0cb8403
493,088
import torch


def get_devices_spec(devices=None):
    """
    Get a valid specification for one or more devices. If `devices` is None,
    get all available CUDA devices. If `devices` is a zero-length structure,
    a single CPU compute device is returned. In any other case `devices` is
    returned unchanged.

    Args:
        devices (list, optional): list of devices to request, None for all
            GPU devices, [] for CPU.

    Returns:
        list of torch.device: list of devices.
    """
    if devices is None:
        devices = [torch.device('cuda:%i' % d) for d in range(torch.cuda.device_count())]
        if len(devices) == 0:
            raise ValueError("No GPU devices available")
    elif len(devices) == 0:
        devices = [torch.device("cpu")]
    return devices
2afd6df60a5e2f5eeaa2997b5f9efabfa4437bd2
304,951
def awgGate(gate, station):
    """ Return True if the specified gate can be controlled by the AWG """
    awg = getattr(station, 'awg', None)
    if awg is None:
        return False
    return awg.awg_gate(gate)
58ce5aee33e3630db3bc6f0fa0c277f2e9c231cc
368,125
def harmonic_mean(x1, x2):
    """ Calculate the harmonic mean of two values. """
    return (2 * x1 * x2) / (x1 + x2)
c88436a0fe7962c6de17e096f6e0a0a59aeb98db
402,745
def show_tnseq_upload_btn(network_type):
    """Show the TnSeq upload button when combined networks are selected."""
    return {'display': 'block'} if network_type == 'combined' else {'display': 'none'}
a875b176c7b56b09d5c38c3fd4fc2afbd350e220
471,676
from typing import Any, Mapping, Optional


def check_override(params: Mapping[str, Any], key: str, override: Optional[Any]) -> Any:
    """Return the desired value, with an optional override."""
    if override is None:
        return params[key]
    saved = params.get(key, None)
    print(f'Overriding saved {key}. Saved: {saved}. Override with: {override}.')
    return override
ea9776d7d55594ab6ce22168fa4ac2879a0de7f8
297,266
import hashlib
import json


def hexdigest(jsonable):
    """
    Calculate the hex digest of a `jsonable` object.

    >>> hexdigest({'a': 1, 'b': 2, 'c': 3})
    'e20096b15530bd66a35a7332619f6666e2322070'
    """
    string = json.dumps(jsonable, sort_keys=True).encode()
    return hashlib.sha1(string).hexdigest()
b523381c8e2d4d66c2d4c608c2e212f139d6c43a
533,895
def list_to_array_syntax(l):
    """Convert a Python list to PGSQL array syntax for insertion."""
    l_text = str(l)
    l_text = "'{" + l_text[1:-1] + "}'"
    return l_text
66a90a0fe132c858c84c37331ae56f342eca5507
332,106
def json_utf8_encode(obj: object) -> object:
    """Binary encode all strings in an object.

    :arg obj: Object.

    :returns: Object with binary encoded strings.
    """
    if isinstance(obj, str):
        return obj.encode('utf-8')
    if isinstance(obj, (list, tuple)):
        return [json_utf8_encode(item) for item in obj]
    return obj
6d11790e2fb18eb0265452bff21ca6d082eae86a
229,601
def get_filtered_attributes(cube, attribute_filter=None):
    """
    Build a dictionary of attributes that match the attribute_filter. If the
    attribute_filter is None, return all attributes.

    Args:
        cube (iris.cube.Cube):
            A cube from which attributes partially matching the
            attribute_filter will be returned.
        attribute_filter (str or None):
            A string to match, or partially match, against attributes to
            build a filtered attribute dictionary. If None, all attributes
            are returned.

    Returns:
        dict:
            A dictionary of attributes partially matching the
            attribute_filter that were found on the input cube.
    """
    attributes = cube.attributes
    if attribute_filter is not None:
        attributes = {k: v for (k, v) in attributes.items() if attribute_filter in k}
    return attributes
c9dc018244856c2148beae0de96f4fe2853017f9
496,774
def armijo(fun, xk, xkp1, p, p_gradf, fun_xk, eta=0.5, nu=0.9):
    """ Determine step size using backtracking

        f(xk + alpha*p) <= f(xk) + alpha*nu*<p,Df>

    Args:
        fun : objective function `f`
        xk : starting position
        xkp1 : where new position `xk + alpha*p` is stored
        p : search direction
        p_gradf : local slope along the search direction `<p,Df>`
        fun_xk : starting value `f(xk)`
        eta : control parameter (shrinkage)
        nu : control parameter (sufficient decrease)

    Returns:
        Set `xkp1[:] = xk + alpha*p` and return a tuple (f_xkp1, alpha)

        f_xkp1 : new value `f(xk + alpha*p)`
        alpha : determined step size
    """
    if p_gradf >= 0:
        raise Exception("Armijo: Not a descent direction!")
    alpha = 1.0
    while True:
        xkp1[:] = xk + alpha * p
        f_xkp1 = fun(xkp1)
        if f_xkp1 <= fun_xk + alpha * nu * p_gradf:
            break
        alpha *= eta
        if alpha < 1e-10:
            raise Exception("Armijo: Line search failed, step size too small!")
    return f_xkp1, alpha
0b44b06fe6db1f778dbc22995a2800ebbf6f051a
678,650
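A minimal usage sketch for the armijo routine above, assuming numpy and a simple quadratic objective (the values here are invented for illustration, not from the source dataset):

import numpy as np

fun = lambda x: float(x @ x)       # f(x) = |x|^2, minimum at the origin
xk = np.array([1.0, 2.0])
p = -2 * xk                        # steepest-descent direction -grad f(xk)
p_gradf = float(p @ (2 * xk))      # <p, Df> = -20, a descent direction
xkp1 = np.empty_like(xk)
f_new, alpha = armijo(fun, xk, xkp1, p, p_gradf, fun_xk=fun(xk))
# With the defaults eta=0.5 and the fairly strict nu=0.9, the
# sufficient-decrease test here requires alpha <= 0.1, so backtracking
# halves alpha down to 0.0625 (f_new = 3.828125, xkp1 = [0.875, 1.75]).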
def set_range_offset(ds, h_deploy):
    """
    Adds an instrument's height above the seafloor (for an up-facing
    instrument) or depth below the water surface (for a down-facing
    instrument) to the range of the depth bins.

    Parameters
    ----------
    ds : xarray.Dataset
        The ADCP dataset to adjust 'range' on
    h_deploy : numeric
        Deployment location in the water column, in [m]

    Returns
    -------
    ds : xarray.Dataset
        The ADCP dataset with 'range' adjusted

    Notes
    -----
    `Center of bin 1 = h_deploy + blank_dist + cell_size`

    Nortek doesn't take `h_deploy` into account, so the range DOLfYN
    calculates is the distance from the ADCP transducers. TRDI asks for
    `h_deploy` in their deployment software, so it is known to DOLfYN.

    If the ADCP is mounted on a tripod on the seafloor, `h_deploy` is the
    height of the tripod +/- any extra distance to the transducer faces.
    If the instrument is vessel-mounted, `h_deploy` is the distance between
    the surface and the downward-facing ADCP's transducers.
    """
    r = [s for s in ds.dims if 'range' in s]
    for val in r:
        ds = ds.assign_coords({val: ds[val].values + h_deploy})
        ds[val].attrs['units'] = 'm'
    ds.attrs['h_deploy'] = h_deploy
    return ds
13257dc4b5fce72667b58d60f39566e33427167f
402,462
def str2list(_str):
    """Convert string type to a list of one string.

    :arg str _str: a string
    """
    if isinstance(_str, str):
        return [_str]
    elif isinstance(_str, list):
        return _str
    else:
        raise ValueError('"_str" input is not a str or list')
c9f184957167b32d412cc7b592c093d37faa3d6a
84,664
def load_sentences(filename):
    """Return a list of sentences, where each sentence is a list of tokens.
    Assumes the input file is one sentence per line, pre-tokenized."""
    out = []
    with open(filename) as infile:
        for line in infile:
            tokens = line.strip().split()
            out.append(tokens)
    return out
6a4c458f9a0d9b17eaa38c38570dacc4c40e86c0
706,735
import string


def get_first_n_alphabet(n):
    """Return the first n lowercase letters of the alphabet as a list."""
    return list(string.ascii_lowercase[:n])
6b011a6a0f121565ed32bfe5e28a31e4bbb82d07
242,848
from pathlib import Path
import pickle


def read_pickle_dict(filename, folder='C:/Users/Avram/Dropbox (MIT)/MIT'
                                      '/Research/Thesis/Sections/mod_studies/'):
    """Read ptrac data dictionary from pickle file."""
    if folder == '':
        folder = Path.cwd()  # set folder='' to use the current directory
    path = Path(folder, filename).with_suffix('.pkl')
    try:
        print(f'Reading {path}...')
        with open(path, 'rb') as f:
            data = pickle.load(f)
    except IOError:
        print('Pickle file not found.')
        data = {}
    return data
dcf38d34d596a19d9b69a32e48d0f8f1ca10eda4
615,005
def empty_data(data):
    """Check to see if data is an empty list or None."""
    if data is None:
        return True
    elif isinstance(data, list):
        if len(data) == 0:
            return True
    return False
3dddfe9e561dafb7bdf73b35605097f4d7691c3c
119,272
def deprocess_image(img):  # , rescale=False
    """Undo preprocessing on an image and convert back to uint8."""
    # img = (img * SQUEEZENET_STD + SQUEEZENET_MEAN)
    img = (img + 1) * 0.5
    # if rescale:
    #     vmin, vmax = img.min(), img.max()
    #     img = (img - vmin) / (vmax - vmin)
    return img
9048dbd638da17426e412628a804699a3d6f4026
205,505
def generate_script(group, entry_point, header, template):
    """Generate the script based on the template.

    :param str group: The entry-point group name, e.g., "console_scripts".
    :param entry_point: The entry point to generate the script for.
    :param str header: The first line of the script, e.g., "#!/usr/bin/env python".
    :param str template: The script template.
    :returns: The templated script content
    :rtype: str
    """
    if not entry_point.attrs or len(entry_point.attrs) > 2:
        raise ValueError("Script targets must be of the form "
                         "'func' or 'Class.class_method'.")
    script_text = template % dict(
        group=group,
        module_name=entry_point.module_name,
        import_target=entry_point.attrs[0],
        invoke_target='.'.join(entry_point.attrs),
    )
    return header + script_text
847871bc7344dcfda994a9e985e9a541c96fff81
45,123
def matchAllowedVOs(vo, resource_ad):
    """True if `vo` is in the AllowedVOs list in `resource_ad`,
    or if AllowedVOs is undefined or empty.
    """
    allowed_vos = resource_ad.get('AllowedVOs', None)
    if not allowed_vos:
        return True
    else:
        return vo in list(allowed_vos)
acc90deedcb4f0573975088c39c2d98e7fccc30c
339,029
def xstr(s):
    """Create a string object, but return an empty string for null objects.

    Args:
        s: input object

    Returns:
        object converted to a string
    """
    if s is None:
        return ""
    else:
        return str(s)
b7ea8e906598d259244cc8a7cbb9cb1a142ba3d8
26,829
def is_valid_host(host):
    """
    Check if host is valid.

    Performs two simple checks:
      - Has host and port separated by ':'.
      - Port is a positive integer.

    :param host: Host in <address>:<port> format.
    :returns: Valid or not.
    """
    parts = host.split(':')
    return len(parts) == 2 and parts[1].isdigit()
e7d22558e8a41b3b3345e863e11cdeec1a37d984
77,561
import ipaddress


def list_all_available_cidr(jnj_root_cidr_list, allocated_cidr_list, subnet_prefix):
    """
    Find all CIDRs of the specified size from the provided top-level CIDR list
    in the region.

    Args:
        jnj_root_cidr_list: top-level CIDRs allocated to region
        allocated_cidr_list: CIDRs currently in use in region
        subnet_prefix: requested CIDR size

    Returns: list of available CIDRs of the requested size
    """
    # Initialize result array
    available_cidr_list = []

    # Iterate through root level CIDRs
    for cidr in jnj_root_cidr_list:
        # Cast top-level CIDR string to network object
        cidr = ipaddress.IPv4Network(cidr)

        # If top-level CIDR is smaller than the requested CIDR, skip it
        if int(cidr.prefixlen) > int(subnet_prefix):
            continue

        # Collect already-allocated CIDRs that overlap this top-level CIDR
        allocated_cidr_in_master_list = [ipaddress.IPv4Network(cidr_block)
                                         for cidr_block in allocated_cidr_list
                                         if ipaddress.IPv4Network(cidr_block).overlaps(cidr)]

        # Divide the top-level CIDR into CIDRs of the requested size
        cidr_subnets = list(cidr.subnets(new_prefix=int(subnet_prefix)))

        # Iterate through theoretical subnets and search for overlap
        for subnet in cidr_subnets:
            # Search for overlap with already allocated CIDRs
            subnet_conflict_flag = False
            for allocated_cidr in allocated_cidr_in_master_list:
                if subnet.overlaps(allocated_cidr):
                    subnet_conflict_flag = True
                    break

            # Found a conflict
            if subnet_conflict_flag:
                continue

            # This subnet has no conflicts; append to list of available subnets
            available_cidr_list.append(subnet.with_prefixlen)

    # Return results
    return available_cidr_list
caf84de05b7c8b6a7246062e2f34ce57329cf6b7
41,374
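A quick usage sketch for list_all_available_cidr above (the address ranges are illustrative, not from the source):

# Hypothetical inputs: one /16 root block with a single /24 already in use.
roots = ['10.0.0.0/16']
allocated = ['10.0.0.0/24']
free = list_all_available_cidr(roots, allocated, 24)
print(len(free))   # 255 -- every /24 in 10.0.0.0/16 except the allocated one
print(free[0])     # '10.0.1.0/24'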
import copy


def modify_namespace(namespace, args):
    """Modify the specified arguments in the passed Namespace.

    namespace       argparse.Namespace object
    args            dict of argument: value pairs

    For most command-line tests, we define a base argparse.Namespace object,
    then change a few arguments. This function takes the base namespace and
    a dictionary of argument: value pairs, and returns the modified namespace.
    """
    new_namespace = copy.deepcopy(namespace)
    for argname, argval in args.items():
        setattr(new_namespace, argname, argval)
    return new_namespace
65aee8eb630ee0b75b81b18234ea04b6ca888491
666,773
def list_wrap_remove(var):
    """
    Helper function for removing list wrapping for a single item that
    might be wrapped into a list.
    """
    if isinstance(var, list) and len(var) > 1:
        # If an actual list, return the list
        return var
    elif isinstance(var, list) and len(var) == 1:
        # If a list of one item, unpack
        return var[0]
    else:
        return var
f13ec71bc9275d4ac5b5747f0daa97af24b40361
420,641
def sizeof_fmt(num, suffix='B', longsuffix=True, usespace=True, base=1024):
    """ Returns a string representation of the size ``num``.

    - Examples:
    >>> sizeof_fmt(1020)
    '1020 B'
    >>> sizeof_fmt(1024)
    '1 KiB'
    >>> sizeof_fmt(12011993)
    '11.5 MiB'
    >>> sizeof_fmt(123456789)
    '117.7 MiB'
    >>> sizeof_fmt(123456789911)
    '115 GiB'

    Options include:

    - No space before unit:
    >>> sizeof_fmt(123456789911, usespace=False)
    '115GiB'

    - French style, with short suffix, the "O" suffix for "octets", and a base 1000:
    >>> sizeof_fmt(123456789911, longsuffix=False, suffix='O', base=1000)
    '123.5 GO'

    - Reference: https://stackoverflow.com/a/1094933/5889533
    """
    num = float(num)  # force typecast
    base = float(base)
    suffixes = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    if longsuffix:
        suffixes = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi']
    for unit in suffixes[:-1]:
        if abs(num) < base:
            return "{num:3.1f}{space}{unit}{suffix}".format(
                num=num,
                space=' ' if usespace else '',
                unit=unit,
                suffix=suffix,
            ).replace(".0", "")
        num /= base
    return "{num:.1f}{space}{unit}{suffix}".format(
        num=num,
        space=' ' if usespace else '',
        unit=suffixes[-1],
        suffix=suffix,
    ).replace(".0", "")
192809ce48ccdbeeb657ad0be60414664fc60544
594,338
def load_cluster_labels(clusterpath, one_indexed=True):
    """
    one_indexed: if true, assume cluster index starts at 1
                 (e.g. as in 2018_scMCA data)
    """
    cluster_labels = {}
    if one_indexed:
        dec = 1
    else:
        dec = 0
    # expected format of csv file is "cluster number, name"
    with open(clusterpath) as f:
        for idx, line in enumerate(f):
            line = line.rstrip()
            line = line.split(',')
            print(line)
            cluster_labels[int(line[0]) - dec] = line[1]
    return cluster_labels
fed3adb9dc9c5653aaf05548a8c7e6d5739f6e82
633,878
def match_lab_lightness(outputs, targets):
    """Replace the L channel of the LAB images in outputs by the ones in targets."""
    outputs = outputs.permute(0, 2, 3, 1)
    targets = targets.permute(0, 2, 3, 1)
    outputs[..., 0] = targets[..., 0]
    return outputs.permute(0, 3, 1, 2)
1747123e30518784d3c554960c050e62a88e2200
286,531
def smiles(sid):
    """ SMILES string from a species ID """
    smi = sid.split('_')[0]
    return smi
8c47d163d9110027f9fa47903ab1a1abbaa54fe6
83,288
def _min_index(b, h):
    """
    Returns: The index of the minimum value in b[h..]

    Parameter b: The sequence to search

    Precondition: b is a mutable sequence (e.g. a list).
    """
    # We typically do not enforce preconditions on hidden helpers
    # Start from position h
    i = h
    index = h
    # index is position of min in b[h..i-1]
    while i < len(b):
        if b[i] < b[index]:
            index = i
        i = i + 1
    # index is position of min in b[h..len(b)-1]
    return index
8d1058177967f9e17405802912b2f080757ac1d1
524,211
def dot_esc(str):
    """Escape str for use in DOT"""
    return str.replace('"', '\\"')
ad82f29d9f02523b344f8ed81e73e372913bba0d
605,047
def read_answer(equation):
    """ Read user answer """
    print(equation)
    print()
    reply = input("Answer? ")
    return reply.strip() or '0'
e52521a8e19fcb3a1928e913d6ba6a915f41c567
427,507
def cast_bool(value):
    """Cast boolean value in database format to bool.

    Returns None for empty input.
    """
    if value:
        return value[0] in ('t', 'T')
e3a93a175f6cc3b9e8bc00305636016c6e670056
413,728
def odder(num: int) -> int:
    """Forces a number to be odd"""
    if num % 2 == 0:
        num += 1
    return int(num)
90261e03ae5784c9cc555963732f8b43203ddecd
483,327
import socket


def getFQDN(default=None):
    """Return the FQDN."""
    fqdn = socket.gethostname()
    if '.' not in fqdn:
        return default
    return fqdn
33157712a5e4ec7bf198005389d2718d7895463c
641,568
def get_filename(pathname, split_char='/'):
    """
    :param pathname: path name
    :param split_char: path separator character
    :return: file name
    """
    pathname = pathname.split(split_char)
    return pathname[-1]
11bead040c84c105ce0c0c4dc354ef04400e1b57
644,456
def center_scale(arr):
    """ Center and scale values to the range [-0.5, 0.5] """
    xmax, xmin = arr.max(), arr.min()
    arr = ((arr - xmin) / (xmax - xmin)) - .5
    return arr
28e9a0e0eb6314ef0f293e7440395493d3e8f299
560,728
import random


def successfulStarts(successProb, numTrials):
    """Assumes successProb is a float representing probability of a
    single attempt being successful. numTrials a positive int.
    Returns a list of the number of attempts needed before a
    success for each trial."""
    triesBeforeSuccess = []
    for t in range(numTrials):
        consecFailures = 0
        while random.random() > successProb:
            consecFailures += 1
        triesBeforeSuccess.append(consecFailures)
    return triesBeforeSuccess
16fd09295f2fb617f2be9061ff77117856dc11cf
547,408
def rowwidth(view, row):
    """Returns the number of characters of ``row`` in ``view``."""
    return view.rowcol(view.line(view.text_point(row, 0)).end())[1]
f8db1bf6e3d512d1a2bd5eeb059af93e8ac3bc5f
705,633
def parse_float(arg):
    """Parses an argument to a float number.

    Supports converting the strings `none` and `null` to `None`.
    """
    if arg is None:
        return None
    if isinstance(arg, str) and arg.lower() in ['none', 'null']:
        return None
    return float(arg)
abc7a98509bb1e1aa6d8e42151b98ed020fd483a
385,847
def get_port_detail(ports):
    """
    Iterate over port details from the response and retrieve details of ports.

    :param ports: list of port details from response
    :return: list of detailed elements of ports
    :rtype: list
    """
    return [{
        'ID': port.get('id', ''),
        'Number': port.get('number', '')
    } for port in ports]
003ecf7d3453659d7e43d07f0ad416d0f9da84ca
478,499
def nvmf_create_subsystem(client,
                          nqn,
                          serial_number,
                          tgt_name=None,
                          model_number='SPDK bdev Controller',
                          allow_any_host=False,
                          max_namespaces=0,
                          ana_reporting=False):
    """Construct an NVMe over Fabrics target subsystem.

    Args:
        nqn: Subsystem NQN.
        tgt_name: name of the parent NVMe-oF target (optional).
        serial_number: Serial number of virtual controller.
        model_number: Model number of virtual controller.
        allow_any_host: Allow any host (True) or enforce allowed host list (False). Default: False.
        max_namespaces: Maximum number of namespaces that can be attached to the subsystem (optional). Default: 0 (Unlimited).
        ana_reporting: Enable ANA reporting feature. Default: False.

    Returns:
        True or False
    """
    params = {
        'nqn': nqn,
    }

    if serial_number:
        params['serial_number'] = serial_number

    if model_number:
        params['model_number'] = model_number

    if allow_any_host:
        params['allow_any_host'] = True

    if max_namespaces is not None:
        params['max_namespaces'] = max_namespaces

    if tgt_name:
        params['tgt_name'] = tgt_name

    if ana_reporting:
        params['ana_reporting'] = ana_reporting

    return client.call('nvmf_create_subsystem', params)
909bff67a43ca106d756225e328666d93533d495
298,344
def polygonal(i, n):
    """
    Compute the n-th i-gonal number.

    For example, if i=3, the n-th triangular number is returned.
    """
    return n * ((i - 2) * n + 4 - i) // 2
d228b268cdeb422e94c2becd3f958d5f0bcd3f0a
274,490
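A quick sanity check of polygonal above, using the well-known triangular and square number sequences:

# i=3 gives triangular numbers n(n+1)/2; i=4 gives perfect squares n^2.
print([polygonal(3, n) for n in range(1, 6)])  # [1, 3, 6, 10, 15]
print([polygonal(4, n) for n in range(1, 6)])  # [1, 4, 9, 16, 25]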
def _resolve_text(unicode_bytes):
    """Resolves Facebook text data, which is often dumped as Unicode bytes,
    to actual text."""
    # Round-trip through a single-byte codec to recover the original UTF-8.
    return unicode_bytes.encode("charmap").decode("utf8")
d572c71382322fa924b603ed8a9c262a8c87025a
256,143
from functools import reduce


def map_reduce_attr(attr, elems, op, init):
    """Execute map-reduce on instance attributes (non-callable)."""
    return reduce(op, map(lambda elem: getattr(elem, attr), elems), init)
d295a6fd5ae039f6309353930ef7cb052a3806e4
328,088
from datetime import datetime


def display_day(uuid):
    """Display a readable form of the day for a given UUID entity."""
    date_str = "{0}{1}".format(uuid.PartitionKey, uuid.RowKey)
    date_obj = datetime.strptime(date_str, "%Y%m%d")
    return date_obj.strftime("%A, %d")
0d990aa858f9f4554139b49f64ba1797adfa9553
334,358
def adjust_widget(widget, window_title=None, size=None, **kwargs):
    """Apply some adjustments to a widget. Unused kwargs are returned."""
    if window_title is not None:
        widget.setWindowTitle(window_title)
    if size is not None:
        widget.resize(*size)
    return kwargs
636cb487440747bff2e709a7b36a4149cc95bf7c
640,785
def round_list(x, n=2):
    """Auxiliary function to round elements of a list to n decimal places.

    Parameters
    ----------
    x : list
        List of float values.
    n : int
        Number of decimal places to round the elements of the list.

    Returns
    -------
    list
        List with elements rounded to n decimal places.
    """
    try:
        return [round(i, n) for i in x]
    except TypeError:
        return x
d4a6b1d283d970618591346caedde559069c9877
500,223
def swa_lr_decay(step: int, cycle_len: int, start_lr: float, end_lr: float) -> float:
    """Linearly decrease the learning rate over the cycle."""
    return start_lr + ((end_lr - start_lr) / cycle_len) * (step % cycle_len)
928757aae18145371b3c80cc74d749f95c8b6363
255,112
def parse_so_terms(so_file):
    """Retrieve all available Sequence Ontology terms from the file."""
    so_terms = []
    with open(so_file) as in_handle:
        for line in in_handle:
            if line.startswith('name:'):
                name = line[5:].strip()
                so_terms.append(name)
    return so_terms
c61acbd574701244a4ad9cdca5095e4b12514bda
673,943
def get_greatest_depth(germanet, category) -> int:
    """
    Iterate through the synsets of a given word category. For each synset,
    check the depth and return the greatest depth that has been seen.

    :type category: WordCategory
    :type germanet: Germanet
    :param germanet: the germanet graph
    :param category: the word category
    :return: the greatest depth for a given word category. The depth of a
        synset is defined by the shortest path length between the synset and
        the root node.
    """
    synsets = germanet.get_synsets_by_wordcategory(category)
    max_depth = 0
    for synset in synsets:
        depth = synset.min_depth()
        if depth >= max_depth:
            max_depth = depth
    return max_depth
bfc354fe8e0f49a2100c0cb4ccf2ebb080616991
461,823
import re


def inc_guard(header_name):
    """Convert a header file name into an include guard."""
    return "_{0}_".format(re.sub(r'[./\\]', '_', header_name.upper()))
2329bf00e069b673f2ea69b8e77a80a1ea694c7d
151,230
import json
import random
import time


def pick_random_quote(path='responses/class_quotes.json'):
    """
    Get a random quote from a JSON file.

    :param path: a string that indicates the location where the quotes are stored.
    :return: a random quote
    """
    with open(path) as f:
        responses = json.loads(f.read())
    # seed the RNG so that we don't get the same groups every time
    random.seed(round(time.time()) % 1e5)
    response = random.sample(responses['responses'], 1)[0]
    return '"' + response["quote"] + '"' + " -- " + response["quotee"]
6d41f35a8316f09d30849d3d1c790e5be2065f68
113,145
async def get_story_data(session, story_id, story_rank):
    """ Gets the given story data - title and url """
    url = 'https://hacker-news.firebaseio.com/v0/item/{}.json'.format(story_id)
    async with session.get(url) as response:
        result_data = await response.json()
        story_url = ""
        if "url" in result_data:  # The url key might not be in the results data
            story_url = result_data['url']
        return story_rank, result_data['title'], story_url
cbac4d05915a82ab11854b9365acddb9c42944bd
25,816
def _GetAndroidVersionFromMetadata(metadata):
    """Return the Android version from metadata; None if it does not exist.

    In Android PFQ, the Android version is set to metadata in master
    (MasterSlaveLKGMSyncStage).
    """
    version_dict = metadata.GetDict().get('version', {})
    return version_dict.get('android')
e1d1ed9d0bbf2f65d646c11007739f6b5a9b78ec
700,514
import json


def format_lld(data_set):
    """Format JSON output for Zabbix LLD

    :param data_set: set or list of dictionaries
    :return: formatted Zabbix LLD data
    """
    return json.dumps({'data': data_set})
fdcb577fd75336ff439e45313448a1cb3ff03fc0
281,725
def asn1_to_der(asn1):
    """Convert from asn1crypto x509 to DER bytes.

    Args:
        asn1 (:obj:`x509.Certificate`): asn1crypto x509 to convert to DER bytes

    Returns:
        (:obj:`bytes`)
    """
    return asn1.dump()
b22f68537f32e8072bd0a1b5e25a494a5071e699
511,125
def _local(tag):
    """Extract the local tag from a namespaced tag name (PRIVATE)."""
    if tag[0] == '{':
        return tag[tag.index('}') + 1:]
    return tag
00895cb03f968a565de3224caad2f05d72557cdd
86,056
def unique_tuples(df, columns):
    """
    Return the set of unique tuples from a dataframe for the specified columns.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe with the columns of data to be considered.
    columns : list-like
        A list of column names in the dataframe from which to construct the tuples.
    """
    return set(zip(*[df[_] for _ in columns]))
9e6c31e957a1a7a5bf58fd8362ad8ae8106ac253
108,445
def NestedMultiplication(x, xValues, coeff):
    """Evaluates the Newton polynomial at x in nested (Horner) form, given
    the interpolating points and its coefficients."""
    n = coeff.size
    y = coeff[n - 1]
    for i in reversed(range(n - 1)):
        y = coeff[i] + (x - xValues[i]) * y
    return y
143729cfab7bfd61e76b6195d505e34e81366ad9
652,758
import torch


def normalize_pointcloud_transform(x):
    """
    Compute an affine transformation that normalizes the point cloud x to lie
    in [-0.5, 0.5]^3.

    :param x: A point cloud represented as a tensor of shape [N, 3]
    :return: An affine transformation represented as a tuple (t, s) where t
        is a translation and s is scale
    """
    min_x, max_x = x.min(0)[0], x.max(0)[0]
    bbox_size = max_x - min_x
    translate = -(min_x + 0.5 * bbox_size)
    scale = 1.0 / torch.max(bbox_size)
    return translate, scale
06c2178a92d8e5a1b585249e9aa1e1bd4c4e3d5d
652,736
def readWord(file):
    """ Read a big-endian 16 bit word from file. """
    return ord(file.read(1)) << 8 | ord(file.read(1))
d05ca2ad97b2a146f0cf39641eb597024d85dd6a
594,621
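A small usage sketch for readWord above: because the first byte is shifted left by 8, words are read big-endian.

import io

buf = io.BytesIO(b'\x12\x34')
assert readWord(buf) == 0x1234  # high byte first (big-endian)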
from typing import List
import heapq


def find_nth_highest_index(input: List[int], n: int = 2) -> int:
    """
    Finds the nth highest item in a list and returns its index. Note that n
    is 1-indexed here, e.g. n=1 would return the 1st largest element.
    """
    heap = []
    # Maintain a min-heap of the n largest (value, index) pairs seen so far.
    for idx, value in enumerate(input):
        if len(heap) < n:
            heapq.heappush(heap, (value, idx))
        elif value > heap[0][0]:
            heapq.heappushpop(heap, (value, idx))
    # The heap root is the nth highest value overall.
    value, idx = heapq.heappop(heap)
    return idx
c470694aadffef32b514e7c18469da8b6e2b2a69
409,258
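For instance (a small check, not from the source dataset), the 2nd highest of [5, 1, 9, 7] is 7, which sits at index 3:

assert find_nth_highest_index([5, 1, 9, 7], n=2) == 3  # 2nd highest is 7
assert find_nth_highest_index([5, 1, 9, 7], n=1) == 2  # highest is 9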
def count_true_positive(truth, recommend):
    """Count number of true positives from given sets of samples.

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        int: Number of true positives.
    """
    tp = 0
    for r in recommend:
        if r in truth:
            tp += 1
    return tp
6630d8a27ff401aa7d15ec81a23c3fc047c26a9e
274,235
import re


def _hours_to_ints(col_name):
    """A macro to rename hourly demand columns."""
    if re.match(r"^hour\d\d$", col_name):
        col_name = int(col_name[4:])
    return col_name
1599617416d3e0408ce90c344470d7c5cf9e843a
400,351
def get_span_row_count(span):
    """
    Gets the number of rows included in a span.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    rows : int
        The number of rows included in the span

    Example
    -------
    Consider this table::

        +--------+-----+
        | foo    | bar |
        +--------+     |
        | spam   |     |
        +--------+     |
        | goblet |     |
        +--------+-----+

    ::

        >>> span = [[0, 1], [1, 1], [2, 1]]
        >>> print(get_span_row_count(span))
        3
    """
    rows = 1
    first_row = span[0][0]
    for i in range(len(span)):
        if span[i][0] > first_row:
            rows += 1
            first_row = span[i][0]
    return rows
e226e0f78bd6711a7ddbe9c749ed43d1d2bc476c
14,083
def _CopyFieldToProtocolBuffer(field, pb):
    """Copies a field's contents to a document_pb.Field protocol buffer."""
    pb.set_name(field.name.encode('utf-8'))
    field_value_pb = pb.mutable_value()
    if field.language:
        field_value_pb.set_language(field.language.encode('utf-8'))
    if field.value is not None:
        field._CopyValueToProtocolBuffer(field_value_pb)
    return pb
61406ff3d3e85d2f528888b5f7384bc4056e0da1
597,688
import mimetypes


def get_content_type(filename):
    """
    Use the python mimetypes to determine a mime type, or return
    application/octet-stream.
    """
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
ba52c9c0aa9e421e74dbfb0c19da21fff3bcad0b
106,542
def bbox2ogr_clipdst(bbox):
    """
    Convert "29.9997,59.7816,30.6396,60.1117"
    to "29.9997 59.7816 30.6396 60.1117"
    """
    clipdst = '{x1} {y1} {x2} {y2}'
    clipdst = clipdst.format(
        x1=bbox.split(',')[0],
        y1=bbox.split(',')[1],
        x2=bbox.split(',')[2],
        y2=bbox.split(',')[3],
    )
    return clipdst
f41af0056a990258c03a5007f09ac1dd99451596
272,799
def detrend_none(x, axis=None):
    """
    Return x: no detrending.

    Parameters
    ----------
    x : any object
        An object containing the data

    axis : int
        This parameter is ignored. It is included for compatibility with
        detrend_mean.

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_linear : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    return x
73a99772443220314c7ce803fdbd815910e706d0
694,398
def dict_nested_get(dictionary_or_value, keys, default=None):
    """
    Performs a dictionary.get(key, default) using the supplied list of keys,
    assuming that each successive key is nested. For example, for a dictionary
    dictionary = { "key1": { "key2": 1 } }, use
    nested_get(dictionary, ["key1", "key2"]) to get the value of "key2".

    Args:
        dictionary_or_value: The dictionary to get the value from, or the
            value itself from this recursive method.
        keys: The list of nested keys. Note that the list is consumed
            (via pop) as the recursion descends.
        default: The default value to return if no value exists. Default is None.

    Returns:
        The value of the nested key or the default if not found.
    """
    if isinstance(dictionary_or_value, dict) and isinstance(keys, list) and (len(keys) > 1):
        key = keys.pop(0)
        return dict_nested_get(dictionary_or_value.get(key, default), keys, default)
    elif isinstance(dictionary_or_value, dict) and isinstance(keys, list) and (len(keys) == 1):
        return dictionary_or_value.get(keys[0], default)
    elif (dictionary_or_value is not None) and (not isinstance(dictionary_or_value, dict)):
        return dictionary_or_value
    else:
        return default
0faec6cb8111d25267ca757f11abc6f68621aec0
374,366
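A short usage sketch for dict_nested_get above, mirroring the docstring's example:

d = {"key1": {"key2": 1}}
print(dict_nested_get(d, ["key1", "key2"]))        # 1
print(dict_nested_get(d, ["key1", "missing"], 0))  # 0
# Pass a copy if you need the key list afterwards: keys are popped in place.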
import json


def get_http_error_response_json(error_title, error_key, error_value):
    """Returns a JSON object indicating an HTTP error."""
    http_error_resp = {}
    http_error_resp["errors"] = []
    http_error_resp["errors"].append(
        {"error": error_title, "error_values": [{error_key: error_value}]}
    )
    return json.dumps(http_error_resp)
7e0b35e69c2961f21fe690179e7432e8f060982f
346,857
import math


def log2(x):
    """Calculate the base-2 logarithm of x: log2(x)"""
    return math.log2(x)
ab3cc630622e9d5edc426952ba02ed69247db6f0
153,803
def remove_prefix(string, prefix):
    """
    This function removes the prefix string of a string. If the prefix does
    not exist, the string is returned unchanged.
    See https://stackoverflow.com/a/16891418

    Parameters
    ----------
    string : string
        The string from which the prefix will be removed.
    prefix : string
        The prefix.

    Returns
    -------
    base : string
        The string, without the prefix.
    """
    try:
        # str.removeprefix is available in Python 3.9+; it returns a new
        # string, so the result must be assigned.
        string = string.removeprefix(prefix)
    except AttributeError:
        # The fallback solution for older Python versions.
        if string.startswith(prefix):
            string = string[len(prefix):]
    # Return, all done, naming convention.
    base = string
    return base
de742a6df7926f4e407edad577de070cf18d1d6e
380,019
def eeg_name_frequencies(freqs):
    """
    Name frequencies according to standard classifications.

    Parameters
    ----------
    freqs : list or numpy.array
        list of floats containing frequencies to classify.

    Returns
    ----------
    freqs_names : list
        Named frequencies

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15])

    Notes
    ----------
    *Details*

    - Delta: 1-3Hz
    - Theta: 4-7Hz
    - Alpha1: 8-9Hz
    - Alpha2: 10-12Hz
    - Beta1: 13-17Hz
    - Beta2: 18-30Hz
    - Gamma1: 31-40Hz
    - Gamma2: 41-50Hz
    - Mu: 8-13Hz

    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)

    References
    ------------
    - None
    """
    freqs = list(freqs)
    freqs_names = []
    for freq in freqs:
        if freq < 1:
            freqs_names.append("UltraLow")
        elif freq <= 3:
            freqs_names.append("Delta")
        elif freq <= 7:
            freqs_names.append("Theta")
        elif freq <= 9:
            freqs_names.append("Alpha1/Mu")
        elif freq <= 12:
            freqs_names.append("Alpha2/Mu")
        elif freq <= 13:
            freqs_names.append("Beta1/Mu")
        elif freq <= 17:
            freqs_names.append("Beta1")
        elif freq <= 30:
            freqs_names.append("Beta2")
        elif freq <= 40:
            freqs_names.append("Gamma1")
        elif freq <= 50:
            freqs_names.append("Gamma2")
        else:
            freqs_names.append("UltraHigh")

    return freqs_names
f5f055c7855ad3cf1d2bcf2bb8cde1efb6888ad3
137,868
from typing import Dict, Tuple


def get_pair_stats(vocab: Dict[str, int]) -> Dict[Tuple[str, str], int]:
    """Get counts of pairs of consecutive symbols."""
    pairs = {}
    for word, frequency in vocab.items():
        symbols = word.split()
        # count occurrences of pairs
        for i in range(len(symbols) - 1):
            pair = (symbols[i], symbols[i + 1])
            current_frequency = pairs.get(pair, 0)
            pairs[pair] = current_frequency + frequency
    return pairs
87300f91867f66f3c686ab22f064f92bd6a129d3
356,238
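get_pair_stats above is the pair-counting step of byte-pair encoding: each word maps to its corpus frequency, and every adjacent symbol pair accumulates that frequency. A small check with an invented vocabulary:

vocab = {'l o w': 5, 'l o w e r': 2}
pairs = get_pair_stats(vocab)
print(pairs[('l', 'o')])  # 7 = 5 + 2, the pair occurs in both words
print(pairs[('w', 'e')])  # 2, only in 'l o w e r'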
from collections import Counter


def count_steps(input_str):
    """Count steps up/down ( '(' / ')' )"""
    # typing.Counter cannot be instantiated; use collections.Counter
    counter = Counter(input_str)
    return counter['('] - counter[')']
829f3f245cbf7ca98038d5978ae624537f285fac
138,206
def removeMacColons(macAddress):
    """ Removes colon characters from a MAC address """
    return macAddress.replace(':', '')
babe078c4a2b91e7ee56be15f62e58baa4189b8d
646,320
def _GetCulpritInfo(analysis):
    """Returns a dict with information about the culprit git_hash.

    Args:
        analysis (MasterFlakeAnalysis): The master flake analysis the suspected
            flake build is associated with.

    Returns:
        A dict in the format:
        {
            'commit_position': int,
            'git_hash': str,
            'url': str,
            'confidence': float,
        }
    """
    if analysis.culprit is None:
        return {}
    return {
        'commit_position': analysis.culprit.commit_position,
        'git_hash': analysis.culprit.revision,
        'url': analysis.culprit.url,
        'confidence': analysis.culprit.confidence,
    }
feb0d50465f8a3a32153fd5ccadcfb6268116719
519,113
def get_user_name(handle):
    """Returns the user name of the user executing the command, given 'handle'."""
    return handle['user']
273c1b20db105daf17a4fb9ec37ed97bde67fe93
526,369
import ntpath


def fileparts(n):
    r"""
    p, n, e = fileparts(filename)

    fileparts(r'c:\blah\BLAH.jpg') returns ('c:\blah', 'BLAH', '.jpg')

    Note that the '.' lives with the extension, and separators have been
    removed. (The docstring is raw so the backslashes survive intact.)
    """
    p = ntpath.dirname(n)
    basename = ntpath.basename(n)
    n, e = ntpath.splitext(basename)
    return p, n, e
609aa0dfaa1cfb55520dcbf650e98ba2c4c07948
336,671
from typing import Union
import multiprocessing
import re


def parse_num_processors(value: Union[str, int, float]):
    """Convert input value (parse if string) to number of processors.

    Args:
        value: an int, float or string; string value can be "X", "MAX" or "MAX-X"

    Returns:
        An int of the number of processors to use

    Raises:
        Exception: Input value exceeds number of available processors
        Exception: Input value less than 1 processors
    """
    max_processors = multiprocessing.cpu_count()
    if isinstance(value, str):
        result = value.upper()
        if result == "MAX":
            return max_processors
        if re.match("^[0-9]+$", value):
            return int(value)
        result = re.split(r"^MAX[\s]*-[\s]*", result)
        if len(result) == 2:
            return max(max_processors - int(result[1]), 1)
        raise Exception(f"Input value {value} must be an int or a string of the form 'X', 'MAX' or 'MAX-X'")
    result = int(value)
    if result > max_processors:
        raise Exception(f"Input value {value} greater than available processors")
    if result < 1:
        raise Exception(f"Input value {value} less than 1 processors")
    return result
174f825c00946e80a64ce998508d34149e01bf14
659,772
def load_targets_file(input_file):
    """
    Takes a string indicating a file name and reads the contents of the file.
    Returns a list containing each line of the file.

    Precondition: input_file should exist in the file system.
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()
    out = [i.replace('\n', '').replace('\r', '') for i in lines]
    return out
40d305e244264d6c3249bb9fb914cda3ebcda711
705,072
def BM3_EOS_energy(V, V0, E0, K0, Kp0):
    """Calculate the energy from a 3rd order Birch-Murnaghan EOS."""
    E = E0 + ((9.0 * V0 * K0) / 16.0) * (
        (((V0 / V)**(2.0 / 3.0) - 1.0)**3.0) * Kp0 +
        (((V0 / V)**(2.0 / 3.0) - 1.0)**2.0 * (6.0 - 4.0 * (V0 / V)**(2.0 / 3.0))))
    return E
b0a4371e9710a3be08541dae6f2d1650ada61d8b
245,908
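For reference, BM3_EOS_energy above implements the standard third-order Birch-Murnaghan energy equation of state, with Kp0 playing the role of the pressure derivative K0':

E(V) = E0 + (9 V0 K0 / 16) * { [(V0/V)^(2/3) - 1]^3 * Kp0
                               + [(V0/V)^(2/3) - 1]^2 * [6 - 4 (V0/V)^(2/3)] }

Note that E(V0) = E0, as both bracketed terms vanish at V = V0.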
from collections import OrderedDict


def _translate_reverse_relationships(opts, reverse_relations):
    """
    DRF's `_get_reverse_relationships` uses the `get_accessor_name` method of
    `ForeignObjectRel` as the key for the relationship. This function replaces
    those keys with the rel's `name` property. This allows us to later look up
    the relationship with `opts.get_field()`.
    """
    # typing.OrderedDict cannot be instantiated; use collections.OrderedDict
    return OrderedDict([(
        relation.name,
        reverse_relations.pop(relation.get_accessor_name())
    ) for relation in opts.related_objects])
56851cb19ec4410bff5d18f08b088957a04a8ae5
656,053
def parse_u32(byte_seq, index):
    """Parses a little-endian u32 value at the given index from the byte_seq."""
    return (byte_seq[index]
            | (byte_seq[index + 1] << 8)
            | (byte_seq[index + 2] << 16)
            | (byte_seq[index + 3] << 24))
d8c29a2d255900c09b9b313b4d636cc40782be46
482,799
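A one-line check for parse_u32 above: the byte at the given index is the least significant, so the read is little-endian.

data = bytes([0x78, 0x56, 0x34, 0x12])
assert parse_u32(data, 0) == 0x12345678  # least significant byte comes first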
def flatten(l):
    """Flatten a nested list."""
    return sum(map(flatten, l), []) \
        if isinstance(l, (list, tuple)) else [l]
99fbeb4257d6f1d12569858b8f85445c6de3c0d9
174,946
def sectRect(rect1, rect2):
    """Return a boolean and a rectangle.

    If the input rectangles intersect, return True and the intersecting
    rectangle. Return False and (0, 0, 0, 0) if the input rectangles don't
    intersect.
    """
    (xMin1, yMin1, xMax1, yMax1) = rect1
    (xMin2, yMin2, xMax2, yMax2) = rect2
    xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2),
                              min(xMax1, xMax2), min(yMax1, yMax2))
    if xMin >= xMax or yMin >= yMax:
        return False, (0, 0, 0, 0)
    return True, (xMin, yMin, xMax, yMax)
4c9d5dd10c3aa0d30127776a92f70d7e2037b066
281,380
def cut_to_specific_word(dataframe, specific_word, part_in_bid):
    """
    Takes in a dataframe and a column, and then creates a new dataframe
    containing only the rows from the original dataframe that had the search
    word in that column.

    :params: a dataframe, a specific_word to look for, and a column name
        relating to a part in a case study in which the specific_word should
        be searched for
    :return: a dataframe containing only the case studies which had the
        specific word in the column of interest
    """
    # Cut dataframe down to only those rows with a word in the right column
    current_df = dataframe[dataframe[part_in_bid].str.contains(
        r'\b' + specific_word + r'\b', regex=True, na=False)]
    # Suppress the annoyingly pedantic SettingWithCopyWarning,
    # since we know explicitly we're dealing with a copy
    current_df.is_copy = False
    # Add a new col to indicate where the specific word was found
    new_col_name = specific_word + '_found_in_' + part_in_bid
    current_df[new_col_name] = part_in_bid
    # Drop all columns except the case study and the col showing where the word was found
    current_df = current_df[['Case Study Id', new_col_name]]
    return current_df
c8ee42088cb85c053b1a5a8b3877436a50df689d
653,543
def load_metaparameters(param_dict=None):
    """
    Parameters for the Bayesian optimizer. Returns the default dictionary
    below, updated with the entries of param_dict.
    """
    metaparams = {'architecture': 'svm', 'log_gamma': -3, 'log_C': -2}
    if param_dict:
        metaparams.update(param_dict)
    return metaparams
c07e2c24fdcb4cd553559129a4a6a2cfe8cff726
372,407
def get_consecutive_num(arr):
    """
    Method to get the second number in each pair of consecutive numbers
    (the values themselves, not their indices).

    Note: solve_90f3ed37 uses this function
    """
    rows = []
    for i in range(len(arr) - 1):
        if (arr[i] + 1) == arr[i + 1]:
            rows.append(arr[i + 1])
    return rows
24417126d4133db62519dd044f7e0b9498d1ad0f
676,356
def dictkeyclean(d):
    """Convert all keys of the dict `d` to strings."""
    new_d = {}
    for k, v in d.items():
        new_d[str(k)] = v
    return new_d
97db663ef2bd135ab96719cd05f0a55afdb57b99
263,469
def glue(stdin, delimiter=" "):
    """
    Join every line in the stream, using the given delimiter.

    The default delimiter is a space.

    >>> list([[[1], [2]], [[3], [4]], [[5], [6]]] |
    ...       traverse() | map(str) | glue(" "))
    ['1 2 3 4 5 6']
    """
    data = list(stdin)
    return iter([delimiter.join(data)])
9ea1b5caa226326ef26389b0ea1cc77eac64e1df
560,448
from typing import List, Tuple


def find_two_smallest_walk(L: List[float]) -> Tuple[int, int]:
    """Return a tuple of the indices of the two smallest values in list L.

    >>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
    >>> find_two_smallest_walk(items)
    (6, 7)
    >>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
    True
    >>> items = [96, 834, 477, 478, 307, 122, 96, 102, 324, 476]
    >>> find_two_smallest_walk(items)
    (0, 6)
    """
    # Track indices rather than values so duplicates are handled correctly
    # (e.g. the two occurrences of 96 in the second doctest).
    if L[0] < L[1]:
        smallest, second_smallest = 0, 1
    else:
        smallest, second_smallest = 1, 0

    # Examine each remaining value in the list in order, updating the two
    # indices whenever a smaller value is found.
    for i in range(2, len(L)):
        if L[i] < L[smallest]:
            second_smallest = smallest
            smallest = i
        elif L[i] < L[second_smallest]:
            second_smallest = i

    # Return the two indices
    return (smallest, second_smallest)
c00d018fd553fd15eb0063909023d2b56423c3b1
422,933
import random


def generate_chunks(total, min_val, max_val, num_chunks=-1):
    """
    Randomly generate a list of integers l such that sum(l) = total and,
    for each x in l, min_val <= x <= max_val.

    If num_chunks > 0, it is guaranteed that the list contains exactly
    num_chunks elements.
    """
    if num_chunks <= 0:
        chunks = []
        while total > 0:
            next_chunk_size = random.randint(min(total, min_val), min(total, max_val))
            # Re-draw if the remainder would be too small to form a valid chunk
            if 0 < total - next_chunk_size < min_val:
                continue
            total -= next_chunk_size
            chunks.append(next_chunk_size)
        return chunks
    else:
        if total < num_chunks * min_val:
            raise ValueError('Total ({}) must be >= num_chunks * min_val ({}*{})'.format(total, num_chunks, min_val))
        if total > num_chunks * max_val:
            raise ValueError('Total ({}) must be <= num_chunks * max_val ({}*{})'.format(total, num_chunks, max_val))
        # Give every chunk min_val up front, then randomly split the remainder
        total -= num_chunks * min_val
        chunks = None
        while not chunks or any([x > max_val for x in chunks]):
            split_points = [0, total]
            for _ in range(num_chunks - 1):
                split_points.append(random.randint(0, total))
            split_points.sort()
            chunks = [split_points[i + 1] - split_points[i] + min_val
                      for i in range(len(split_points) - 1)]
        return chunks
a76c2f206801b339b203197249bc969cf91c7caa
115,584
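A short usage sketch for generate_chunks above; the exact chunks are random, but the sum and bounds hold by construction (values invented for illustration):

random.seed(0)  # only for a reproducible demo
chunks = generate_chunks(100, 10, 30, num_chunks=5)
assert len(chunks) == 5
assert sum(chunks) == 100
assert all(10 <= c <= 30 for c in chunks)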
import random


def pick_best_and_random(pop, maximize=False):
    """
    Select the best individual from a population and pair it with a random
    individual from the population.

    :param pop: input population
    :param maximize: when true a higher fitness score is better, otherwise a
        lower score is considered better
    :return: a tuple with the best and a random individual
    """
    evaluated_individuals = tuple(filter(lambda x: x.fitness is not None, pop))
    if len(evaluated_individuals) > 0:
        mom = max(evaluated_individuals,
                  key=lambda x: x.fitness if maximize else -x.fitness)
    else:
        mom = random.choice(pop)
    dad = random.choice(pop)
    return mom, dad
35c8ecb4f3966a06dd62b7fa34a3c5faf076805f
201,794
def range_splits(tensor, split_ranges, dim):
    """Splits the tensor according to chunks of split_ranges.

    Arguments:
        tensor (Tensor): tensor to split.
        split_ranges (list(tuples(int,int))): sizes of chunks (start, end).
        dim (int): dimension along which to split the tensor.
    """
    return [tensor.narrow(int(dim), start, end - start)
            for start, end in split_ranges]
67de1dd67a49e953dfc259031d852649be0e6343
588,925
def getlabel(section):
    """
    Convert all activity outcome strings in a list to 1, 0 or None.
    """
    for i in range(len(section)):
        if section[i] == 'Active':
            section[i] = 1
        elif section[i] == 'Inactive':
            section[i] = 0
        else:
            section[i] = None
    return section
6b30fd141137690e9919503f91063db12eede8aa
316,515
def read_from_occ_translation_dict(occupation, tdict):
    """Map an original occupation to its HISCO code using the dictionary of
    unique occupations."""
    hisco = tdict[occupation]
    return hisco
8089d42143c2e7012ac31cdf1a5ee214347c7fe3
70,518
def read_file_data(filepath):
    # type: (str) -> list
    """Reads the database files and returns them as a list."""
    dblist = []
    try:
        with open(filepath, 'r') as f:
            dblist = f.read().splitlines()
    except (IOError, TypeError) as e:
        print(e)
    return dblist
96716241fbcd64c03af22821504af7ce53ea7f28
169,333