Columns: content (string, 39 to 9.28k chars) · sha1 (string, 40 chars) · id (int64, 8 to 710k)
def get_index_from_tensor_name(tensor_name):
    """
    Get the index of the tensor of a certain op.

    Args:
        tensor_name (str): The tensor name

    Returns:
        int: The index of the tensor
    """
    return int(tensor_name.split(':')[1])
dc429ca35a2e35e536298b4a355acbdc8076f41e
587,633
def merge_key(record):
    """
    Sort records by all fields that records_match will use to check for
    duplicates, in sequence, so that all identical records according to
    records_match will be adjacent
    """
    chr2 = record.info['CHR2'] if 'CHR2' in record.info else None
    end2 = record.info['END2'] if 'END2' in record.info else None
    return (record.pos, record.stop, record.info['SVTYPE'],
            record.info['SVLEN'], chr2, end2, record.info['STRANDS'],
            record.id)
4ad5ffca7bfa00b41e932280caa0d9a58773b6a1
281,254
def select_keys(d, keys):
    """
    >>> d = {'foo': 52, 'bar': 12, 'baz': 98}
    >>> select_keys(d, ['foo'])
    {'foo': 52}
    """
    return {k: d[k] for k in keys}
2bfb7e39328e89a7111a00b72818a0ceeb260916
533,103
def msb(n):
    """Given an integer >= 0, return the number of bits needed to represent
    it, i.e. the 1-based position of the most significant bit (0 for n == 0).
    Equivalent to n.bit_length()."""
    assert n < 2**32
    c = 0
    while n > 0:
        n >>= 1
        c += 1
    return c
3f651aa1268af5614530897c01effd109761f589
435,413
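A quick sanity check for msb above (not part of the dataset; it assumes the function is in scope): the value returned is the bit count, matching Python's built-in int.bit_length().

# Sketch: msb(n) agrees with int.bit_length() for every valid input.
for n in (0, 1, 2, 255, 256, 2**31):
    assert msb(n) == n.bit_length()
print(msb(255), msb(256))  # prints: 8 9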
import math


def tp2xy(tp, r):
    """Converts TP angles into XY cartesian coordinates, where arm lengths
    associated with angles theta and phi are respectively r[0] and r[1].

    INPUTS:  tp ... [theta,phi], unit degrees
             r  ... [central arm length, eccentric arm length]
    OUTPUT:  xy ... [x,y]
    """
    t = math.radians(tp[0])
    t_plus_p = t + math.radians(tp[1])
    x = r[0] * math.cos(t) + r[1] * math.cos(t_plus_p)
    y = r[0] * math.sin(t) + r[1] * math.sin(t_plus_p)
    return x, y
8bc27bad8d6916891f535bc1e290226f05d21bea
356,638
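A minimal usage sketch for tp2xy above (not from the dataset; the arm lengths are made up so the result is easy to verify by hand):

import math

# Both links of length 3; theta=0, phi=90 puts the eccentric arm straight up,
# so the end effector lands at (3, 3).
x, y = tp2xy([0.0, 90.0], [3.0, 3.0])
assert math.isclose(x, 3.0) and math.isclose(y, 3.0)

# Folding the arm fully back (phi=180) returns it to the origin.
x, y = tp2xy([0.0, 180.0], [3.0, 3.0])
assert math.isclose(x, 0.0, abs_tol=1e-9) and math.isclose(y, 0.0, abs_tol=1e-9)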
import re


def convert_c_source_to_bytes(input_cc_file):
    """Converts C++ source file to bytes (immutable).

    Args:
        input_cc_file: A .cc file to process.

    Returns:
        An immutable bytes object holding the hex byte values parsed from
        the input cc file's array.
    """
    pattern = re.compile(r'(((0x[0-9a-fA-F]+),?)+)')
    model_bytearray = bytearray()
    with open(input_cc_file) as file_handle:
        for line in file_handle:
            values_match = pattern.search(line)
            if values_match is None:
                continue
            list_text = values_match.group(1)
            values_text = filter(None, list_text.split(','))
            values = [int(x, base=16) for x in values_text]
            model_bytearray.extend(values)
    return bytes(model_bytearray)
f60832e365ea33a4b1dc4c2566ac675ed56aa131
334,794
def line_number(filename, string_to_match):
    """Helper function to return the line number of the first matched string."""
    with open(filename, 'r') as f:
        for i, line in enumerate(f):
            if line.find(string_to_match) != -1:
                # Found our match.
                return i + 1
    raise Exception("Unable to find '%s' within file %s"
                    % (string_to_match, filename))
8be5cae464da1b65d4d14b6dc2c78299be77650c
483,023
import hashlib


def checksum(filename):
    """Get file checksum.

    :param filename: str, input filename
    :return md5sum: str
    """
    md5sum = hashlib.md5()
    # Use a context manager so the file handle is closed when done.
    with open(filename, "rb") as data:
        # Loop over chunks of the file so we don't read it all into memory
        for chunk in iter(lambda: data.read(4096), b""):
            # Update the checksum with each chunk
            md5sum.update(chunk)
    return md5sum.hexdigest()
845fc8709cfc3232987580d1900ecc11b2ce8473
525,777
import resource


def memory_fitness(threshold=2e9, maximum=3e9):
    """
    Returns a penalty for using too much memory. Add this to your fitness
    function.

    This measures the current process's maximum resident set (maxrss), which
    is the all-time peak memory usage for the calling process.

    Argument threshold is where this penalty begins.

    Argument maximum is where this penalty becomes an error (ValueError).

    Returns a value in the range [0, 1] where 0 is no penalty and 1 is the
    maximum memory usage; a linear ramp from threshold to maximum.
    """
    rsc = resource.getrusage(resource.RUSAGE_SELF)
    size = rsc.ru_maxrss * 1024
    fit = (size - threshold) / (maximum - threshold)
    if fit > 1 and False:
        # Deliberately disabled: don't raise an exception because this is
        # measured after collecting all of the data. If only there were a way
        # to raise an OS exception when it tried to allocate the memory...
        raise ValueError(
            "Individual exceeded memory limit (size %d bytes, maximum %d)."
            % (size, maximum))
    return max(0, fit)
136e87a625cbc667a2cbfefe1ad9fe85d6348d4f
281,060
def number_of_constituents(bc_class):
    """
    Calculates the number of constituents

    Args:
        bc_class: The ADH simulation class that holds all simulation
            information

    Returns:
        The number of transport constituents
    """
    num_trn = 0
    cn = bc_class.constituent_properties
    if cn.salinity:
        num_trn += 1
    if cn.temperature:
        num_trn += 1
    if cn.vorticity:
        num_trn += 1
    if not cn.general_constituents.empty:
        num_trn += len(cn.general_constituents.index)
    if not cn.sand.empty:
        num_trn += len(cn.sand.index)
    if not cn.clay.empty:
        num_trn += len(cn.clay.index)
    return num_trn
b290bc6ef6f4b02889dcc82d91120f44bff5f650
15,646
import collections


def scale_cluster_zero(filenames, clustering, size_multiplier=4):
    """
    Scales cluster zero to a multiple of the second largest class

    Parameters
    ----------
    filenames : list of str
        list of strings containing file names of images
    clustering : list of int
        list of integers giving the cluster assignment of each image in
        filenames
    size_multiplier : int, default=4
        Determines how to scale cluster 0. Cluster 0 will have, at maximum,
        as many images as size_multiplier times the size of the second
        largest cluster.

    Returns
    -------
    scaled_filenames : list of str
    scaled_clustering : list of int
    """
    scaled_filenames = []
    scaled_clustering = []
    cluster_sizes = collections.Counter(clustering)
    # Look up cluster 0 directly rather than assuming it is the most common,
    # and take the largest of the remaining clusters as "second largest".
    size_of_class_0_prior = cluster_sizes[0]
    second_largest_cluster_size = max(
        size for cluster, size in cluster_sizes.items() if cluster != 0)
    size_of_class_zero = size_multiplier * second_largest_cluster_size
    print("Reducing size of class 0 from {} to {} based on second largest "
          "cluster (size {})".format(
              size_of_class_0_prior, size_of_class_zero,
              second_largest_cluster_size))
    admitted = 0
    for fn, cluster in zip(filenames, clustering):
        if cluster == 0:
            if admitted >= size_of_class_zero:
                continue
            admitted += 1
        scaled_filenames.append(fn)
        scaled_clustering.append(cluster)
    return scaled_filenames, scaled_clustering
89e4a9b78e99ae4c0fb3a9d4147bc633979c48b7
152,773
def remove_repeated_asn(path):
    """
    remove repeated ASN in the given path

    Args:
        path (list of ASN): ASN can be int, or str if IXP hop

    Returns:
        list of ASN
    """
    removed = []
    for idx, hop in enumerate(path):
        if idx == 0:
            removed.append(hop)
        elif hop != path[idx - 1]:
            removed.append(hop)
    return removed
3c9900a3c2cdb3236926a87e9404aa027a9afaa8
67,470
def parse_req(spec: str) -> str:
    """Parse package name==version out of a requirements file line."""
    if ";" in spec:  # remove environment-marker restriction
        spec, _ = [x.strip() for x in spec.split(";", 1)]
    if "#" in spec:  # remove comment
        spec = spec.strip().split("#")[0]
    if "\\" in spec:  # remove line continuations
        spec = spec.strip().split("\\")[0]
    if "--hash=" in spec:  # remove hash pins
        spec = spec.strip().split("--hash=")[0]
    return spec
2aa838a7e0182fa600c8500db4bd00a964d072bc
98,489
def remover_sp_str_load(list_parameters):
    """ Remove the sp and the brackets of the elements of the list """
    i = 0
    length = len(list_parameters)
    while i < length:
        list_parameters[i] = (list_parameters[i]
                              .replace("[", "").replace("]", "")
                              .replace("sp", ""))
        i += 1
    list_parameters.remove("")
    if len(list_parameters) == 1:
        list_parameters.append("#0")
    return [list_parameters[0], list_parameters[1]]
98f090585395b4f105f262f93671d2b4c3dd3d34
289,205
def get_nonzero_either_mask(vector_a, vector_b):
    """Returns a numpy array of boolean values indicating where values in
    two vectors are both greater than zero.

    Parameters
    ----------
    vector_a : numpy.ndarray
        Array of counts or RPKM
    vector_b : numpy.ndarray
        Array of counts or RPKM

    Returns
    -------
    numpy.ndarray
        Boolean array that is `True` where both `vector_a` and `vector_b`
        have values greater than zero, and `False` elsewhere.
    """
    return (vector_a > 0) & (vector_b > 0)
82f7433bcbcfcfc799b46083b112a9a7abcab918
30,642
def cal_support(freq, n_rows):
    """
    Calculate support

    Parameters
    ----------
    freq : int
        frequency in the data, for example: 32
    n_rows : int
        rows of data, for example: 100

    Returns
    -------
    support, for example: 32 / 100 = 0.32
    """
    if n_rows == 0:
        raise ValueError("n_rows must not be zero")
    support = freq / n_rows
    return round(support, 3)
a767b3dab71e83418759abfe6243f28308b051e7
380,888
from datetime import datetime

from dateutil import tz


def datetime_to_isoformat(dt: datetime) -> str:
    """
    Convert a datetime to an ISO 8601 date string.

    :param dt: datetime object
    :return: iso8601 date string
    """
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=tz.tzutc())
    return dt.isoformat()
54cada59974202e0a9d71ddfb5a04e8a69ab1816
380,398
import hashlib


def sha256(text):
    """Returns SHA256 of the string `text`."""
    return hashlib.sha256(text.encode("utf8")).hexdigest()
2609c5338e59bb34017898ca28e8e60966b7402f
601,631
def load_txt_file_as_string(path_txt_file: str) -> str:
    """This function loads a txt file and returns its content as a string.

    Args:
        path_txt_file (str): path to txt file

    Returns:
        content (str): content of txt file
    """
    with open(path_txt_file) as file:
        content = file.read()
    return content
1de33e180fc10eca051f16dc6f03af1e6ec7193c
426,970
import functools


def strip_string(original_func):
    """
    Strip white space from a function's argument before the function
    operates on it, like:

    Examples
    --------
    >>> @strip_string
    ... def kcmpnm_to_chan(kcmpnm):
    ...     return kcmpnm[:6]
    >>> kcmpnm_to_chan('    my_too_long_component ')
    'my_too'
    """
    @functools.wraps(original_func)
    def converter(hdr):
        # Strip surrounding whitespace; the original int(hdr) call here
        # contradicted the docstring and would fail on non-numeric input.
        return original_func(hdr.strip())
    return converter
2509b7a3300a82a604246272d8a42702c0d921d2
519,510
from typing import Any, Optional


def get_nonempty_str(arg: Any) -> Optional[str]:
    """Either get the non-empty string from the given arg or None if it is
    not a non-empty string."""
    if isinstance(arg, str):
        if len(arg) > 0:
            return arg
    return None
7f9069059febcd5695166320d21937528424d8bc
385,445
def clean_consecutive_duplicates(
    move_data, subset=None, keep="first", inplace=False
):
    """
    Removes consecutive duplicate rows of the Dataframe; optionally only
    certain columns can be considered.

    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    subset : Array of Strings, optional(None by default)
        Specifies Column label or sequence of labels, considered for
        identifying duplicates. By default all columns are used.
    keep : String. Optional(first by default)
        Determines which duplicate will be removed. If keep is set as first,
        all the duplicates except for the first occurrence will be dropped.
        Otherwise, all duplicates except for the last occurrence will be
        dropped.
    inplace : boolean, optional(False by default)
        if set to true the original dataframe will be altered, the
        duplicates will be dropped in place, otherwise a copy will be
        returned.

    Returns
    -------
    move_data : dataframe or None
        The filtered trajectories points without consecutive duplicates.
    """
    if keep == "first":
        n = 1
    else:
        n = -1
    if subset is None:
        filter_ = (move_data.shift(n) != move_data).any(axis=1)
    else:
        filter_ = (move_data[subset].shift(n) != move_data[subset]).any(axis=1)
    return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
7f1d4258810c7c5bc117d2b5b2d098abffd4b613
101,414
def convert_acl_to_iam_policy(acl):
    """Converts the legacy ACL format to an IAM Policy proto."""
    owners = acl.get('owners', [])
    readers = acl.get('readers', [])
    if acl.get('all_users_can_read', False):
        readers.append('allUsers')
    writers = acl.get('writers', [])
    bindings = []
    if owners:
        bindings.append({'role': 'roles/owner', 'members': owners})
    if readers:
        bindings.append({'role': 'roles/viewer', 'members': readers})
    if writers:
        bindings.append({'role': 'roles/editor', 'members': writers})
    return {'bindings': bindings}
990cdb6a51a696cf2b7825af94cf4265b2229be9
706,748
def _get_bit(byte, ii):
    """Return the bit value at index `ii` of `byte`.

    Bit index is 0 = MSB, 7 = LSB
    """
    return (byte >> (7 - ii)) & 1
65de3c1f4e0fce19d05113b4cb13629b591c1f0d
606,149
def TrimBeginningAndEndingSlashes(path):
    """Trims beginning and ending slashes

    :Parameters:
        - `path`: str

    :Returns:
        str, path with beginning and ending slashes trimmed
    """
    if path.startswith('/'):
        # Returns substring starting from index 1 to end of the string
        path = path[1:]
    if path.endswith('/'):
        # Returns substring from the beginning to the last but one char
        path = path[:-1]
    return path
05bf230ca5123c0d0e6cc7b1eb96fe144c0a3b41
432,248
from typing import Dict, List


def to_dict(args: List[Dict]) -> Dict:
    """Convert a list of serialized key-value pairs into a dictionary that
    maps the keys to their values.

    Parameters
    ----------
    args: list
        List of dictionary serializations for key-value pairs.

    Returns
    -------
    dict
    """
    return {a['key']: a['value'] for a in args}
41de2fd7c53b1dbd808c429b79ab22238ba66940
298,874
def rigidity_bending_plate(height, e_modulus, poisson):
    """Calculates the bending rigidity of a plate:
    D = E * h^3 / (12 * (1 - nu^2))."""
    return e_modulus * (height ** 3) / (12 * (1 - poisson ** 2))
69ec71931742c30544ef96ea5be9cd515ae55292
419,467
import math


def get_bearing(start_point, end_point):
    """
    Calculates the bearing between two points.

    Parameters
    ----------
    start_point: geopy.Point
    end_point: geopy.Point

    Returns
    -------
    bearing: float
        Bearing in degrees between the start and end points.
    """
    start_lat = math.radians(start_point.latitude)
    start_lng = math.radians(start_point.longitude)
    end_lat = math.radians(end_point.latitude)
    end_lng = math.radians(end_point.longitude)
    d_lng = end_lng - start_lng
    if abs(d_lng) > math.pi:
        if d_lng > 0.0:
            d_lng = -(2.0 * math.pi - d_lng)
        else:
            d_lng = (2.0 * math.pi + d_lng)
    tan_start = math.tan(start_lat / 2.0 + math.pi / 4.0)
    tan_end = math.tan(end_lat / 2.0 + math.pi / 4.0)
    d_phi = math.log(tan_end / tan_start)
    bearing = (math.degrees(math.atan2(d_lng, d_phi)) + 360.0) % 360.0
    return bearing
186150a64f3448a9cd42e35851997d33c7d0758d
141,051
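A small check for get_bearing above (not from the dataset). geopy.Point only needs to expose .latitude and .longitude here, so a namedtuple stand-in is enough:

from collections import namedtuple

Point = namedtuple("Point", ["latitude", "longitude"])  # stand-in for geopy.Point

print(get_bearing(Point(0.0, 0.0), Point(0.0, 10.0)))  # due east along the equator -> 90.0
print(get_bearing(Point(0.0, 0.0), Point(10.0, 0.0)))  # due north -> 0.0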
def _fmt_msg(msg):
    """Format the message for final display.

    Parameters
    ----------
    msg : str
        The message to show to the user to provide additional context.

    Returns
    -------
    fmtd : str
        The formatted message to put into the error message.
    """
    if not msg:
        return ''
    return msg + '\n'
896fca1d342d966fa31ed7966eda76471e2c8f7f
468,173
def mavlink_latlon(degrees):
    """Converts a MAVLink packet lat/lon degree format to decimal degrees."""
    return float(degrees) / 1e7
20d883e45f99cca3c99eeb9d7c5fae96db03fd5a
13,436
def parse_events(props):
    """
    Pull out the dashEvents from the Component props

    Parameters
    ----------
    props: dict
        Dictionary with {propName: propMetadata} structure

    Returns
    -------
    list
        List of Dash event strings
    """
    if 'dashEvents' in props and props['dashEvents']['type']['name'] == 'enum':
        events = [v['value'] for v in props['dashEvents']['type']['value']]
    else:
        events = []
    return events
f5cfb1b7feaa8833fecff47be2a3e9ba8128df54
628,797
def verifywin(item1, item2):
    """
    -> Check who won.

    Items are rock-paper-scissors moves in Portuguese:
    'pedra' = rock, 'papel' = paper, 'tesoura' = scissors.

    :param item1: machine
    :param item2: player
    :return: 'Loss' if item1 wins, 'Win' if item2 wins, 'Draw' otherwise
    """
    if item1 == 'tesoura' and item2 == 'papel':
        return 'Loss'
    elif item1 == 'papel' and item2 == 'pedra':
        return 'Loss'
    elif item1 == 'pedra' and item2 == 'tesoura':
        return 'Loss'
    elif item2 == 'tesoura' and item1 == 'papel':
        return 'Win'
    elif item2 == 'papel' and item1 == 'pedra':
        return 'Win'
    elif item2 == 'pedra' and item1 == 'tesoura':
        return 'Win'
    else:
        return 'Draw'
ba0e5119fde0c8f836f69e9ed67bbcb43f9e95f3
281,639
def extract_impression_id(line, assert_first_line=False):
    """Extracts the impression_id from a line"""
    if isinstance(line, bytes):
        line = line.decode()
    return line[:line.index("|")].strip()
09f67f24e4e517c1ac66df5cc1fb8d7d359ad3c9
42,806
def scrape_col(soup, tag, extended=False):
    """Return list of column headers

    Args:
        soup (BeautifulSoup): Navigable BeautifulSoup obj
        tag (str): tag you are wishing to locate
        extended (bool): if True, omit the prepended 'Year' label

    Returns:
        list: list of column headers
    """
    col_labels = []
    for col in soup.thead.find_all(tag):
        col_labels.append(col.text)
    if extended:
        return col_labels[1:]
    else:
        return ['Year'] + col_labels[1:]
8681d648c1db7b64b1c7d9bc9beef4e611bdd122
310,265
def find_conflicts(name):
    """
    Return conflicts corresponding to input parameter name.

    Parameters
    ----------
    name : string
        Parameter name.

    Returns
    -------
    conflicts : tuple
        Conflicting parameter names.
    """
    # list of tuples defining input parameters that conflict with each other
    conflicts = [('h', 'H0'),
                 ('T_cmb', 'Omega_g', 'omega_g', 'Omega0_g'),
                 ('Omega_b', 'omega_b', 'Omega0_b'),
                 # ('Omega_fld', 'Omega0_fld'),
                 # ('Omega_Lambda', 'Omega0_Lambda'),
                 ('N_ur', 'Omega_ur', 'omega_ur', 'Omega0_ur', 'N_eff'),
                 ('Omega_cdm', 'omega_cdm', 'Omega0_cdm', 'Omega_c',
                  'omega_c', 'Omega_m', 'omega_m', 'Omega0_m'),
                 ('m_ncdm', 'Omega_ncdm', 'omega_ncdm', 'Omega0_ncdm'),
                 ('A_s', 'ln10^{10}A_s', 'sigma8'),
                 ('tau_reio', 'z_reio')]
    for conf in conflicts:
        if name in conf:
            return conf
    return ()
966531cc2bd549c1ec2ee1af0895ea6c99f7ef39
444,688
def compare_changes(obj, **kwargs):
    """
    Compare two dicts returning only keys that exist in the first dict and
    are different in the second one
    """
    changes = {}
    for k, v in obj.items():
        if k in kwargs:
            if v != kwargs[k]:
                changes[k] = kwargs[k]
    return changes
ad88dc60cc3c93d0da15531bf0ef11e7610b1d66
51,022
def _traverse(graph, node, visited=None):
    """Returns list of all direct/indirect adjacent nodes for a given node"""
    # A mutable default (visited=[]) would be shared across top-level calls,
    # so initialize a fresh set here and pass it down the recursion instead.
    if visited is None:
        visited = set()
    nodes = []
    if node in visited:
        return nodes
    visited.add(node)
    nodes.append(node)
    for adjacent in graph[node]:
        nodes.extend(_traverse(graph, adjacent, visited))
    return nodes
9c10a58c96e69627fe2a6e4bfcb1fb3c9e750781
586,410
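A short sketch for _traverse above (not from the dataset), showing that the visited set stops the recursion on a cyclic graph:

graph = {"a": ["b", "c"], "b": ["c"], "c": ["a"]}  # contains the cycle a -> c -> a
print(_traverse(graph, "a"))  # ['a', 'b', 'c'] -- each reachable node exactly once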
def _aggregate(query, func, by=None):
    """
    Wrap a query in an aggregation clause.

    Use this convenience function if the aggregation parameters are coming
    from user input so that they can be validated.

    Args:
        query (str): Query string to wrap.
        func (str): Aggregation function of choice. Valid choices are
            'avg'/'mean', 'min', 'max', 'sum'.
        by (list of str): Optional list of variables by which to perform
            the aggregation.

    Returns:
        str: New query string.
    """
    if func == "mean":
        func = "avg"
    if func not in ["avg", "min", "max", "sum"]:
        raise ValueError("Unsupported aggregation function %r" % func)
    query = "{func}({query})".format(func=func, query=query)
    if by:
        query += " by({by_variables})".format(by_variables=", ".join(by))
    return query
e26aa715fadc5a58f5f87cee297fc3e6500120e1
12,568
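Usage sketch for _aggregate above (not from the dataset). The output reads like a PromQL aggregation; the metric name here is purely illustrative:

q = _aggregate("rate(http_requests_total[5m])", "mean", by=["job", "instance"])
print(q)  # avg(rate(http_requests_total[5m])) by(job, instance)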
def _format_params(cols, fields, where, crs, precision):
    """
    Transform parameters into a query input for ESRI's feature service.

    The feature service allows users to query & edit feature geoms &
    attributes. A breakdown of ESRI's feature service:
    https://developers.arcgis.com/rest/services-reference/enterprise/feature-service.htm

    Parameters
    ----------
    cols: str or list (default: 'all')
        str list of columns or attributes to include
    fields: list
        list of fields supported by the boundary
    where : str
        sql like statement to filter geometries and attributes
    crs : int (default: British National Grid)
        epsg codes to transform and extract geometries
    precision: int
        number of digits past the decimal point to be used

    Returns
    -------
    dict: dictionary containing query inputs for ESRI's feature service
    """
    if isinstance(cols, str):
        cols = cols.lower()
        if cols == "all":
            cols = "*"
    if isinstance(cols, list):
        cols = [col.lower() for col in cols]
        if all(elem in fields for elem in cols) is not True:
            raise ValueError(f"Only {fields} are supported for geometry type")
        cols = ", ".join(cols)
    return {
        "outFields": f"{cols}",
        "where": f"{where}",
        "outSR": crs,
        "f": "geojson",
        "geometryPrecision": f"{precision}",
    }
4c71004e49d2c79630a2e2dbb38d53a651ce748c
199,725
def get_brief_description(description):
    """Get brief description from paragraphs of command description."""
    if description:
        return description[0]
    else:
        return 'No description available.'
eb41a9dc6dd6145defb81f73f63b604312020f6d
688,845
def _get_tags() -> list[str]:
    """Return test tags.

    :return: Tag list.
    """
    return ["Test Tag 1", "Test Tag 2"]
79f04b17da4e3df3c2a2572980bdc0ca18d4f796
49,704
def _create_titled_dataset(root, key, title, data, comp_kw=None):
    """Helper to create a titled dataset in h5py"""
    comp_kw = {} if comp_kw is None else comp_kw
    out = root.create_dataset(key, data=data, **comp_kw)
    out.attrs['TITLE'] = title
    return out
6820d866ad7873f9077e9dbebfdf0f1fff0f6adb
549,773
import math
from typing import List, SupportsAbs


def permissive_range(start: float, stop: float,
                     step: SupportsAbs[float]) -> List[float]:
    """
    Returns a range (as a list of values) with floating point steps.
    Always starts at start and moves toward stop, regardless of the sign
    of step.

    Args:
        start: The starting value of the range.
        stop: The end value of the range.
        step: Spacing between the values.
    """
    signed_step = abs(step) * (1 if stop > start else -1)
    # take off a tiny bit for rounding errors
    step_count = math.ceil((stop - start) / signed_step - 1e-10)
    return [start + i * signed_step for i in range(step_count)]
fb42aec19281bee289b5af68a4d3f5938ae01260
549,282
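A quick demonstration of permissive_range above (not from the dataset), showing that the sign of step is ignored and the direction comes from start/stop:

print(permissive_range(0.0, 1.0, 0.25))   # [0.0, 0.25, 0.5, 0.75]
print(permissive_range(1.0, 0.0, 0.25))   # [1.0, 0.75, 0.5, 0.25]
print(permissive_range(1.0, 0.0, -0.25))  # same as above: only abs(step) is used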
def base36_encode(integer, width=None):
    """
    Encodes integer as a string in base 36, prepending 0's until string is
    of length equal to width.

    Parameters
    ----------
    integer : int
    width : int, optional

    Returns
    -------
    str
    """
    digit_set = "0123456789abcdefghijklmnopqrstuvwxyz"
    digits = []
    while integer != 0:
        integer, i = divmod(integer, 36)
        digits.append(digit_set[i])
    if not digits:
        # Without this, an input of 0 would encode to an empty string.
        digits.append("0")
    if width is not None:
        while len(digits) < width:
            digits.append("0")
    digits.reverse()
    return "".join(digits)
1f6a54078807becfdfb354518ff507423a7ca7c5
331,150
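A usage sketch for base36_encode above (not from the dataset), round-tripping through Python's int(..., 36):

print(base36_encode(35))      # z
print(base36_encode(36))      # 10
print(base36_encode(36, 4))   # 0010 -- width pads with leading zeros
assert int(base36_encode(123456), 36) == 123456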
def analyze_text(spacy_doc):
    """Lemmatize, get word frequencies and part-of-speech tags

    Parameters:
        spacy_doc (spacy nlp object): output from spacy_analyze

    Returns:
        dictionary: {'lemma': {'orig': list, 'pos': string, 'count': int}}
    """
    worddict = {}
    for word in spacy_doc:
        key = word.lemma_.lower()
        # filter out non-alphabetic words
        if word.is_alpha:
            # store word counts and tokens
            if key in worddict:
                orig = worddict[key].get('orig')
                if key == word.text.lower():
                    worddict[key]['count'] += 1
                elif orig:
                    if word.text.lower() not in orig:
                        worddict[key]['orig'].append(word.text.lower())
                        worddict[key]['count'] += 1
                    else:
                        worddict[key]['count'] += 1
                else:
                    worddict[key]['orig'] = [word.text.lower()]
                    worddict[key]['count'] += 1
            elif key == word.text.lower():
                worddict[key] = {'pos': word.pos_, 'count': 1}
            else:
                worddict[key] = {
                    'orig': [word.text.lower()],
                    'pos': word.pos_,
                    'count': 1
                }
    return worddict
27d6a3065fcde845b5b72625555d0546df2fa321
403,755
def cubic_ease_in_ease_out(t, b, c, d):
    """
    Accelerate until halfway, then decelerate.

    :param t: current time
    :param b: beginning value
    :param c: total change in value
    :param d: duration
    """
    t /= d / 2
    if t < 1:
        return c / 2 * t * t * t + b
    t -= 2
    return c / 2 * (t * t * t + 2) + b
fdad310108a3a2962cba38ceddb7600af045f644
296,268
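A worked example for cubic_ease_in_ease_out above (not from the dataset): easing a value from 0 to 100 over a duration of 10 makes the slow-fast-slow profile visible.

frames = [round(cubic_ease_in_ease_out(t, 0.0, 100.0, 10.0), 1) for t in range(11)]
print(frames)  # [0.0, 0.4, 3.2, 10.8, 25.6, 50.0, 74.4, 89.2, 96.8, 99.6, 100.0]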
def FromProperty(prop):
    """Get a default value from a property.

    Args:
        prop: properties._Property, The property to fetch.

    Returns:
        A niladic function that fetches the property.
    """
    def DefaultFunc():
        return prop.Get(required=True)
    return DefaultFunc
5de5730077f92dfc477a968999e5a462446126fe
213,152
def itemReadPredicted(W, A, R, t):
    """Weight predicted at time t.

    Weight predicted by given weight vector W at time t given arrival and
    removal matrices A and R.

    Args:
        W: R^N weight vector where N is the number of ItemReads.
        A: R^{TxN} arrival matrix where A[t][i] is whether ItemRead i
            arrived at time t.
        R: R^{TxN} removal matrix where R[t][i] is whether ItemRead i was
            removed at time t.
        t: time.

    Returns:
        Predicted weight.
    """
    return sum([w * (a - r) for (w, a, r) in zip(W, A[t], R[t])
                if a - r and w is not None])
422f204df3ca10e10fa36adb5033c4221da60c57
374,742
def check_system_query_status(data):
    """Check if any server crashed.

    Args:
        data (dict): dictionary of system query data obtained from
            DmgCommand.system_query()

    Returns:
        bool: True if no server crashed, False otherwise.
    """
    failed_states = ("Unknown", "Evicted", "Errored", "Unresponsive")
    failed_rank_list = []
    # Check the state of each rank.
    for rank in data:
        rank_info = [
            "{}: {}".format(key, data[rank][key])
            for key in sorted(data[rank].keys())
        ]
        print("Rank {} info:\n  {}".format(rank, "\n  ".join(rank_info)))
        if "state" in data[rank] and data[rank]["state"] in failed_states:
            failed_rank_list.append(rank)
    # Display the details of any failed ranks
    if failed_rank_list:
        for rank in failed_rank_list:
            print("Rank {} failed with state '{}'".format(
                rank, data[rank]["state"]))
    # Return True if no ranks failed
    return not bool(failed_rank_list)
d72bce114d4692519b0834589b15689a9084d568
145,889
def get_child_parents(edges):
    """Puts each non-parent node together with its parents

    Parameters
    ----------
    edges : list
        A list of tuples corresponding to the Bayesian network structure as
        described in the input file

    Returns
    -------
    child_parents
        A dictionary with non-parent nodes as keys and their parents as
        values
    """
    child_parents = {}
    for e in edges:
        if e[1] in child_parents.keys():
            child_parents[e[1]].append(e[0])
        else:
            child_parents[e[1]] = [e[0]]
    return child_parents
4b01a264ee1e2498c37f1fa0695f9430c207f04d
16,750
def get_name(in_file):
    """
    :param in_file: Path to file to convert
    :return: The inferred sample name, defined by file name shorn of any
        file extensions
    """
    return in_file.split('/')[-1].split('.')[0]
30cea5b772a230b9c635f6dfbb32362480be2422
76,975
def get_labels_for_ids(labels, ids, ids_are_one_indexed=False):
    """Get the human-readable labels for given ids.

    Args:
        labels: dict, string-ID to label mapping from ImageNet.
        ids: list of ints, IDs to return labels for.
        ids_are_one_indexed: whether to increment passed IDs by 1 to
            account for the background category. See ArgParser
            `--ids_are_one_indexed` for details.

    Returns:
        list of category labels
    """
    return [labels[str(x + int(ids_are_one_indexed))] for x in ids]
bc39fe8e7ccaac9ba2abc8a5a2e2fa0a779c82bf
43,282
def convert_mpas_fgco2(mpas_fgco2):
    """Convert native MPAS CO2 flux (mmol m-3 m s-1) to (molC m-2 yr-1)

    Args:
        mpas_fgco2 (xarray object): Dataset or DataArray containing native
            MPAS-O CO2 flux output.

    Returns:
        conv_fgco2 (xarray object): MPAS-O CO2 flux in mol/m2/yr.
    """
    # The -1 term ensures that negative is uptake of CO2 by the ocean.
    # MPAS defaults to gregorian noleap calendar (thus, 365).
    conv_fgco2 = mpas_fgco2 * -1 * (60 * 60 * 24 * 365) * (1 / 10 ** 3)
    return conv_fgco2
37b685b6697344d3120abb8a5a55f5e2b44c83f2
195,191
def analyze_segmentation(seg_result):
    """
    Parameters
    ----------
    seg_result:
        the result of the segmentation: a list of tuples, where each tuple
        contains:
        * label in 'speech', 'music', 'noEnergy'
        * start time of the segment
        * end time of the segment

    Returns
    -------
    count_speech: integer
        the number of segments labeled as 'speech'
    """
    count_speech = 0
    for segment in seg_result:
        if segment[0] == 'speech':
            count_speech += 1
    return count_speech
1f560eafa1a39ab05fbd3ef741128110e9ab61de
498,872
from typing import Callable


def preprocess_inceptionv1() -> Callable:
    """Returns preprocessing function to prepare input for original
    Tensorflow's InceptionV1 model. InceptionV1 takes in values from
    [-117, 138] so the preprocessing function takes in values from 0-1
    and maps them to [-117, 138].

    See https://github.com/tensorflow/lucid/blob/master/lucid/modelzoo/other_models/InceptionV1.py#L56
    for details. Thanks to ProGamerGov for this!

    :return: Preprocessing function
    :rtype: Callable
    """
    return lambda x: x * 255 - 117
d0b97de736b4ec1428e28c8b77e68c906ca12f66
262,661
def _replace_tempate_text(template, replacements):
    """Replace substrings in a given template based on a dictionary of
    replacements."""
    output = template
    for src, target in replacements.items():
        output = output.replace(src, target)
    return output
2daed848095495412090cef297d0ab34125cc749
459,410
def clean_sheet_value(value):
    """
    Takes a spreadsheet cell value and returns a cleaned version

    Args:
        value (str): A raw spreadsheet cell value

    Returns:
        str or None: A string with whitespace stripped, or None if the
        resulting value was an empty string
    """
    stripped = value.strip()
    return None if stripped == "" else stripped
ff6b2da6f0ce3d718e71b43794e0a224e7f7b072
646,464
def valid_phage_titer(titer, o):
    """
    Decides whether or not a phage titer is valid and, if not, decides why.

    Args:
        titer (float): Titer to decide on.
        o (Options): Options object for lookup

    Returns (tuple): (generation, tendency), where tendency is
        -1 if the phage titer was lower than mincp at some transfer point
        0 if the phage titer was valid the whole time
        1 if the phage titer was higher than maxcp at some transfer point
    """
    gen = 1
    while o.min_cp < titer[(gen * (o.tsteps + 1)) - 1] < o.max_cp:
        if gen == o.epochs:
            return gen, 0
        gen += 1
    if o.min_cp > titer[(gen * (o.tsteps + 1)) - 1]:
        tendency = -1
    else:
        tendency = 1
    return gen, tendency
d995917d5e0abcee4e10bc8209f8c3fee4ad2e2b
70,286
def span2toks(span, document):
    """
    :param span: span array representing [start, end]
    :param document: list of tokens from which to extract span tokens
    :return: tokens from document indicated by span indices
    """
    return document[span[0]:span[1] + 1]
a46570c5275ec9c987e999beabc967e284b62b1a
648,918
import copy


def stringify_values(dictionary):
    """
    Given a dictionary convert all values into the string representation of
    the value. Useful for dicts that only allow string values (like
    os.environ).

    :param dictionary: the dictionary to convert
    :return: a copy of the dictionary where all values are now string.
    :rtype: dict
    """
    dict_copy = copy.deepcopy(dictionary)
    for key, value in dict_copy.items():
        if isinstance(value, dict):
            dict_copy[key] = stringify_values(value)
        else:
            dict_copy[key] = str(value)
    return dict_copy
c7a231c1bb127f673ab58a8ea63146ea7568add0
598,703
from typing import List, Set


def get_elements(li: List[int]) -> Set:
    """Extracts all the unique elements from a list of numbers.

    doctests:
    >>> get_elements([1, 2, 3, 1, 2, 3, 5, 4, 7, 7])
    {1, 2, 3, 4, 5, 7}
    >>> get_elements([2, 2, 2, 3, 1])
    {1, 2, 3}
    """
    x = set()
    for i in li:
        # set.add is already a no-op for elements that are present
        x.add(i)
    return x
f9514ed32d553649e79a952ac1c8c5e68db5cf94
524,093
def _mult_diag_matrix(D, mtx, on_right=False):
    """Multiply diagonal matrix D to mtx

    Args:
        D (N ndarray) - vector holding the diagonal entries of the matrix
        mtx (ndarray) - matrix to multiply
        on_right (bool) - whether to return D * mtx (False) or
            mtx * D (True)
    """
    if not on_right:
        return (D * mtx.T).T
    else:
        return D * mtx
f2d30e8cbb2ef7b842d3daa922185bc3758a5094
630,430
def restore_scale(expr, mean, std):
    """
    Makes each gene j have mean_j and std_j

    :param expr: matrix of gene expressions. Shape=(nb_samples, nb_genes)
    :param mean: vector of gene means. Shape=(nb_genes,)
    :param std: vector of gene stds. Shape=(nb_genes,)
    :return: Rescaled gene expressions
    """
    return expr * std + mean
58175da2d8b3543d1eac6e70f04d91f2cb10f542
505,305
def validate_doubles(password):
    """
    It contains at least one letter that appears twice in a row
    """
    for i in range(len(password) - 1):
        if password[i] == password[i + 1]:
            return True
    return False
a7c955449060b363aee63a1ff6f52036294485c8
559,864
def _filter_kwargs(prefix, kwargs):
    """Return new kwargs matching a prefix, or empty dict if no matches."""
    tmp_kwargs = {}
    for kwarg, value in kwargs.items():
        if kwarg.startswith(prefix):
            # Drop the prefix and any separating underscore. Note that
            # str.lstrip(prefix) would strip *characters*, not the prefix,
            # so slice by length instead.
            kwarg = kwarg[len(prefix):].lstrip('_')
            tmp_kwargs[kwarg] = value
    return tmp_kwargs
5ff205dd7f8e0d7eed69cb244090fcf6d391dae6
173,807
def make_sequential(documents, answers):
    """
    Transform an answer-based dataset (i.e. with a list of documents and a
    list of keyphrases) to a sequential, ner-like dataset, i.e. where the
    answer set for each document is composed by the lists of the documents'
    tokens marked as non-keyphrase (0), beginning of keyphrase (1) and
    inside-keyphrase (2).

    For example, for the tokens "I am a python developer since today." If
    the keyphrases are "python developer" and "today", the answer set for
    these tokens is "[0 0 0 1 2 0 1]"

    :param documents: the list of documents
    :param answers: the list of keyphrases
    :return: the new answer set
    """
    seq_answers = {}
    for key, document in documents.items():
        doc_answers_set = answers[key]
        # Sort by length of the keyphrase. We process shorter KPs first so
        # that if they are contained by a longer KP we'll simply overwrite
        # the shorter one with the longer one
        doc_answers_set.sort(key=len)
        # This field will contain the answer.
        # We initialize it as a list of zeros and we will fill it
        # with 1s and 2s later
        doc_answers_seq = [0] * len(document)
        for answer in doc_answers_set:
            # Find where the first word of the KP appears
            appearances = [i for i, word in enumerate(document)
                           if word == answer[0]]
            for idx in appearances:
                is_kp = True
                # Check if the KP matches also from its second word on.
                # Break on the first mismatch: without the break, only the
                # last comparison would decide, wrongly accepting partial
                # matches.
                for i in range(1, len(answer)):
                    if (i + idx) >= len(document) or answer[i] != document[i + idx]:
                        is_kp = False
                        break
                # If we found an actual KP, mark the tokens in the output
                # list.
                if is_kp:
                    doc_answers_seq[idx] = 1
                    for i in range(1, len(answer)):
                        doc_answers_seq[idx + i] = 2
        seq_answers[key] = doc_answers_seq
    return seq_answers
6e7f2f502f1389cdc68e644128befd1568feecf1
267,780
def find_stem(stratified_string: str):
    """
    Find the stem of the compartment name as the text leading up to the
    first occurrence of the joining string "X".
    Should run slightly faster than using find_name_components.
    """
    return stratified_string.split("X")[0]
867d01d2b8650ed6943bb507eba3977004dd31a4
52,209
import string


def remove_punctuation(input_string):
    """
    Function to remove punctuation once we know it is a question
    Function taken from A3

    Inputs: string
    Outputs: string
    """
    out_string = ''
    for i in input_string:
        if i not in string.punctuation:
            out_string += i
    return out_string
733125e98483d992726617fd296eefb885c89055
202,239
def build_html_page(title, body):
    """ Build the simple tag skeleton for a title and body. """
    return """<html>
<head>
    <title>%s</title>
    <link rel = "stylesheet" type = "text/css" href = "jsdoc.css" />
</head>
<body>
%s
</body>
</html>""" % (title, body)
c7884ce84405d8d3df2350060c366cffff3e47fe
489,722
def country_code(xml_el, namespaces):
    """Find the country code for this element"""
    if xml_el.get("class"):
        return xml_el.get("class").split()[-1]
    elif xml_el.find("svg:path", namespaces) is not None:
        return country_code(xml_el.find("svg:path", namespaces), namespaces)
    else:
        return country_code(xml_el.find("svg:g", namespaces), namespaces)
3239389a381aa64dd5b8ad1ba66d6a88fb90292e
224,202
def pop(lines):
    """
    Pop from the top of list, this has the (desired) side-effect of
    removing the first line from the list.

    :param lines: the list of extracted text fields, one per line
    :return: the next line
    """
    return lines.pop(0)
fc08660c0ffe34b55beb220db5b73b02e6291611
337,470
from operator import add


def avg(S1, S2):
    """
    Average of 2 scalars

    <div class=jython>
    AVG (S1, S2) = ( S1 + S2 ) / 2
    </div>
    """
    return add(S1, S2) / 2
2d912e2a1e77147f821c4500ab5e7543e44aedb8
220,422
def join_regex(regexes):
    """Combine a list of regexes into one that matches any of them."""
    if len(regexes) > 1:
        return "(" + ")|(".join(regexes) + ")"
    elif regexes:
        return regexes[0]
    else:
        return ""
616a4b51a64244e14b6029a91026f65a20077f06
693,578
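A small check for join_regex above (not from the dataset). Note that each alternative gets its own group, which shifts any group numbers inside the parts; for plain matching that is harmless:

import re

pat = re.compile(join_regex([r"\d+", r"[a-f]+"]))  # -> (\d+)|([a-f]+)
assert pat.fullmatch("123")
assert pat.fullmatch("abc")
assert not pat.fullmatch("xyz")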
def m_p_s_to_km_p_hr(m_p_s):
    """Convert to km/hr."""
    return m_p_s * 3.6
478e3931dcab3301079b0fbc692124c3c3c93557
669,651
def get_user_info(account_name, account_key):
    """Returns an Azure Blob Storage User with .display_name and .id,
    or None
    """
    if not (account_name and account_key):
        return None
    return {'display_name': account_name, 'id': account_name}
3711c97a97a0c61b75ef8d6396207d06c3f81465
154,201
def axis_for_letter(letter):
    """Returns 0, 1 or 2 for 'K', 'J', or 'I'; as required for axis
    arguments in FineCoarse methods."""
    assert isinstance(letter, str) and len(letter) == 1
    u = letter.upper()
    return 'KJI'.index(u)
32184d42cd5ebef7cedf79767a173633a7d32672
136,634
def pretty_solution(solution):
    """
    Purpose: Modify the solution so that it is represented as a mostly
        legible string.
    Input: Solution as a list of boolean values.
    Return: Solution represented as a string
    """
    pretty = ""
    ith_literal = 1
    ten_per_line = 0
    for literal in solution:
        pretty = pretty + "L" + str(ith_literal) + ": " + str(literal) + " "
        ith_literal = ith_literal + 1
        ten_per_line = ten_per_line + 1
        if ten_per_line >= 10:  # was "> 10", which wrapped after 11 entries
            ten_per_line = 0
            pretty = pretty + "\n"
    return pretty
00ce094781120171e42fcbb0b82ba817ace557d8
548,259
def convert_title_to_snake_case(key):
    """Converts title-cased key to snake-cased key."""
    return '_'.join(w.lower() for w in key.split(' '))
c6b7181e7769883d0cf8a54d3ce9f933c29c308b
351,919
from typing import List, Tuple

import torch


def scale_coords(
    img_size: List[int], coords: torch.Tensor, img0_size: Tuple[int, int]
) -> torch.Tensor:
    """Rescales bounding box coordinates (x1, y1, x2, y2) from `img_size`
    to `img0_size`.

    Args:
        img_size (List[int]): Model input size (w x h).
        coords (torch.Tensor): Bounding box coordinates.
        img0_size (Tuple[int, int]): Size of original video frame (h x w).

    Returns:
        (torch.Tensor): Bounding boxes with resized coordinates.
    """
    # gain = old / new
    gain = min(float(img_size[0]) / img0_size[1],
               float(img_size[1]) / img0_size[0])
    pad_x = (img_size[0] - img0_size[1] * gain) / 2  # width padding
    pad_y = (img_size[1] - img0_size[0] * gain) / 2  # height padding
    coords[:, [0, 2]] -= pad_x
    coords[:, [1, 3]] -= pad_y
    coords[:, :4] /= gain
    coords[:, :4] = torch.clamp(coords[:, :4], min=0)
    return coords
8d2a1925874aedbe02f23c77e7e58131626ee88e
333,188
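A usage sketch for scale_coords above (not from the dataset; requires torch). A 640x640 letterboxed input mapped back to a 720x1280 frame has 140 px of vertical padding, so a box spanning the padded image should map back to the full frame:

import torch

boxes = torch.tensor([[0.0, 140.0, 640.0, 500.0]])  # x1, y1, x2, y2 in model space
out = scale_coords([640, 640], boxes.clone(), (720, 1280))
print(out)  # expected values: [[0., 0., 1280., 720.]]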
import six


def trim(docstring):
    """
    Convert tabs to spaces, and remove the extra spaces / tabs that are in
    front of the text in docstrings.

    Implementation adapted from http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = six.u(docstring).expandtabs().splitlines()
    lines = [line.strip() for line in lines]
    res = six.u('\n').join(lines)
    return res
ae1eaf3b2a81082291382e161a88d46c661e399a
230,261
def check_submission(sub):
    """
    Checks that the submission meets all the requirements.

    1. No more than 22 Boxes per frame.
    2. Only one label prediction per video/frame
    3. No duplicate boxes per frame.
    4. Boxes must be within video area:
        - `top` and `left` must each be >= 0
        - The sum of `left` and `width` must be <= 1280
        - The sum of `top` and `height` must be <= 720

    Args:
        sub : submission dataframe.

    Returns:
        True -> Passed the tests
        False -> Failed the test
    """
    # Maximum of 22 boxes per frame.
    max_box_per_frame = sub.groupby(["video_frame"])["label"].count().max()
    if max_box_per_frame > 22:
        print("Has more than 22 boxes in a single frame")
        return False
    # Only one label allowed per frame.
    has_duplicate_labels = sub[["video_frame", "label"]].duplicated().any()
    if has_duplicate_labels:
        print("Has duplicate labels")
        return False
    # Check for unique boxes
    has_duplicate_boxes = (
        sub[["video_frame", "left", "width", "top", "height"]]
        .duplicated().any())
    if has_duplicate_boxes:
        print("Has duplicate boxes")
        return False
    if sub['left'].min() < 0:
        print('left column has values less than 0')
        return False
    if sub['top'].min() < 0:
        print('top column has values less than 0')
        return False
    if (sub['left'] + sub['width']).max() > 1280:
        print('left+width columns has values greater than 1280')
        return False
    if (sub['top'] + sub['height']).max() > 720:
        print('top+height columns has values greater than 720')
        return False
    return True
28ee721b0da5abfb13183d26cabf5687eeb94f5b
590,263
def str_to_c_string(string):
    """Converts a Python bytes to a C++ string literal.

    >>> str_to_c_string(b'abc\x8c')
    '"abc\\\\x8c"'
    """
    return repr(string).replace("'", '"').removeprefix('b')
5316e61282d3ce3a807764588904529291453a37
18,222
def search_node_from_coord(x, y, nodes):
    """Get node's number from coordinates"""
    return int(nodes.loc[(nodes['x'] == x) & (nodes['y'] == y)]['n'])
e61bb8351bbad871254a29fc0586da3a0fda0b24
653,213
def create_filepath(ds, root_path="."):
    """Generate a filepath when given an xarray dataset"""
    varname = list(ds.data_vars)[0]
    start = ds.generalVerticalLayer.data[0]
    end = ds.generalVerticalLayer.data[-1]
    filepath = f"{root_path}/{varname}_{start}_{end}.nc"
    return filepath
17afd9907c6758b792fafdc0d61c1e41a707be1a
615,750
def cross(A, B):
    """
    Cross product of elements in A and elements in B.

    :param A:
    :param B:
    :return:
    """
    return [s + t for s in A for t in B]
1601cd69410a7cc6f6b847dc7796b2e330f21e72
554,741
def compute_ks_for_conv2d(w_in: int, w_out: int, padding: int = 1) -> int:
    """Compute the kernel size to use with conv2d when we want the output
    tensor to have smaller spatial dimensions, i.e. w_out < w_in. We assume
    the filter has stride=1.

    Computation is based on the formula
        w_out = floor(w_in - k + 2p) + 1

    We get a valid positive integer for k only if:
        w_out - w_in < 2p - 1
    """
    assert w_out - w_in < 2 * padding - 1, "No valid kernel size is possible"
    c = w_out - w_in - 2 * padding - 1
    return -c
277781ab8b98cf402ad7083126c4c9ec21201868
89,308
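A quick check of the arithmetic in compute_ks_for_conv2d above (not from the dataset), against the stride-1 output-size formula w_out = w_in - k + 2p + 1:

k = compute_ks_for_conv2d(w_in=28, w_out=26, padding=1)
assert k == 5
assert 28 - k + 2 * 1 + 1 == 26  # plugging k back into the formula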
import requests


def is_private(username):
    """
    Check if user is private

    :param username:
    :return: bool
    """
    r = requests.get(f"https://www.instagram.com/{username}/?__a=1")
    private_status = r.json()['graphql']['user']['is_private']
    return private_status
93265f746a021f8d6afd2f5d64c7c7ee9e9a3de7
73,354
def which_db_version(cursor):
    """
    Return version of DB schema as string.

    Return '5', if iOS 5.
    Return '6', if iOS 6 or iOS 7.
    """
    query = "select count(*) from sqlite_master where name = 'handle'"
    cursor.execute(query)
    count = cursor.fetchone()[0]
    if count == 1:
        db_version = '6'
    else:
        db_version = '5'
    return db_version
07b1dbcea3fb4bf65bba5c578257440d39b6784c
1,400
import torch


def voxel_grid_coords(grid_shape):
    """
    Find normalized voxel coords corresponding to grid cells

    These computations are based on pytorch3d.ops.cubify
    https://pytorch3d.readthedocs.io/en/stable/_modules/pytorch3d/ops/cubify.html#cubify

    Inputs:
    - grid_shape: 3D integer array like containing (D, W, H)

    Returns:
    - FloatTensor of shape (D, W, H, 3) xyz (HWD) normalized coordinates
      for each cell
    """
    assert len(grid_shape) == 3
    zyx_grid = torch.meshgrid([torch.arange(grid_shape[i]) for i in range(3)])
    zyx_grid = torch.stack(zyx_grid, dim=0)
    grid_shape_expanded = torch.tensor(grid_shape) \
        .view(3, 1, 1, 1).expand(-1, *grid_shape) \
        .float()
    # map to [-1, 1]^3 range from [0, N-1]^3
    zyx_grid = zyx_grid * 2.0 / (grid_shape_expanded - 1.0) - 1.0
    zz, yy, xx = torch.unbind(zyx_grid, dim=0)
    points = torch.stack([xx, yy, zz], dim=-1)
    return points
446149a3d6290ab774c137eca77d7e1f5c3525c2
285,613
import math


def evaluate_almost_equal(metrics, results, abs_tol=None, rel_tol=None):
    """
    Evaluate for each given metric if values in results are almost equal

    Parameters
    ----------
    metrics: List[str], metrics names
    results: dict, results to be evaluated
    abs_tol: float, absolute error tolerance
    rel_tol: float, relative difference tolerance

    Returns
    -------
    dict, mapping each metric name to True if all its values in results
    are almost equal; the bool False is returned if `metrics` is empty
    """
    # return False if empty
    if len(metrics) == 0:
        return False
    eval_summary = {}
    for i, metric in enumerate(metrics):
        v_eval = [res[i] for res in results.values()]
        first_v = v_eval[0]
        if abs_tol is not None and rel_tol is not None:
            eval_summary[metric] = all(
                math.isclose(v, first_v, abs_tol=abs_tol, rel_tol=rel_tol)
                for v in v_eval)
        elif abs_tol is not None:
            eval_summary[metric] = all(
                math.isclose(v, first_v, abs_tol=abs_tol) for v in v_eval)
        elif rel_tol is not None:
            eval_summary[metric] = all(
                math.isclose(v, first_v, rel_tol=rel_tol) for v in v_eval)
        else:
            eval_summary[metric] = all(
                math.isclose(v, first_v) for v in v_eval)
    return eval_summary
9d6745d9e6f15d68285cb227f830a83bccf6e2f0
587,765
def filter_pre_post_table(
    combi_table,
    focus_countries,
    reg_var="coordinator_country",
    focus_var="project_lq",
    volume_var="project_total",
):
    """Filters an activity table to focus on particular variables and
    countries"""
    combined_table_focus = combi_table.loc[
        combi_table[reg_var].isin(focus_countries)
    ].query(f"variable=='{focus_var}'")
    size_lookup = combi_table.query(f"variable=='{volume_var}'")[
        [reg_var, "cluster_covid", "value"]
    ].rename(columns={"value": "volume"})
    combined_table_focus = combined_table_focus.merge(
        size_lookup, on=[reg_var, "cluster_covid"]
    )
    return combined_table_focus
b8f128e4ed15902fc5ba8794bfc1f86c6d178fc7
587,229
import socket


def uds_reachable(uds_path, return_sock=False):
    """
    Check if the unix domain socket at path `uds_path` is reachable.

    Parameters
    ----------
    uds_path : str
    return_sock : bool, optional (default is False)
        Return the socket.

    Returns
    -------
    bool, socket
        Whether the socket is reachable, and the socket if `return_sock`
        else None.

    Remember to close the socket!
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(uds_path)
    except (ConnectionRefusedError, FileNotFoundError):
        sock.close()
        return False, None
    if not return_sock:
        # shutdown() is only valid on a connected socket, so it must not run
        # in the failure path above (the original `finally` block raised
        # OSError there, masking the intended return value).
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
    return True, sock if return_sock else None
4c79a301dc4dfea5a3741325fe5abfc373d4c5e1
648,193
def get_initial_readme_info(readme_path):
    """
    Get the initial lines of a readme, ignoring the list of contributions
    generated previously

    :param readme_path: the path of the readme
    :return: the initial lines of the readme as string separated by '\n'
    """
    with open(readme_path) as f:
        base_readme = ''
        lines = f.read().splitlines()
        for l in lines:
            line = l.strip()
            if line == '## List of contributions':
                break
            base_readme += line + '\n'
    return base_readme + '## List of contributions'
214bc8cd11134a96941528a093a1208acc4bb92a
349,596
def MapLines(f, s):
    """Apply a function across each line in a flat string.

    Args:
        f: A string transform function for a line.
        s: A string consisting of potentially multiple lines.

    Returns:
        A flat string with f applied to each line.
    """
    return '\n'.join(f(line) for line in s.split('\n'))
ece6243dda8128c2640f14c938e54123b899eebc
518,234
def break_string_sequence_to_words(seq):
    """
    Breaks a sequence containing multi-word strings into a set containing
    individual words.

    :param seq: The sequence containing multi-word strings
    :return: A set containing all the individual words

    >>> break_string_sequence_to_words(['hello world', 'foo bar', 'hello', 'red']) \
        == set({'hello', 'world', 'foo', 'bar', 'red'})
    True
    """
    return {word for string in seq for word in string.split()}
52046b3b81a1e8864a4fd238a1df31fcf681b284
104,724
def validate_ip(ip: str) -> bool:
    """
    Validate an IP

    :param ip: String to check if it's an IP. A trailing ':port' suffix
        is ignored.
    :type ip: str
    :return: True if the ip param is an IP, False otherwise.
    :rtype: bool
    """
    a = ip.rsplit(":", 1)[0].split(".")
    if len(a) != 4:
        return False
    for x in a:
        if not x.isdigit():
            return False
        i = int(x)
        if i < 0 or i > 255:
            return False
    return True
05083d2e53e4c951489c60a888d1322eb3204638
495,493
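A few spot checks for validate_ip above (not from the dataset), including the trailing-port tolerance that comes from the rsplit(':', 1):

assert validate_ip("192.168.0.1")
assert validate_ip("192.168.0.1:8080")  # trailing :port is ignored
assert not validate_ip("256.1.1.1")     # octet out of range
assert not validate_ip("1.2.3")         # too few octets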
def _make_name(*args, sep="_"):
    """Combine elements of `args` into a new string"""
    _args = (arg for arg in args if arg != "")
    return sep.join(_args)
8786015997dc7656cd2957080088ab4fe76f3e9f
147,851
def initialized(machine):
    """
    Check to see if the given machine is initialized.

    :param machine: Machine to check to see if default attributes are set
    :return: `True` if the machine has its attributes set, `False` otherwise
    """
    return all(hasattr(machine, attr)
               for attr in ('state', 'transition', 'transition_event'))
a625827f7c53102a98521042a050e682df69b13e
664,995
def remove_duplicates(mylist):
    """Removes duplicate values from a list (order is not preserved)."""
    return list(set(mylist))
2a99ce44825e69e4108ae8d69d34243fef2c97da
414,285
def get_public_endpoint_url_by_name(body_response, endpoint_name, region_name):
    """
    Get the public endpoint for service in a region by service NAME

    :param body_response: Keystone response (/token)
    :param endpoint_name: Service name
    :param region_name: Name of the region
    :return: Public URL or None if not found
    """
    service_list = body_response['access']['serviceCatalog']
    public_url = None
    for service in service_list:
        if service['name'] == endpoint_name:
            for endpoint in service['endpoints']:
                if endpoint['region'] == region_name:
                    public_url = endpoint['publicURL']
    return public_url
5fb7dcb91132b8d6dfabc9b5b9ec2b0f66e18fc7
570,202