Dataset columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
from typing import OrderedDict


def read_config(config_file_path):
    """Read a SU2 configuration file.

    'read_config' returns a dictionary of the data found in the SU2
    configuration file.

    Args:
        config_file_path (str): SU2 configuration file path

    Returns:
        data_dict (dict): Dictionary of the data from the SU2 configuration file
    """
    data_dict = OrderedDict()

    with open(config_file_path, 'r') as f:
        for line in f:
            if line.startswith('%') or '=' not in line:
                continue
            # Split on the first '=' only, in case the value itself contains '='.
            key, value = line.split('=', 1)
            if '(' in value:
                new_value = value.split('(')[1].split(')')[0]
                value_list = new_value.split(',')
                strip_value_list = [item.strip() for item in value_list]
                data_dict[key.strip()] = strip_value_list
            else:
                data_dict[key.strip()] = value.strip()

    return data_dict
312e423faf60f4947cf4b57d75c5df50304c0501
414,549
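A minimal usage sketch for read_config above; the file name, keys, and values are made up for illustration:

# Hypothetical config contents:
#   % SU2 configuration
#   MESH_FILENAME = mesh.su2
#   MARKER_EULER = ( wing, fuselage )
cfg = read_config('config.cfg')
print(cfg['MESH_FILENAME'])   # -> 'mesh.su2'
print(cfg['MARKER_EULER'])    # -> ['wing', 'fuselage']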
def build_endpoint_rule_name(endpoint, method):
    """Build policy rule name for endpoint."""
    return ('%(endpoint)s:%(method)s' % {'endpoint': endpoint,
                                         'method': method.lower()})
6b564dacdc70804544c3f1ce3f5a1fbe33c390a2
582,549
def to_bytes(text):
    """Convert a text string to a byte string"""
    # Parameter renamed from 'str' so it no longer shadows the builtin.
    return text.encode('utf-8')
78457eb656a8a2ad82c993f19765663dfdd0bdf3
138,365
def filter_sessions_by_room(sessions, room):
    """only keep sessions within the room <room>

    Args:
        sessions: list of tuples
        room: str

    Returns:
        filtered sessions as list of tuples
    """
    return [
        [session[0].split(":")[0], session[1], session[2]]
        for session in sessions
        if session[0].endswith(room)
    ]
2d53460ac7e4786652885009f5b6ccc212d21c5b
305,809
def find_ngrams(input_list: list, n: int) -> list:
    """
    Return a list of n_grams from a list

    Args:
        input_list (list): list from which n_grams are extracted
        n (int): n gram length

    Returns:
        list: n-gram list
    """
    if n == 1:
        return input_list
    else:
        return list(zip(*[input_list[i:] for i in range(n)]))
851475b6ae19daed9c2b94fcf84d58822ccd96d2
517,567
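A short worked example of find_ngrams; the zip over shifted slices is what produces the sliding windows:

tokens = ['a', 'b', 'c', 'd']
print(find_ngrams(tokens, 2))   # [('a', 'b'), ('b', 'c'), ('c', 'd')]
print(find_ngrams(tokens, 3))   # [('a', 'b', 'c'), ('b', 'c', 'd')]
# Note the n == 1 branch returns the original items, not 1-tuples.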
def filter_out_dict_keys(adict, deny):
    """Return a similar dict, but not containing the explicitly denied keys

    Arguments:
        adict (dict): Simple python dict data struct
        deny (list): Explicitly denied keys
    """
    return {k: v for k, v in adict.items() if k not in deny}
a377c35f4eaedf0ed3b85eeaedbd45f7af0e8ec7
19,580
def _bulk_identity_factory(key_columns):
    """Return a function that accepts a dict mapping and returns its identity,
    i.e. the tuple of its values for the keys named by `key_columns`.
    """
    return lambda mapping: tuple(mapping.get(col.key) for col in key_columns)
29d60f29f95eb5e7f2ef3ecf28e2657ac0974924
617,609
def get_navigation(argv):
    """ return with_navigation flag from arg list """
    if len(argv) > 3:
        # argv entries are strings, so bool(argv[3]) would be True even for
        # "False" or "0"; compare against accepted truthy spellings instead.
        return argv[3].lower() in ('1', 'true', 'yes')
    return False
734a9ee44cc0864089f14482c7f3080336be6fe3
235,885
def generate_neighbours(image):
    """
    Generate all neighbours of all pixels in the image
    :param image:
    :return:
    """
    neighbour_dict = dict()
    for x in range(image.shape[0]):
        for y in range(image.shape[1]):
            neighbour_dict[str((x, y))] = list(filter(
                lambda pos: 0 <= pos[0] < image.shape[0] and 0 <= pos[1] < image.shape[1],
                [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]))
    return neighbour_dict
e484a62624f0775ac90beac344bda48303f3a486
568,347
import yaml


def get_yaml_from_topology_string(topology):
    """Transform a topology string into a dictionary of the same format
    used in the infrared provision.yml file.

    :param topology: Topology string in the format node:amount
    :type topology: str
    :returns: yaml representation of a provision.yml file
    :rtype: str
    """
    provision = {'provision': {'topology': {}}}
    if not topology:
        return yaml.dump(provision)
    topology_dict = {}
    for component in topology.split(","):
        name, amount = component.split(":")
        topology_dict[name + ".yml"] = int(amount)
    provision['provision']['topology']['nodes'] = topology_dict
    return yaml.dump(provision)
2a8313af9fd97c463e34c73b7aea2c87c5d05c1c
347,513
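A quick illustration of get_yaml_from_topology_string with a made-up topology string (output shown for PyYAML's default block style, keys sorted):

print(get_yaml_from_topology_string("controller:1,compute:2"))
# provision:
#   topology:
#     nodes:
#       compute.yml: 2
#       controller.yml: 1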
def readFPs(filepath):
    """Reads a list of fingerprints from a file"""
    try:
        myfile = open(filepath, "r")
    except OSError:
        # catch the specific error instead of a bare except
        raise IOError("file does not exist:", filepath)
    fps = []
    with myfile:  # ensure the file is closed after reading
        for line in myfile:
            if line[0] != "#":  # ignore comments
                line = line.rstrip().split()
                fps.append(line[0])
    return fps
96d483360c411a27a3b570875f61344ef4dae573
706,883
def PyLong_FromSsize_t(space, val):
    """Return a new PyLongObject object from a C Py_ssize_t, or NULL
    on failure.
    """
    return space.newlong(val)
ff5eecf8335d8cd2c96c8b996cab110075aaabd8
506,406
def factors(target):
    """Returns the factors of target"""
    _factors = []
    i = 1
    while i <= target:
        if target % i == 0:
            _factors.append(i)
        i += 1
    return _factors
fc0d27f011238ddba12a5c286949a326c80aa5cb
312,449
def lightness_glasser1958(Y, **kwargs):
    """
    Returns the *Lightness* :math:`L^*` of given *luminance* :math:`Y` using
    *Glasser et al. (1958)* method.

    Parameters
    ----------
    Y : numeric
        *luminance* :math:`Y`.
    \*\*kwargs : \*\*, optional
        Unused parameter provided for signature compatibility with other
        *Lightness* computation objects.

    Returns
    -------
    numeric
        *Lightness* :math:`L^*`.

    Notes
    -----
    -   Input *luminance* :math:`Y` is in domain [0, 100].
    -   Output *Lightness* :math:`L^*` is in domain [0, 100].

    References
    ----------
    .. [1]  http://en.wikipedia.org/wiki/Lightness
            (Last accessed 13 April 2014)

    Examples
    --------
    >>> lightness_glasser1958(10.08)  # doctest: +ELLIPSIS
    36.2505626...
    """
    L_star = 25.29 * (Y ** (1 / 3)) - 18.38
    return L_star
f1af67296158c0b6c7589f887df51f17ac700daf
644,751
import base64


def Base64EncodeProto(msg):
    """Encodes the proto as a base64 string which is safe to pass around
    on command line.
    """
    # b64encode returns bytes; decode so the function really returns a string,
    # as the docstring promises.
    encoded_string = base64.b64encode(msg.SerializeToString()).decode('ascii')
    return encoded_string
a22ebaa0d1e0020e8d0fcf10c9728232083a5949
416,762
def calculate_color(
        vertex_id: int, current_color: int, maxima: bool, messages: list,
        superstep: int
) -> dict:
    """UDF that calculates the color of the nodes.
    Based on Local Maxima First algorithm

    :param vertex_id: The id of the node/vertex
    :param current_color: The current color of the vertex
    :param maxima: Flag that indicates if the node is maxima
    :param messages: List that contains the neighbor ids that send message
    :param superstep: The superstep number
    :return: Dictionary that contains the new color and the status of maxima
    """
    # Return if you have already been colored!
    if current_color != -1:
        # The original used the builtin 'id' here; 'vertex_id' is clearly meant.
        return {"id": vertex_id, "new_color": current_color, "new_maxima": True}

    # Local Maxima First internal procedure
    for message in messages:
        if vertex_id < message:
            maxima = False
            break

    return (
        {"id": vertex_id, "new_color": superstep, "new_maxima": maxima}
        if maxima
        # The original returned new_maxima=True for a still-uncolored vertex,
        # which contradicts the algorithm; report the computed flag instead.
        else {"id": vertex_id, "new_color": current_color, "new_maxima": maxima}
    )
6e2cd053309a05f8fc6aa489248cb4ce30d418b5
215,769
def get_bse(da, da_peak_times):
    """
    Takes an xarray DataArray containing veg_index values and calculates the
    vegetation value base (bse) for each timeseries per-pixel. The base is
    calculated as the mean value of two minimum values; the min of the slope
    to the left of peak of season, and the min of the slope to the right of
    the peak of season. Users must provide an existing peak of season (pos)
    data array, which can either be the max of the timeseries, or the middle
    of season (mos) values.

    Parameters
    ----------
    da: xarray DataArray
        A two-dimensional or multi-dimensional DataArray containing an array
        of veg_index values.
    da_peak_times: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel must be the time (day of year) value calculated at either peak
        of season (pos) or middle of season (mos) prior.

    Returns
    -------
    da_bse_values : xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the base (bse) veg_index value detected across the
        timeseries at each pixel.
    """
    # notify user
    print('Beginning calculation of base (bse) values (times not possible).')

    # get vos values (min val in each pixel timeseries)
    print('> Calculating base (bse) values.')

    # split timeseries into left and right slopes via provided peak/middle values
    slope_l = da.where(da['time.dayofyear'] <= da_peak_times).min('time')
    slope_r = da.where(da['time.dayofyear'] >= da_peak_times).min('time')

    # get per pixel mean of both left and right slope min values
    da_bse_values = (slope_l + slope_r) / 2

    # convert type
    da_bse_values = da_bse_values.astype('float32')

    # rename
    da_bse_values = da_bse_values.rename('bse_values')

    # notify user
    print('> Success!\n')
    return da_bse_values
3edaf6156bd9fdae15c3bf845eb3deb293489cfb
703,210
def get_class_name(cls):
    """Get the class full path name."""
    return "{}.{}".format(cls.__module__, cls.__name__)
6dfdf76a71928507bc4cd0e19a881b0d34bee908
129,763
from typing import Union
from typing import Sequence


def format_as_iter(vals: Union[int, Sequence[int]], iters_per_epoch: int,
                   time_scale: str):
    """Format data to be at iteration time scale.

    If values are negative, they correspond to the opposite time scale.
    For example if `time_scale="epoch"` and `val=-1`, `val` corresponds
    to 1 iteration.

    Args:
        vals (`int(s)`): Values to format.
        iters_per_epoch (int): Number of iterations per epoch.
        time_scale (str): Time scale of current values.

    Returns:
        int: Time (positive int) formatted in iteration format
    """
    assert time_scale in ["epoch", "iter"]

    single_value = not isinstance(vals, Sequence)
    if single_value:
        vals = [vals]

    epoch_convention = (time_scale == "epoch" and all(x >= 0 for x in vals)) or (
        time_scale == "iter" and all(x <= 0 for x in vals)
    )

    if epoch_convention:
        vals = type(vals)([iters_per_epoch * abs(x) for x in vals])
    else:
        vals = type(vals)([abs(x) for x in vals])

    if single_value:
        return vals[0]
    else:
        return vals
f9971ad6227b497834e4667f101a56c49b71507e
34,216
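Three worked calls to format_as_iter, assuming 100 iterations per epoch:

print(format_as_iter(2, 100, "epoch"))       # 200 iterations
print(format_as_iter(-5, 100, "epoch"))      # negative flips to the iter scale -> 5
print(format_as_iter([1, 2], 100, "epoch"))  # [100, 200]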
def special_load_rules(branch):
    """
    Detect if the branch requires special conversion rules.
    """
    try:
        return "__data_type__" in branch
    except TypeError:
        return False
3c5737fca3538df947f9d85c616ee5750b4edc0f
583,485
def velocity(Q, S):
    """
    :param Q: Volume flow for thermodynamic conditions in pipe, m3/s
    :param S: Cross-sectional area of a pipe, m2
    :return: Velocity of the natural gas, m/s
    """
    return Q / S
92f362601623ed264df2af2892d467285284689e
280,819
from typing import Any


def is_hashable(obj: Any) -> bool:
    """Determine if the given object is hashable.

    Example:

        >>> from collectionish.utils import is_hashable
        >>>
        >>> is_hashable([1, 2, 3])
        False
        >>> is_hashable('boop')
        True
    """
    try:
        hash(obj)
        return True
    except TypeError:
        return False
a69d1c045c033fcd4d989f0359e47867eb17833a
229,751
from typing import Dict


def get_all_sites(api) -> Dict[str, str]:
    """Returns all the StackExchange sites as a dict, api_site_parameter -> name"""
    # hacky way to get everything..
    res = api.fetch('sites')
    return {s['api_site_parameter']: s['name'] for s in res['items']}
e5de50fe9e8752e412834c59fbe791c051cb594a
174,513
def remove_duplicate_values(array_like, tol=0.0):
    """
    Removes duplicate values from a list (when tol=0.0), or removes
    approximately duplicate values if tol!=0.0.
    """
    unique_values = [array_like[0]]
    for element in array_like:
        element_is_duplicate = False
        for uval in unique_values:
            if abs(uval - element) <= tol:
                element_is_duplicate = True
        if not element_is_duplicate:
            unique_values.append(element)
    return unique_values
afdad5db2aa00858aa9bcd29e1b64b744b2fb963
21,932
def is_string(value) -> bool:
    """ Is the value a string """
    return isinstance(value, str)
e10e2c05d277313832500220929391d9cda1e84a
313,358
from datetime import datetime


def unixtime_to_isotime(unixtime, microseconds):
    """
    Converts a unix timestamp to a readable string timestamp

    :param unixtime:
    :param microseconds: If True microseconds will be visible
    :return:
    """
    return datetime.fromtimestamp(unixtime).strftime(
        '%Y-%m-%dT%H:%M:%S' + microseconds * '.%f')
35a157fe503f1903a3a900a2ccc7b9f16ad5f478
282,887
from typing import List


def pattern_index(text: str, pattern: str) -> List[int]:
    """Return the indices (1-based) of a pattern within text

    Arguments:
        text {str} -- text to search for pattern
        pattern {str} -- pattern to search for in text

    Returns:
        List[int] -- index locations of pattern in text

    Example:
        >>> pattern_index("ACAACTATGCATACTATCGGGAACTATCCT", "TAT")
        [6, 15, 25]
    """
    indices = []
    pattern_size = len(pattern)
    for i in range(len(text) - pattern_size + 1):
        if text[i:i + pattern_size] == pattern:
            indices.append(i + 1)
    return indices
60c7204818b00f301b07d1e4a9ffcd62173b6e3f
281,178
from typing import Tuple
from typing import Union
from typing import Collection
from typing import Any


def insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]:
    """
    Insert values into a tuple.

    Parameters
    ----------
    tup
        the tuple into which values are to be inserted
    pos
        The positions into which values are to be inserted
    val
        The values corresponding to the positions in `pos`

    Returns
    -------
    A copy of `tup` with values inserted.

    Raises
    ------
    ValueError
        If length of `pos` is not equal to length of `val`

    Examples
    --------
    >>> tup = (0, 1, 2, 3)
    >>> pos = (5, 1)
    >>> val = (9, 8)
    >>> insert_into_tuple(tup, pos, val)
    (0, 8, 1, 2, 3, 9)
    >>> insert_into_tuple(tup, (), ())
    (0, 1, 2, 3)
    """
    tl = list(tup)
    if isinstance(pos, int):
        tl.insert(pos, val)
    else:
        if len(pos) != len(val):
            raise ValueError("pos and val must be of the same length")
        if len(pos) == 0:
            return tup

        # sort pos from low to high; sort val correspondingly
        stl = list(zip(*sorted(zip(pos, val))))
        for p, v in zip(stl[0], stl[1]):
            tl.insert(p, v)
    return tuple(tl)
d536101bc20ceb6bc240c6e1cb7d30843827e251
271,634
def replace(line_val, seq):
    """
    Replace special characters
    :param line_val: line as string
    :param seq: sequence number
    :return: changed value
    """
    # replace TAB with ','
    rtn = line_val.replace('\t', ',')
    # remove line break
    rtn = rtn.replace('\r\n', '')
    rtn = rtn.replace('\n', '')
    rtn = rtn.replace('NULL', '0')
    # drop the row entirely if any field is 'ZZ'
    # ('NULL' fields were already mapped to '0' above)
    _tmp_lst = rtn.split(',')
    for _tmp in _tmp_lst:
        _tmp = _tmp.strip().upper()
        if 'ZZ' == _tmp:
            rtn = None
            break
    if rtn is not None:
        _tmp_lst.append(str(seq))
        rtn = ','.join(_tmp_lst)
        rtn = rtn + '\n'
    return rtn
100e2bc08f58e1ef9dc81db88a1df569309bb2ec
317,090
def linear_skew(phenotypes, selection_gradient):
    """Transforms the phenotype data to match the given selection gradient
    and stores them as gpm.data.phenotypes.

    Parameters
    ----------
    selection_gradient : float, int. (default=1)
        The selection gradient defines the slope of the phenotype-fitness
        function.

    Returns
    -------
    norm_fitness : list.
        List of normalized fitnesses
    """
    ### USE NUMPY SYNTAX TO SPEED UP. INPUT SHOULD BE NUMPY ARRAY ANYWAY ###
    b = 1 / selection_gradient - 1
    fitness = [ph_ + b for ph_ in phenotypes]
    max_fit = max(fitness)
    # Normalize to 1
    norm_fitness = [fit / max_fit for fit in fitness]
    return norm_fitness
d3bb72e9b3acbf7501f3d31b98631bf67e36e4b1
601,121
def documentedEvenOdd(num):
    """
    Returns the string "even", "odd", or "UNKNOWN".

    This would be a more verbose description of what this function might do
    and could drone on for quite a while

    Args:
        num (int): The number to be tested as even or odd

    Returns:
        str: "even" if the number is even, "odd" if the number is odd,
        or "UNKNOWN"

    Examples:
        evenOdd(32)
        evenOdd(13)
    """
    if num % 2 == 0:
        return "even"
    elif num % 2 == 1:
        return "odd"
    else:
        return "UNKNOWN"
b995693ccf65079b12c7eef4be59b0c13b198a36
428,710
import logging


def _checker(value_list, source, data, flag):
    """
    Checks if values in the list are in data (Syntax warnings or errors).

    :param list value_list: List of values to check
    :param str source: Name of source that's being checked
    :param dict data: Data being checked
    :param str flag: What to do if value not found ("warnings" | "errors")
    :return: Number of hits (warnings/errors)
    :rtype: int
    """
    num_hits = 0
    for value in value_list:
        if value not in data:
            if flag == "warnings":
                logging.warning("Missing %s in %s", value, source)
            elif flag == "errors":
                logging.error("Missing %s in %s", value, source)
            else:
                logging.error("Invalid flag for _checker: %s", flag)
            num_hits += 1
    if num_hits > 0:
        logging.info("Total number of %s in %s: %d", flag, source, num_hits)
    return num_hits
99498f0127b074e12e19348c54fa16356be0835b
330,734
def RGBtoHSV(r, g, b):
    """Convert RGB values to HSV

    Algorithm is taken from https://www.cs.rit.edu/~ncs/color/t_convert.html
    """
    # If we were given black, return black
    if r == 0 and g == 0 and b == 0:
        return 0, 0, 0

    # We need RGB from 0-1, not 0-255
    r /= 255
    g /= 255
    b /= 255

    min_rgb = min(r, g, b)
    max_rgb = max(r, g, b)
    delta = max_rgb - min_rgb

    v = max_rgb
    s = delta / max_rgb

    if delta == 0:
        return 0, s, v
    elif r == max_rgb:
        # Between yellow and magenta
        h = (g - b) / delta
    elif g == max_rgb:
        # Between cyan and yellow
        h = 2 + (b - r) / delta
    else:
        # Between magenta and cyan
        h = 4 + (r - g) / delta

    h *= 60  # Degrees
    if h < 0:
        h += 360

    return h, s, v
412f27fb2d72208a4cd56e6ec53053684fc7cf12
474,666
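A couple of sanity checks for RGBtoHSV on primary colours and a grey:

print(RGBtoHSV(255, 0, 0))      # (0.0, 1.0, 1.0)   -> pure red, hue 0 degrees
print(RGBtoHSV(0, 255, 0))      # (120.0, 1.0, 1.0) -> pure green, hue 120 degrees
print(RGBtoHSV(128, 128, 128))  # (0, 0.0, ~0.5)    -> grey: no hue, no saturation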
def contains(dictionary, key):
    """Check if key exists in dictionary."""
    return key in dictionary
23262a9e96ccc283a7357464790c44779e80313e
210,615
def GetGuestPolicyRelativePath(parent, guest_policy):
    """Return the relative path of an osconfig guest policy."""
    return '/'.join([parent, 'guestPolicies', guest_policy])
20a3ce383921230b276d70fc44021496ab7fa959
582,997
def to_time_sec(time_str):
    """
    Tries to convert `time_str` to a valid non-negative time (in seconds) as
    a floating point number.

    Args:
        time_str (string): String representing non negative float - time in seconds.

    Returns:
        float: Time as non negative floating point number.

    Raises:
        ValueError: If `time_str` could not be converted to float or is less than 0.
    """
    try:
        time_float = float(time_str)
    except ValueError:
        raise ValueError("argument must represent a floating point number")

    if time_float < 0:
        raise ValueError("argument must be a non-negative floating point number")

    return time_float
651af52101efea040fc7cf831547572a9f0474fd
481,849
from pathlib import Path
from typing import List


def remove_unused_files(directory: Path, module_name: str,
                        need_to_remove_cli: bool) -> None:
    """Remove unused files.

    Args:
        directory: path to the project directory
        module_name: project module name
        need_to_remove_cli: flag for removing CLI related files
    """
    files_to_delete: List[Path] = []

    def _cli_specific_files() -> List[Path]:
        return [directory / module_name / "__main__.py"]

    if need_to_remove_cli:
        files_to_delete.extend(_cli_specific_files())

    for path in files_to_delete:
        path.unlink()
1ce15a23a06a20b7f8be6fc583699289c7a76251
607,148
def is_post(request):
    """Return ``True`` if the method of the request is ``POST``."""
    return request.method.upper() == 'POST'
a9fc74a866157c54916a1d00c0a0fd5eb890e3a1
162,939
from typing import List


def split_badge_list(badges: str, separator: str) -> List[str]:
    """
    Splits a string of badges into a list, removing all empty badges.
    """
    if badges is None:
        return []

    return [badge for badge in badges.split(separator) if badge]
6dc53d45cc8390422e5a511e39475ae969bf37c9
11,516
from boto.s3.connection import S3Connection
from boto.exception import NoAuthHandlerFound


def S3ConnectionWithAnon(access, secret, anon=True):
    """
    Connect to S3 with automatic handling for anonymous access

    Parameters
    ----------
    access : str
        AWS access key

    secret : str
        AWS secret access key

    anon : boolean, optional, default = True
        Whether to make an anonymous connection if credentials fail to
        authenticate
    """
    try:
        conn = S3Connection(aws_access_key_id=access,
                            aws_secret_access_key=secret)
        return conn
    except NoAuthHandlerFound:
        if anon:
            conn = S3Connection(anon=True)
            return conn
        else:
            raise
3e640cfae9d580fe58d7f44588e2ffe7de9eb269
609,576
def get_classes(y) -> int:
    """
    Get the total number of classes.

    Args:
        y : The labels list of the data.

    Returns:
        int: Number of total classes
    """
    return int(y.shape[1])
f1f518cca3e5f7e43f2f93513be17283bcb0f443
79,881
import string


def apply_object_attributes_to_template(template, value_object):
    """Generate a string from the template by applying values from the given object.

    If the template provided is not a template (does not have any placeholders),
    this will not have any effect and template will be returned unchanged.
    If value_object is None, this will not have any effect.

    Arguments:
        template -- A string that may or may not be templated. If templated,
                    the placeholders will be populated with values from the
                    attributes of the value_object.
        value_object -- Any object that supports the __dict__ attribute. Most
                    classes have a __dict__ attribute that returns a mapping
                    of all the attributes and the associated values.

    Returns:
        string -- This will be the template, with the values from value_object applied.
    """
    # Parse the template and extract the field names.
    # We'll use the field names to explicitly look-up attributes in the value_object.
    # The reason for this is that it works for @property attributes as well as
    # normal attributes.
    field_names = [field_name
                   for _, field_name, _, _ in string.Formatter().parse(template)
                   if field_name is not None]

    template_values = {}
    for field_name in field_names:
        try:
            template_values[field_name] = getattr(value_object, field_name)
        except AttributeError as e:
            raise AttributeError(('Unable to apply object to template. Could not '
                                  'look-up attribute \'{}\' in the object \'{}\'. '
                                  'Error: {}').format(field_name,
                                                      str(value_object), str(e)))

    return template.format(**template_values)
266085845a4e4283d9ffa897b4b0b7d2f8669001
13,019
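A hedged example of apply_object_attributes_to_template with a throwaway class; the Server class and its attributes are made up for illustration:

class Server:
    def __init__(self):
        self.host = 'example.org'

    @property
    def port(self):  # property attributes are resolved via getattr too
        return 8080

print(apply_object_attributes_to_template('{host}:{port}', Server()))
# -> 'example.org:8080'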
def didGen64(vk64u, method="dad"):
    """
    didGen accepts a url-file safe base64 key in the form of a string
    and returns a DID.

    :param vk64u: base64 url-file safe verifier/public key from EdDSA
                  (Ed25519) key
    :param method: W3C did method string. Defaults to "dad".

    :return: W3C DID string
    """
    if vk64u is None:
        return None

    return "did:{0}:{1}".format(method, vk64u)
260a9f997050399b0ec1e75033835031840e68cc
565,501
def space_row(left, right, filler=' ', total_width=-1):
    """space the data in a row with optional filling

    Arguments
    ---------
    left : str, to be aligned left
    right : str, to be aligned right
    filler : str, default ' '. must be of length 1
    total_width : int, width of line. if negative number is specified,
                  then that number of spaces is used between the left
                  and right text

    Returns
    -------
    str
    """
    left = str(left)
    right = str(right)
    filler = str(filler)[:1]

    if total_width < 0:
        spacing = -total_width
    else:
        spacing = total_width - len(left) - len(right)

    return left + filler * spacing + right
64c96ca83ab4c5fceec63c5aa4743ce398f9a48d
121,158
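Two quick space_row calls showing both spacing modes:

print(space_row('name', 'value', total_width=20))       # 'name           value'
print(space_row('a', 'b', filler='.', total_width=-3))  # 'a...b'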
import time


def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False):
    """
    Get a string representing the current time (local by default, UTC when
    `utc_time` is True) in format: YYYYMMDD_HHMMSS
    The format can be changed if needed.
    """
    if utc_time:
        time_tuple = time.gmtime()
    else:
        time_tuple = time.localtime()
    return time.strftime(timestamp_format, time_tuple)
39e3122a71dafbe1b70f82e35f8036fb90d8859b
432,814
def safe_divide(x: float, y: float) -> float:
    """This returns zero if the denominator is zero"""
    if y == 0.0:
        return 0.0
    return x / y
dd438cbba16f739157b21a7bdd0e8b34cf7d0c71
576,274
from typing import Mapping


def is_mappy(x):
    """
    Return True if `x` is "mappy", i.e. a map-like object.

    "Mappy" is defined as any instance of `collections.abc.Mapping`:

        >>> is_mappy({'a': 'b'})
        True
        >>> from collections import defaultdict
        >>> is_mappy(defaultdict(list))
        True
        >>> is_mappy('a regular string')
        False
        >>> is_mappy(['a', 'b'])
        False
        >>> is_mappy(iter({'a': 'b'}))
        False

    Note:
        Iterables and generators fail the "mappy" test.

    Args:
        x (any value): The object to test.

    Returns:
        bool: True if `x` is "mappy", False otherwise.
    """
    return isinstance(x, Mapping)
1feccf81fe737a64ff9df6e3c74df09a4839c898
257,225
def _andparam(params):
    """
    string join parameter list with &
    """
    return "&".join(params)
ca7cedf4dd1169dd83f83a97f707f2e43eeaa8f7
64,408
def evaluate_final(restore, classifier, eval_sets, batch_size):
    """
    Function to get percentage accuracy of the model, evaluated on a set of
    chosen datasets.

    restore: a function to restore a stored checkpoint
    classifier: the model's classifier, it should return genres, logit values,
                and cost for a given minibatch of the evaluation dataset
    eval_sets: the chosen evaluation sets, for eg. the dev-set
    batch_size: the size of minibatches.
    """
    restore(best=True)
    percentages = []
    length_results = []
    for eval_set in eval_sets:
        bylength_prem = {}
        bylength_hyp = {}
        genres, hypotheses, cost = classifier(eval_set)
        correct = 0
        cost = cost / batch_size
        full_batch = int(len(eval_set) / batch_size) * batch_size

        for i in range(full_batch):
            hypothesis = hypotheses[i]
            length_1 = len(eval_set[i]['sentence1'].split())
            length_2 = len(eval_set[i]['sentence2'].split())
            if length_1 not in bylength_prem.keys():
                bylength_prem[length_1] = [0, 0]
            if length_2 not in bylength_hyp.keys():
                bylength_hyp[length_2] = [0, 0]

            bylength_prem[length_1][1] += 1
            bylength_hyp[length_2][1] += 1

            if hypothesis == eval_set[i]['label']:
                correct += 1
                bylength_prem[length_1][0] += 1
                bylength_hyp[length_2][0] += 1
        percentages.append(correct / float(len(eval_set)))
        length_results.append((bylength_prem, bylength_hyp))
    return percentages, length_results
288d2f3780c65dd068a819a1b9ade4b6075c4f9c
100,872
def cqp(c):
    """Complex quadratic polynomial, function used for Mandelbrot fractal"""
    return lambda z: z**2 + c
e1209088e3dee290b2623a44498a55df023c57b6
380,643
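A tiny escape-time check built on cqp, iterating z -> z**2 + c from zero:

f = cqp(complex(-1, 0))
z = 0
for _ in range(20):
    z = f(z)
print(abs(z) <= 2)  # True: c = -1 stays bounded (it is in the Mandelbrot set)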
def configure(simulator, co_simulation, nb_neurons=10000):
    """
    configure NEST before the simulation
    modify example of https://simulator.simulator.readthedocs.io/en/stable/_downloads/482ad6e1da8dc084323e0a9fe6b2c7d1/brunel_alpha_simulator.py

    :param simulator: nest simulator
    :param co_simulation: boolean for checking if the co-simulation is active or not
    :param nb_neurons: number of neurons
    :return:
    """
    # create the neurons and the devices
    neuron_params = {"C_m": 250.0, "tau_m": 20.0, "tau_syn_ex": 0.5,
                     "tau_syn_in": 0.5, "t_ref": 2.0, "E_L": 0.0,
                     "V_reset": 0.0, "V_m": 0.0, "V_th": 20.0}
    nodes_ex = simulator.Create("iaf_psc_alpha", nb_neurons, params=neuron_params)
    nodes_in = simulator.Create("iaf_psc_alpha", 25, params=neuron_params)
    noise = simulator.Create("poisson_generator",
                             params={"rate": 8894.503857360944})
    espikes = simulator.Create("spike_recorder")
    ispikes = simulator.Create("spike_recorder")
    espikes.set(label="brunel-py-ex", record_to="ascii")
    ispikes.set(label="brunel-py-in", record_to="ascii")

    # create the connection
    simulator.CopyModel("static_synapse", "excitatory",
                        {"weight": 20.68015524367846, "delay": 1.5})
    simulator.CopyModel("static_synapse", "inhibitory",
                        {"weight": -103.4007762183923, "delay": 1.5})
    simulator.Connect(noise, nodes_ex, syn_spec="excitatory")
    simulator.Connect(noise, nodes_in, syn_spec="excitatory")
    simulator.Connect(nodes_ex[:50], espikes, syn_spec="excitatory")
    simulator.Connect(nodes_in[:25], ispikes, syn_spec="excitatory")
    conn_params_ex = {'rule': 'fixed_indegree', 'indegree': 10}
    conn_params_in = {'rule': 'fixed_indegree', 'indegree': 2}
    simulator.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
    simulator.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")

    # Cosimulation devices
    if co_simulation:
        input_to_simulator = simulator.Create(
            "spike_generator", nb_neurons,
            params={'stimulus_source': 'mpi',
                    'label': '/../transformation/spike_generator'})
        output_from_simulator = simulator.Create(
            "spike_recorder",
            params={"record_to": "mpi",
                    'label': '/../transformation/spike_detector'})
        simulator.Connect(input_to_simulator, nodes_ex, {'rule': 'one_to_one'},
                          {"weight": 20.68015524367846, "delay": 0.1})
        simulator.Connect(nodes_ex, output_from_simulator, {'rule': 'all_to_all'},
                          {"weight": 1.0, "delay": 0.1})
        return espikes, input_to_simulator, output_from_simulator
    else:
        return espikes, None, None
09f701fccd1ba36500bd5f4a8198f47edd1b0a00
575,590
def change_to_id(obj):
    """Change key named 'uuid' to 'id'

    Zun returns objects with a field called 'uuid'; many of Horizon's
    directives, however, expect objects to have a field called 'id'.
    """
    obj['id'] = obj.pop('uuid')
    return obj
aad9c05e5359d6ca5788413d02454f22e2d7d3be
259,400
def create_lexicon(word_tags):
    """
    Create a lexicon in the right format for nltk.CFG.fromString() from
    a list with tuples with words and their tag.
    """
    # dictionary to filter the double tags
    word_dict = {}
    for word, tag in word_tags:
        if tag not in word_dict:
            word_dict[tag] = {word}
        else:
            word_dict[tag].add(word)

    # PRO is the tag for 's, but the 's is not removed on nouns.
    word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']]
    word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']]
    del word_dict[',']
    word_dict['PRP'].update(word_dict['PRP$'])
    del word_dict['PRP$']
    # Assuming the possessive clitic "'s" was meant here; the flattened
    # source showed '"s', which looks like a mangled quote.
    word_dict['POS'] = ["'s"]

    # convert the dictionary to the right NLTK format
    lexicon = ''
    for key, val in word_dict.items():
        lexicon += key + ' -> '
        # add ' ' around every word
        val = [f'\'{v}\'' for v in val]
        # the words are separated by a pipe
        lexicon += ' | '.join(val) + '\n'
    return lexicon
3a91671d559f5924ec9326520db6e11a1672fee4
707,263
def get_max_val(_dict: dict):
    """ Returns the largest value of the dict of format {int: List[int]}"""
    return max(item for val in _dict.values() for item in val)
f317b8a6533a6a65fd524fdc3ae0a61dfd554b3a
286,367
def rect2math(r):
    """Convert rectangles to mathematical coordinates."""
    return (r.x0, r.y0, r.x1, r.y1)
9eefb2dc1c5a6e783ead82cc9ba88e361a498ab4
601,785
def get_mask_from_lengths(memory, memory_lengths):
    """Get mask tensor from list of lengths

    Args:
        memory: (batch, max_time, dim)
        memory_lengths: array like
    """
    mask = memory.data.new(memory.size(0), memory.size(1)).byte().zero_()
    for idx, l in enumerate(memory_lengths):
        mask[idx][:l] = 1
    return ~mask
129feda0dabdc5d3b264e82b4fb417723baba31c
676,154
def get_feature_channels_per_level(base_feature_channels, max_feature_channels,
                                   num_levels, decoder_min_channels=None):
    """Get the number of output channels for each of the encoder and decoder blocks.

    Args:
        base_feature_channels (int): number of the output channels of the first
            block in the encoder part. Number of the output channels of the
            encoder blocks is given by the geometric progression:
            `base_feature_channels` * 2 ** `k` (`k`: level of the encoder block).
        max_feature_channels (int): max output channels of the encoder/decoder blocks.
        num_levels (int): number of the levels (= blocks) in the encoder/decoder part.
        decoder_min_channels (int, optional): min output channels of the decoder
            blocks. Defaults to None.

    Returns:
        tuple[list[int]]: encoder output channels and decoder output channels.
    """
    # output channels of the encoder blocks
    encoder_feature_channels = [min(max_feature_channels, base_feature_channels * (2**i))
                                for i in range(num_levels)]
    # output channels of the decoder blocks
    decoder_feature_channels = list(reversed(encoder_feature_channels))[1:]
    if decoder_min_channels is not None:
        for i in range(len(decoder_feature_channels)):
            decoder_feature_channels[i] = max(decoder_min_channels,
                                              decoder_feature_channels[i])
    return encoder_feature_channels, decoder_feature_channels
f4c216d419b7006510bb19493a3be4a49484f9dc
246,867
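A worked call of get_feature_channels_per_level for a 5-level U-Net-style backbone:

enc, dec = get_feature_channels_per_level(64, 512, 5)
print(enc)  # [64, 128, 256, 512, 512] -- doubling per level, capped at 512
print(dec)  # [512, 256, 128, 64]      -- mirror of the encoder, minus its last entry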
def _get_segmentation_strategy(segmentation):
    """Get the baci string for a geometry pair strategy."""
    if segmentation:
        return 'segmentation'
    else:
        return 'gauss_point_projection_without_boundary_segmentation'
267a10a7237b2d42f1988ad9e35eddc7febbe789
645,748
def _parse_input(obj, arg):
    """
    Returns obj[arg] if arg is string, otherwise returns arg.
    """
    return obj[arg] if isinstance(arg, str) else arg
e6b0b61d8a910b5ab1e60c6c0ad3f73a374647a5
465,666
def score_survey(star_list, field_list, overlap_bonus):
    """
    Given the stars you want and a set of fields, scores the stars, sums it up.

    Parameters
    ----------
    star_list : list
        list of Star objects
    field_list : list
        list of Field objects
    overlap_bonus

    Returns
    -------
    score : float
        Sum of the scores of all of the stars
    """
    score = 0.0
    for star in star_list:
        star.reset()
        star.count_fields_star_in(field_list)
        score += star.score_star(overlap_bonus=overlap_bonus)
    return score
1f258ceb59048e9130dd2fa341fe6fd67dd69421
159,838
import re


def parse_sequence_idx_from_pattern(patt):
    """Extracts the (first) numeric sequence index from the given pattern.

    Args:
        patt: a pattern like "/path/to/frames/frame-%05d.jpg"

    Returns:
        the numeric sequence string like "%05d", or None if no sequence
        was found
    """
    m = re.search("%[0-9]*d", patt)
    return m.group() if m else None
576e9ed2f4c6769008f91bb4f2b8f18f547cf690
259,249
def comma_separated_int(s):
    """
    Used as the 'dtype' argument to TextFileParser.add_field(), to parse
    integer fields which appear in text files with comma separators.
    """
    s = s.replace(',', '')
    return int(s)
1eecd58efadf780d37915b733b0b613147dc425f
621,942
import pathlib


def try_relative(path):
    """Try making `path` relative to current directory,
    otherwise make it an absolute path"""
    try:
        here = pathlib.Path.cwd()
        return pathlib.Path(path).relative_to(here)
    except ValueError:
        return pathlib.Path(path).resolve()
ab40fa091253f599af1bd20695ab80b6d8580ef8
544,627
import re
import socket


def get_hostname() -> str:
    """
    Get the stripped-down version of the host name to be used for model_dir,
    ckpt names, scoping. Names had '.' and '-' in them, which I don't want
    for scoping/file suffixes; keep only letters, numbers, and '_'.
    """
    return re.sub(r'\W+', '', socket.gethostname())
d83acd728afcf8a23a5102fef1978a4d8f598448
433,495
def number_normalization(value, fromvalue, tovalue):
    """Normalize a number into the given range.

    value: the number to normalize.
    fromvalue: lower bound of the range (inclusive).
    tovalue: upper bound of the range + 1.
    """
    if 0 == tovalue:
        return value
    if tovalue <= value or value < fromvalue:
        value -= (value // tovalue) * tovalue
        if value < fromvalue:
            value += tovalue
    return value
912c515991246204ebc4d5eae8ffedb1c6d5823b
10,891
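number_normalization worked through for a degree wrap-around; with fromvalue=0 this behaves as a plain modulo (assuming the nesting reconstructed above):

print(number_normalization(370, 0, 360))  # 10  (wraps past the top)
print(number_normalization(-30, 0, 360))  # 330 (negative values wrap up)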
import pprint


def fmt_repr(obj):
    """
    Return pretty printed string representation of an object.
    """
    items = dict(obj.__dict__)
    return "<%s: {%s}>" % (obj.__class__.__name__,
                           pprint.pformat(items, width=1))
c2b32f393263f3e85f517e816414bd7136a804a5
181,584
def string_vector(v):
    """Convert [3] array to string"""
    return '[%5.3f,%5.3f,%5.3f]' % (v[0], v[1], v[2])
8a10262fe5c7e83987b097cb4bb29b2801fcff38
197,380
from typing import Dict
from typing import Any


def get_update_response_item(response) -> Dict[str, Any]:
    """
    Deserializes the response of Update operation and deserialized DynamoDB objects

    :param response: The response of Update operation
    :return: A deserialized DynamoDB object
    """
    return response.get('Attributes')
56b2143b136e008faa0f617746b083dbbadcb5d1
543,443
import typing


def format_commit_sha(commit: str, repo: typing.Dict[str, typing.Any]) -> str:
    """
    Format a GitHub commit SHA into a Markdown message.
    """
    return f"[{commit[:7]}]({repo['html_url']}/commit/{commit})"
6524863411d72f69969518c21970f6e7f57f14fb
465,503
def read_file(file):
    """Reads an entire file and returns file bytes."""
    BUFFER_SIZE = 16384  # 16 kilobytes
    b = b""
    with open(file, "rb") as f:
        while True:
            # read 16K bytes from the file
            bytes_read = f.read(BUFFER_SIZE)
            if bytes_read:
                # if there are bytes, append them
                b += bytes_read
            else:
                # if not, nothing to do here, break out of the loop
                break
    return b
293fe86c524f3f46945b8c1a0c37a148c300f9f1
174,380
def doc_metadata(doc):
    """Create a metadata dict from a MetatabDoc, for Document conversion"""
    r = doc['Root'].as_dict()
    r.update(doc['Contacts'].as_dict())
    r['author'] = r.get('author', r.get('creator', r.get('wrangler')))
    return r
b88c7158d143c51b7da4ef8d516d084234381e9c
513,188
def rgb2gray(img):
    """Convert a RGB image to gray scale."""
    if len(img.shape) == 2:
        grayimg = img[:, :]
    elif img.shape[-1] >= 3:
        grayimg = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    else:
        grayimg = img[:, :, 0]
    return grayimg
be42500e4c8c5bc78c8e34b9e228a89c816c7510
443,693
def first_role_id_in_roles(roles):
    """ Return the first role ID found in list of roles."""
    for role in roles:
        if not isinstance(role, dict):
            continue
        role_id = role.get('role')
        if not role_id:
            continue
        return str(role_id).strip()
a5756270ac9f6c2e0e53af13001c6aa430608e26
353,329
from typing import Union
from pathlib import Path
from typing import List
from typing import Dict
import json


def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]:
    """
    Read a '.jsonl' file and create a list of dicts

    Args:
        file_name: `Union[str,Path]`
            The file to open

    Returns:
        The list of dictionaries read from the 'file_name'
    """
    # Normalize to Path so the file handle is always closed after reading;
    # the original str branch called open() without ever closing the file.
    lines = Path(file_name).read_text('utf-8').splitlines(False)
    return [json.loads(line) for line in lines]
8e33fad766a255578179828dc76ec793c02f90b9
1,818
from typing import Sequence


def my_dot(v0: Sequence[float], v1: Sequence[float]) -> float:
    """
    Perform mathematical dot product between two same-length vectors.
    IE component-wise multiply, then sum.

    :param v0: any number of floats
    :param v1: same number of floats
    :return: single float
    """
    dot = 0.0
    for (a, b) in zip(v0, v1):
        dot += a * b
    return dot
6a4742cbd47395de69ee88f5fe0baf696993b644
577,835
def order_keys(keys, orders):
    """Get ordered keys.

    :param keys: keys to be sorted.
    :type keys: list of str
    :param orders: the order of the keys. '.' is all other keys not in order.
    :type orders: list of str.

    :returns: keys as list sorted by orders.

    :raises: TypeError if keys or orders is not list.
    """
    if not isinstance(keys, list):
        raise TypeError('keys %s type should be list' % keys)
    if not isinstance(orders, list):
        raise TypeError('orders %s type should be list' % orders)

    found_dot = False
    pres = []
    posts = []
    for order in orders:
        if order == '.':
            found_dot = True
        else:
            if found_dot:
                posts.append(order)
            else:
                pres.append(order)
    return ([pre for pre in pres if pre in keys] +
            [key for key in keys if key not in orders] +
            [post for post in posts if post in keys])
8f865f07ebd5538f5a289ffaaed0e705bf3c0b7c
415,968
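order_keys in action: keys listed before the '.' sort first, keys after it sort last, and everything else keeps its input order in between:

keys = ['name', 'id', 'email', 'age']
print(order_keys(keys, ['id', '.', 'age']))
# ['id', 'name', 'email', 'age']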
def update_qTable(q_table, state, action, reward, next_state_value,
                  gamma_discount=0.9, alpha=0.5):
    """
    Update the q_table based on observed rewards and maximum next state value

    Sutton's Book pseudocode:
    Q(S, A) <- Q(S, A) + [alpha * (reward + (gamma * maxValue(Q(S', A'))) - Q(S, A)]

    Args:
        q_table -- type(np.array) Determines state value
        state -- type(int) state value between [0,47]
        action -- type(int) action value [0:3] -> [UP, LEFT, RIGHT, DOWN]
        reward -- type(int) reward in the corresponding state
        next_state_value -- type(float) maximum state value at next state
        gamma_discount -- type(float) discount factor determines importance of
                          future rewards
        alpha -- type(float) controls learning convergence

    Returns:
        q_table -- type(np.array) Determines state value
    """
    update_q_value = q_table[action, state] + alpha * (
        reward + (gamma_discount * next_state_value) - q_table[action, state])
    q_table[action, state] = update_q_value
    return q_table
f267851a520c95259f363d89842ed7e50e5ddf30
80,171
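A single Q-learning backup worked by hand with update_qTable (numpy assumed; the table shape matches the docstring's 4 actions x 48 states):

import numpy as np

q = np.zeros((4, 48))
q = update_qTable(q, state=0, action=2, reward=-1, next_state_value=0.0)
print(q[2, 0])  # -0.5 = 0 + 0.5 * (-1 + 0.9*0 - 0)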
def conv_output_shape(dimension_size, filter_size, padding, stride):
    """
    Computes convolution's output shape.

    Parameters
    ----------
    dimension_size : int
        Size of the dimension. Typically it's image's width or height.
    filter_size : int
        Size of the convolution filter.
    padding : {``valid``, ``full``, ``half``} or int
        Type or size of the zero-padding.
    stride : int
        Stride size.

    Returns
    -------
    int
        Dimension size after applying convolution operation with specified
        configurations.
    """
    if dimension_size is None:
        return None

    if not isinstance(stride, int):
        raise ValueError("Stride needs to be an integer, got {} (value {!r})"
                         "".format(type(stride), stride))

    if not isinstance(filter_size, int):
        raise ValueError("Filter size needs to be an integer, got {} "
                         "(value {!r})".format(type(filter_size), filter_size))

    if padding == 'valid':
        output_size = dimension_size - filter_size + 1
    elif padding == 'half':
        output_size = dimension_size + 2 * (filter_size // 2) - filter_size + 1
    elif padding == 'full':
        output_size = dimension_size + filter_size - 1
    elif isinstance(padding, int):
        output_size = dimension_size + 2 * padding - filter_size + 1
    else:
        raise ValueError("`{!r}` is unknown convolution's border mode value"
                         "".format(padding))

    return (output_size + stride - 1) // stride
8923a3bbc315d38fbbf6a770d7b59db6f14670c3
160,922
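conv_output_shape checked against the usual formula, e.g. a 224-wide input and a 3-wide filter:

print(conv_output_shape(224, 3, 'valid', 1))  # 222
print(conv_output_shape(224, 3, 'half', 1))   # 224 ('half'/'same' keeps the size)
print(conv_output_shape(224, 3, 'valid', 2))  # 111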
def expand_test_result_df(df_test):
    """Adds columns to a DataFrame with test results

    Args:
        df_test DataFrame as produced by trainer.ModelTrainer, i.e. with columns
            'tp', 'fp', 'fn', 'correct', 'total_examples' and
            'examples_above_threshold'

    Returns:
        the input DataFrame with additional columns for 'precision', 'recall',
        'acc'uracy, 'f1' measure and 'coverage' percentage.
    """
    df = df_test
    epsilon = 0.00001  # avoid division by zero
    df['precision'] = df['tp'] / (df['tp'] + df['fp'] + epsilon)
    df['recall'] = df['tp'] / (df['tp'] + df['fn'] + epsilon)
    # epsilon added here too for consistency; the original divided by
    # 'examples_above_threshold' directly, risking division by zero
    df['acc'] = df['correct'] / (df['examples_above_threshold'] + epsilon)
    df['f1'] = 2 * df['tp'] / (2 * df['tp'] + df['fp'] + df['fn'] + epsilon)
    df['coverage'] = df['examples_above_threshold'] / (df['total_examples'] + epsilon)
    return df
dea64054f8fb372d9777b6bdc9b0064843bbd459
17,673
def NOT(logical_expression):
    """
    Returns the opposite of a logical value: `NOT(True)` returns `False`;
    `NOT(False)` returns `True`. Same as `not logical_expression`.

    >>> NOT(123)
    False
    >>> NOT(0)
    True
    """
    return not logical_expression
83ae11eb225b274ded4173df0ea202ccb4248f4a
146,084
import math


def determine_padding(filter_shape, output_shape="same"):
    """Method which calculates the padding based on the specified output
    shape and the shape of the filters."""
    if output_shape == "valid":
        return (0, 0), (0, 0)
    elif output_shape == "same":
        filter_height, filter_width = filter_shape

        # output_height = (height + pad_h - filter_height) / stride + 1
        pad_h1 = int(math.floor((filter_height - 1) / 2))
        pad_h2 = int(math.ceil((filter_height - 1) / 2))
        pad_w1 = int(math.floor((filter_width - 1) / 2))
        pad_w2 = int(math.ceil((filter_width - 1) / 2))

        return (pad_h1, pad_h2), (pad_w1, pad_w2)
    # the original fell through and returned None silently; fail loudly instead
    raise ValueError("output_shape must be 'valid' or 'same', got %r" % output_shape)
12ef1c8d624f0dfd161c8723736610fa86c9ce1d
681,154
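determine_padding for odd and even filter sizes (even filters pad asymmetrically):

print(determine_padding((3, 3), "same"))  # ((1, 1), (1, 1))
print(determine_padding((2, 4), "same"))  # ((0, 1), (1, 2))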
def align_structures(reference_traj, target_traj):
    """
    Given a reference trajectory, this function performs a structural
    alignment for a second input trajectory, with respect to the reference.

    :param reference_traj: The trajectory to use as a reference for alignment.
    :type reference_traj: `MDTraj() trajectory <http://mdtraj.org/1.6.2/api/generated/mdtraj.Trajectory.html>`_

    :param target_traj: The trajectory to align with the reference.
    :type target_traj: `MDTraj() trajectory <http://mdtraj.org/1.6.2/api/generated/mdtraj.Trajectory.html>`_

    :returns:
        - aligned_target_traj ( `MDTraj() trajectory <http://mdtraj.org/1.6.2/api/generated/mdtraj.Trajectory.html>`_ ) - The coordinates for the aligned trajectory.
    """
    aligned_target_traj = target_traj.superpose(reference_traj)
    return aligned_target_traj
ba23ee847deb2ac54cbc3c8d973ce02e0c7f7db2
591,548
def get_cleanup_step(project, build):
    """Returns the step for cleaning up after doing |build| of |project|."""
    return {
        'name': project.image,
        'args': [
            'bash',
            '-c',
            'rm -r ' + build.out,
        ],
    }
d3eb71a936fe482627c13b73a442eaf776061def
409,163
import json


def fake_data_set_data(fake_data_set):
    """Fake suggest api data set data.

    Uses fake_data_set file fixture to generate the data.
    """
    with open(fake_data_set) as f:
        return json.load(f)
729daaa2fcbc46baddcc9048125df87b93834b5c
389,929
def find_ns_subelements(element, subelement_name, ns_dict):
    """
    returns list of subelements with given name in any given namespace

    Arguments:
    - element (ElementTree.Element): main element to search in
    - subelement_name (string): searched name of element
    - ns_dict: dict of prefixes and corresponding full namespaces

    Hint:
    To search for parent element, add "/.." in the end.
    Example:
    find_ns_subelements(root, "my_element/../..", ns_dict)
    will return list of grandparents of elements "my_element"
    """
    subelements = []
    # Search term explanation:
    # .//* read from right to left means:
    # all elements (*) in any level (//) in top-level element (.)
    # If you want to search nodes with some attribute:
    # .//*[@attr='something'] means:
    # nodes with attribute attr='something' located anywhere (.//*)
    # Our case: elements with namespace (in {}) and name, anywhere:
    # .//{namespace}name
    # We are looking for given element in any namespace.
    # In newer Python versions you can use *
    # and the solution would be one-liner:
    # return element.findall(f".//{{*}}{subelement_name}")
    # Below code should be compatible with older versions
    for prefix in ns_dict:
        ns = ns_dict[prefix]
        match = f".//{{{ns}}}{subelement_name}"
        found = element.findall(match)
        for subelement in found:
            subelements.append(subelement)
    return subelements
1bbd87025ba9cac503b4ceb7523485a9b33d5637
669,739
def number_with_precision(number, precision=3):
    """
    Formats a ``number`` with a level of ``precision``.

    Example::

        >>> number_with_precision(111.2345)
        111.235
    """
    formstr = '%01.' + str(precision) + 'f'
    return formstr % number
b6b457d8ea0b69dc80d3c336f1232207d6aa357b
373,310
import requests


def my_requests_request(method, url, **kwargs):
    """
    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send
        in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects``
        (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple
        ``('filename', fileobj, 'content_type')`` or a 4-tuple
        ``('filename', fileobj, 'content_type', custom_headers)``, where
        ``'content-type'`` is a string defining the content type of the
        given file and ``custom_headers`` a dict-like object containing
        additional headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable
        GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) whether the SSL cert will be verified.
        A CA_BUNDLE path can also be provided. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be
        immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return requests.request(method=method, url=url, timeout=10, **kwargs)
8f86acea82c6cc8677f2a53ebcd30e4d0722c49d
122,415
def flatten_lists(*lists):
    """Flatten several lists into one list.

    Examples
    --------
    >>> flatten_lists([0, 1, 2], [3, 4, 5])
    [0, 1, 2, 3, 4, 5]

    Parameters
    ----------
    lists : an arbitrary number of iterable collections
        The type of the collections is not limited to lists, they can also
        be sets, tuples, etc.

    Returns
    -------
    flat_list : a flattened list
    """
    return [x for sublist in lists for x in sublist]
23c25da8ac99c440543efdd52a7a98f2f378bd8b
117,086
def convert_to_oneline(multiline: str) -> str:
    """Converts a multiline Sleep command to a single line.

    Args:
        multiline (str): The multiline Sleep command.

    Returns:
        str: A single-lined version of the same Sleep command.
    """
    # Format wrapper so it sends as one-line
    oneline = multiline.replace('\n', '')
    # Replace 4 spaces with nothing (if tabbed but using spaces as tabs);
    # the flattened source collapsed this literal, reconstructed per the comment
    nospaces = oneline.replace('    ', '')
    # Replace tabs with nothing
    notabs = nospaces.replace('\t', '')
    return notabs
edf833ff0920673bbab1cff2122f66b22bac08cc
95,782
def filter_on_TCRB(df):
    """
    Only take sequences that have a resolved TCRB gene for V and J.
    """
    return df[df['v_gene'].str.contains('^TCRB')
              & df['j_gene'].str.contains('^TCRB')]
1fb6c8bddff58f178085b00db52589e14bd9fae1
359,423
def get_shap_values(model_manager, direct_id, target=None):
    """Get the SHAP values of features.

    Args:
        model_manager: ModelManager, an object containing all prediction models
        direct_id: the identifier of the patient's related entry in the target
            entity (e.g., the admission id).
        target: the identifier of the prediction target

    Returns:
        A dict mapping prediction target to features' SHAP values for this
        prediction.
    """
    shap_values = model_manager.explain(id=direct_id, target=target)
    if target is None:
        return {target: sv.loc[0].to_dict() for target, sv in shap_values.items()}
    else:
        return shap_values.loc[0].to_dict()
201ecc08c87262f7dd3aa34f7b2164b6489de405
631,890
def o_to_matsubara_idx_b(o):
    """
    Convert index in "o" convention to bosonic Matsubara index

    Parameters
    ----------
    o : int
        2*n

    Returns
    -------
    n : int
    """
    assert o % 2 == 0
    return int(o / 2)
347313ac016033360910d94e19c7d3ef8bc3f7e3
16,408
from datetime import datetime
import uuid


def _get_file_name() -> str:
    """
    Create a random file name.

    Returns:
        str: Randomised file name.
    """
    return f'{datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")}_{str(uuid.uuid4())[:8]}.avro'
3fb18df2c9b6326037b4aa3d9576ef7f8b87209b
579,606
def Pack(flatten, nmap_list):
    """Packs the list of tensors according to `.NestedMap` in `nmap_list`.

    `Pack` is loosely the inverse of `Flatten`.

    Args:
        flatten: A list of tensors.
        nmap_list: A list of `.NestedMap`.

    Returns:
        A list of `.NestedMap`, say ret is the returned list. We have

        1. len(ret) == len(nmap_list);
        2. recursively, ret[i] has the same keys as nmap_list[i];
        3. Flatten(ret) == flatten;
    """
    if not isinstance(flatten, (list, tuple)):
        flatten = [flatten]
    ret = []
    for x in nmap_list:
        # x needs num values from the head of flatten.
        num = len(x.Flatten())
        ret += [x.Pack(flatten[:num])]
        flatten = flatten[num:]
    assert not flatten, 'flatten does not match nmap_list.'
    return ret
4c0caff454f269dc91715d4dbd8a9a35fafabf86
642,994
import re


def _escape(line):
    """
    Escape the syntax-breaking characters.
    """
    line = line.replace('[', r'\[').replace(']', r'\]')
    line = re.sub('\'', '', line)  # ' is unescapable afaik
    return line
8b43b7ddf42af68866eb81fdf91036d28489f897
664,561
from datetime import datetime


def convert_date_to_timestamp(date_to_convert):
    """
    This function is useful to convert a date (YYYYmmdd) to a timestamp

    Args:
        date_to_convert (str): date (YYYYmmdd) to be converted to a timestamp

    Returns:
        (timestamp): date_to_convert converted to a timestamp
    """
    return int(datetime.strptime(date_to_convert, '%Y%m%d').timestamp())
a43add163f3c303246ac92ad043cbff452e7cc6b
572,849
def passthrough_properties(field_name, *property_names):
    """
    This function is designed to be used as a class decorator that takes the
    name of an attribute that different properties of the class pass through
    to. For example, if we have a self.foo object, and our property 'bar'
    would return self.foo.bar, then we'd set 'foo' as the field_name, and
    'bar' would go in the property_names list.

    Args:
        field_name: The name of the attribute of the object we pass property
            calls through to.
        *property_names: List of property names that we do the pass-through to.

    Returns:
        A function that acts as a decorator for the class which attaches the
        properties to it.
    """
    def wrapped(cls):
        """Wrapper around a class decorated with the properties"""
        for name in property_names:
            def generate_property(prop_name):
                """Helper function to generate the getter/setter functions
                for each property"""
                def get_func(self):
                    """Getter function for the property"""
                    parent = getattr(self, field_name)
                    return getattr(parent, prop_name)

                def set_func(self, value):
                    """Setter function for the property"""
                    parent = getattr(self, field_name)
                    setattr(parent, prop_name, value)

                setattr(cls, prop_name, property(get_func, set_func))
            generate_property(name)
        return cls
    return wrapped
9771ff06c5645d48283d76757d48ad7989f69f9f
494,029
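A small usage sketch of passthrough_properties; the Engine/Car classes and the throttle attribute are made up for illustration:

class Engine:
    def __init__(self):
        self.throttle = 0.0

@passthrough_properties('engine', 'throttle')
class Car:
    def __init__(self):
        self.engine = Engine()

car = Car()
car.throttle = 0.75           # setter forwards to car.engine.throttle
print(car.engine.throttle)    # 0.75
print(car.throttle)           # 0.75, read back through the generated property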
import itertools


def all_pairs(seq):
    """Produce all pairs from seq, but not (a, a)"""
    return itertools.combinations(seq, 2)
b26676d21660f6aa1052d832e7f6bc3b7014befa
478,664
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split the argument into words using split, capitalize each word using
    capitalize, and join the capitalized words using join. If the optional
    second argument sep is absent or None, runs of whitespace characters are
    replaced by a single space and leading and trailing whitespace are
    removed, otherwise sep is used to split and join the words.
    """
    return (sep or ' ').join(x.capitalize() for x in s.split(sep))
5fe61ed2da2ea011e8f989c2b40a84a90cb1addc
580,957
def _is_nth_child_of_kind(stack, allowed_nums, kind):
    """
    Checks if the stack contains a cursor which is of the given kind and the
    stack also has a child of this element whose number is in the
    allowed_nums list.

    :param stack: The stack holding tuples of (parent cursor, child number).
    :param allowed_nums: List/iterator of child numbers allowed.
    :param kind: The kind of the parent element.
    :return: Number of matches.
    """
    is_kind_child = False
    count = 0
    for elem, child_num in stack:
        if is_kind_child and child_num in allowed_nums:
            count += 1

        if elem.kind == kind:
            is_kind_child = True
        else:
            is_kind_child = False

    return count
b03d28f7a0dde19d8bb585186944ceb5a3a9da7a
93,021