Columns: content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
def raw_temperature(device):
    """Get a raw temperature reading from the temperature sensor."""
    with open(device, 'r') as sensor:
        raw_reading = sensor.readlines()
    return raw_reading
b4ab72a5cf816e7ae41908c4edf50923eae44820
500,616
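A minimal usage sketch, assuming a DS18B20-style 1-Wire sensor; the device path below is hypothetical and will differ per system:

    # Hypothetical 1-Wire sensor path; adjust for your hardware.
    lines = raw_temperature('/sys/bus/w1/devices/28-000005e2fdc3/w1_slave')
    # readlines() returns the raw kernel output; with this sensor family the
    # second line typically ends with something like 't=23125' (milli-degrees C).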
def ParseLogLines(log_file_lines):
    """Parse a log file produced by the profiled run of clank.

    Args:
      log_file_lines: array of lines in log file produced by profiled run

    Below is an example of a small log file:
      5086e000-52e92000 r-xp 00000000 b3:02 51276  libchromeview.so
      secs       usecs  pid:threadid    func
      START
      1314897086 795828 3587:1074648168 0x509e105c
      1314897086 795874 3587:1074648168 0x509e0eb4
      1314897086 796326 3587:1074648168 0x509e0e3c
      1314897086 796552 3587:1074648168 0x509e07bc
      END

    Returns:
      call_info list with list of tuples of the format
      (sec, usec, call id, function address called)
    """
    call_lines = []
    line = log_file_lines[0]
    assert "r-xp" in line
    end_index = line.find('-')
    vm_start = int(line[:end_index], 16)
    for line in log_file_lines[2:]:
        fields = line.split()
        if len(fields) == 4:
            call_lines.append(fields)
    # Convert strings to int in fields.
    call_info = []
    for call_line in call_lines:
        (sec_timestamp, usec_timestamp) = map(int, call_line[0:2])
        callee_id = call_line[2]
        addr = int(call_line[3], 16)
        if vm_start < addr:
            addr -= vm_start
        call_info.append((sec_timestamp, usec_timestamp, callee_id, addr))
    return call_info
c45de7c2f71dcb78cacb6c9380564dab8645e455
363,859
from typing import Tuple


def get_offense_enrichment(enrichment: str) -> Tuple[bool, bool]:
    """Receives the enrichment asked by the user and returns boolean values
    indicating which enrichment should be done.

    Args:
        enrichment (Optional[str]): Enrichment argument.

    Returns:
        (bool, bool): Tuple of (ip_enrich, asset_enrich).
    """
    if enrichment == 'IPs And Assets':
        return True, True
    if enrichment == 'IPs':
        return True, False
    return False, False
70f1b1708962f752fcb3541b009f0065e47f2844
206,387
def decimal_to_ip(ip_decimal):
    """Submit a decimal IP value between 0 and 2**32 - 1.

    Returns the IP address as a zero-padded string: nnn.nnn.nnn.nnn
    If ip_decimal is invalid, returns -1.
    """
    # Test it is decimal and in range, else return -1.
    try:
        ip_decimal = int(ip_decimal)
    except ValueError as e:
        print("ValueError: {}".format(e))
        return -1
    if ip_decimal < 0 or ip_decimal > 4294967295:  # 2**32 - 1
        return -1
    # Convert. E.g. 511 returns 000.000.001.255
    s = ""
    for i in reversed(range(4)):
        s += "{:>03}.".format(ip_decimal // 256**i)
        ip_decimal = ip_decimal % 256**i
    return s[:-1]


# To check the function:
#   test_data = [0, 1, 255, 256, 511, 512, 4294967294, 4294967295,
#                -1, 4294967296, "qwerty"]
#   for value in test_data:
#       print("{}: {}".format(value, decimal_to_ip(value)))
752cb9f826e18b782c17ee8dc3ec3fe2a2894849
484,952
def fib(n):
    """Return Fibonacci sequence with length "n".

    Args:
        n: The length of the sequence to return.

    Returns:
        A list containing the Fibonacci sequence.
    """
    result = []
    a, b = 0, 1
    for _ in range(n):
        result.append(b)
        a, b = b, a + b
    return result
8cc39b4e351f6530b306999c5ee5e1c4a07ec153
613,526
def boxcox_reverse(x_t, lam):
    """Perform the inverse Box-Cox transform to return to original units.

    Parameters
    ----------
    x_t : xarray.DataArray
        Contains transformed data, with standard dimensions time x lat x lon
    lam : xarray.DataArray
        Selected lambda values for the Box-Cox transform, with standard
        dimensions lat x lon

    Returns
    -------
    orig_scale : xarray.DataArray
        Data in the original scale, of same dimension as x_t
    """
    orig_scale = (lam * x_t + 1) ** (1 / lam)
    orig_scale = orig_scale.fillna(0).transpose('time', 'lat', 'lon')
    return orig_scale
91d6824e9831e7896b30ecf1ad8a4136733777cc
288,218
def format_percent(n, baseline):
    """Format a ratio as a percentage (showing two decimal places).

    Returns a string. Accepts baseline zero and returns '??' or '--'.
    """
    if baseline == 0:
        return "--" if n == 0 else "??"
    return "%.2f%%" % (100 * n / baseline)
274c3dce2f88ec9fa5f0d4ea41890a8dfd7d79cb
643,186
import time


def wait_until(some_predicate, timeout=5, period=0.25, *args, **kwargs):
    """
    :param some_predicate: must be a callable that returns a boolean
    :param timeout: timeout in seconds
    :param period: time between each check
    :param args: for predicate
    :param kwargs: for predicate
    :return: False on timeout
    """
    must_end = time.time() + timeout
    while time.time() < must_end:
        if some_predicate(*args, **kwargs):
            return True
        time.sleep(period)
    return False
8b53e83a0fced1b4bc6a2ee02a92d0d2df5d01a2
475,459
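A minimal polling sketch; the lock-file path is hypothetical, and extra positional arguments are forwarded to the predicate:

    import os

    # Poll every 0.5 s, give up after 10 s.
    ready = wait_until(os.path.exists, 10, 0.5, '/tmp/example.lock')
    if not ready:
        print('timed out waiting for the lock file')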
import re


def remove_comments(text: str) -> str:
    """Remove Python-like comments from a string.

    Args:
        text (str): String with Python-like comments to be removed

    Returns:
        str: String with removed comments
    """
    # Note: this also strips '#' characters that appear inside string
    # literals, since the regex has no notion of Python syntax.
    return re.sub(r"#.*", "", text)
21a831012f881aabd77df9267b991051b91f06d0
155,015
import re
import logging


def GetChromeosVersion(str_obj):
    """Helper method to parse output for CHROMEOS_VERSION_STRING.

    Args:
        str_obj: a string, which may contain Chrome OS version info.

    Returns:
        A string, value of CHROMEOS_VERSION_STRING environment variable set
        by chromeos_version.sh. Or None if not found.
    """
    if str_obj is not None:
        match = re.search(r'CHROMEOS_VERSION_STRING=([0-9_.]+)', str_obj)
        if match and match.group(1):
            logging.info('CHROMEOS_VERSION_STRING = %s' % match.group(1))
            return match.group(1)
    logging.info('CHROMEOS_VERSION_STRING NOT found')
    return None
d0dc48eb6c5f9c501024f155535e6e9adb1061c0
38,011
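A quick demo against a line in the expected format; the version number is made up:

    output = 'CHROMEOS_VERSION_STRING=0.13.587 exported by chromeos_version.sh'
    print(GetChromeosVersion(output))  # -> '0.13.587'
    print(GetChromeosVersion(None))    # -> None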
def _find_file_meta(metadata, container, saltenv, path):
    """
    .. versionadded:: 3001

    Looks for a file's metadata in the Azure Blob Container cache file.

    :param metadata: The metadata for the container files.
    :param container: The name of the target Azure Blob Container.
    :param saltenv: Specifies which environment the container represents.
    :param path: The path of the file in the container.
    """
    env_meta = metadata[saltenv] if saltenv in metadata else {}
    container_meta = env_meta[container] if container in env_meta else {}
    for item_meta in container_meta:
        item_meta = dict(item_meta)
        if "name" in item_meta and item_meta["name"] == path:
            return item_meta
eda382721f73f77f8322375e7ae06040e327ddcd
179,716
def calculate_yearly_total(sales: list[list[str]]) -> int:
    """Calculate the total yearly sales."""
    total: int = 0
    for row in sales:
        total += int(row[-1])
    return total
dbf5fbd038d0e7b2fad7e6794803b919e5902518
696,640
def site(url, site_number=None):
    """Returns the site which the url is of."""
    if site_number is not None:
        site_number = int(site_number)
    # Use == rather than `is` for integer comparison: identity tests against
    # literals are implementation-dependent and a SyntaxWarning in Python 3.8+.
    if "panda" in url or site_number == 1:
        return "Mangapanda"
    elif "mangasee" in url or site_number == 2:
        return "Mangasee"
    else:
        raise Exception("Site Not Supported. See Help / Readme.md for supported sites.")
36625478d5a1813f409cb4de59667b3e1eccc275
82,470
def simtk_vec_to_list(vec):
    """Convert SimTK::Vec_<T> to a Python list."""
    temp = []
    for i in range(vec.size()):
        temp.append(vec[i])
    return temp
dae089de940b7ac2d725e948342e121ff69a4046
600,012
def normalize_newlines(s: str):
    """Normalizes newlines, i.e. '\r\n' to '\n'."""
    # Note that web browsers send \r\n but our training data uses \n.
    return s.replace("\r\n", "\n").replace("\r", "\n")
f8b7a4f092b64b3f67245880d7d71bc5835014f3
293,034
import torch


def tril_inverse(tril_matrix: torch.Tensor) -> torch.Tensor:
    """Invert a lower-triangular matrix.

    Args:
        tril_matrix (torch.Tensor): Lower-triangular matrix to invert.
            Shape should be `(*, matrix_dim, matrix_dim)`.

    Returns:
        torch.Tensor: Inverted matrix. Shape should be
        `(*, matrix_dim, matrix_dim)`.
    """
    assert tril_matrix.shape[-1] == tril_matrix.shape[-2], "Input must be square!"
    matrix_dim = tril_matrix.shape[-1]
    batch_dims = tril_matrix.shape[:-2]

    # Get an identity matrix
    identity = torch.eye(matrix_dim, device=tril_matrix.device)

    # View identity w/ batch dimensions
    identity = identity.reshape(
        (1,) * len(batch_dims) + (matrix_dim, matrix_dim)
    ).expand(tril_matrix.shape)

    # Invert with triangular solve. (torch.triangular_solve is deprecated in
    # newer PyTorch releases in favor of torch.linalg.solve_triangular.)
    inverse = torch.triangular_solve(identity, tril_matrix, upper=False).solution

    assert inverse.shape == tril_matrix.shape
    return inverse
195309cf5d8ed21362137dae9833b259afd03ec9
549,552
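A small sanity check, assuming a PyTorch version where torch.triangular_solve still exists; it builds a well-conditioned lower-triangular matrix and compares against torch.inverse:

    L = torch.randn(3, 3).tril()
    L.diagonal().abs_().add_(1.0)  # keep the diagonal away from zero
    print(torch.allclose(tril_inverse(L), torch.inverse(L), atol=1e-5))  # -> True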
def invert_dict(dictio):
    """Invert a dictionary whose values are lists and are not unique.

    Args:
        dictio (dictionary): the dictionary to invert.

    Returns:
        inv_dict (dictionary): a dictionary where the old values are now keys
        and the old keys are now in the value lists.
    """
    inv_dict = {}
    for k, v in dictio.items():
        for g in v:
            if g not in inv_dict:
                inv_dict[g] = []
            inv_dict[g].append(k)
    return inv_dict
65c3e68b85c8f3efac1892f0dffd735fd5d52d3b
184,485
def _sub2instance(initial_condition, perturbation_index, total_perturbations):
    """Converts initial condition index (ci) and perturbation index (pi)
    subscripts to an instance number (ii).

    Instances use 1-based indexes and vary according to this function:

        ii = ci * len(PERTURBATIONS) + pi + 1

    where both pi and ci use 0-based indexes.
    """
    instance = initial_condition * total_perturbations + perturbation_index + 1
    return instance
7c29b8d8b43509c200201cd0d046f3c287ac0057
621,859
def kronecker_product(matrices):
    """Performs the Kronecker product of a list of matrices.

    Args:
        - matrices (a list of matrices)

    Returns:
        - matrix - the result of kronecker_product of matrices
    """
    if len(matrices) < 1:
        # The original returned the exception instead of raising it.
        raise RuntimeError('The input should be a list of matrices.')
    if len(matrices) == 1:
        return matrices[0]
    matrix_0 = matrices[0]
    matrix_1 = kronecker_product(matrices[1:])
    size_0 = matrix_0.size()
    size_1 = matrix_1.size()
    # Outer product of the flattened matrices, then reshuffle into the
    # (m0*m1) x (n0*n1) Kronecker layout.
    res = matrix_0.contiguous().view(-1).unsqueeze(1) * \
        matrix_1.contiguous().view(-1).unsqueeze(0)
    res = res.view(size_0[0], size_0[1], size_1[0], size_1[1])
    res = res.transpose(1, 2).contiguous().view(
        size_0[0] * size_1[0], size_0[1] * size_1[1])
    return res
f808a55270a5411279ba4d22ba7ead82661a6a91
607,193
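A sanity check against torch.kron, assuming PyTorch >= 1.8 where torch.kron is available:

    import torch

    A = torch.randn(2, 3)
    B = torch.randn(4, 5)
    print(torch.allclose(kronecker_product([A, B]), torch.kron(A, B)))  # -> True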
def snippet(func):
    """Mark ``func`` as a snippet example function."""
    func._snippet = True
    return func
2eccf19d866af8b44568ed82dc72cca45287d081
692,065
def two_pair(ranks):
    """If there are two pair, return the two ranks as a tuple:
    (highest, lowest); otherwise return None."""
    two_pair = set()
    for r in ranks:
        if ranks.count(r) == 2:
            two_pair.add(r)
    two_pair_lst = sorted(two_pair, reverse=True)
    return tuple(two_pair_lst) if len(two_pair) == 2 else None
bf5c7b5059cf9a83c2c77109b2e4fe76de487853
122,938
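A few quick checks:

    print(two_pair([9, 9, 5, 5, 2]))  # -> (9, 5)
    print(two_pair([9, 9, 5, 3, 2]))  # -> None (only one pair)
    print(two_pair([9, 9, 9, 5, 5]))  # -> None (trips + pair, not two pair)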
import time


def nonce() -> str:
    """Return a nonce counter (monotonic clock).

    References:
        * https://support.kraken.com/hc/en-us/articles/360000906023-What-is-a-nonce-
    """
    return str(time.monotonic_ns())
fb6221fef4c2c8af66200c4c9da8f6253854b186
705,194
from typing import Tuple
from typing import List


def split_einsum_formula(formula: str) -> Tuple[List[str], str]:
    """Splits an einsum formula string into its component axis names."""
    input_formula, output_formula = formula.split('->')
    return input_formula.split(','), output_formula
a99a12c15ffdc64958212efc9894eeddc2a28960
364,408
def bprop_scalar_exp(x, out, dout):
    """Backpropagator for primitive `scalar_exp`."""
    return (dout * out,)
88951703e97d346fec233ba90bea3d3e5ba9c0a1
479,550
def manhattanDistance(x1, y1, x2, y2):
    """Return the Manhattan distance between two points."""
    return abs(x1 - x2) + abs(y1 - y2)
52dad7cea18369e6208db57c46c01b5a2f2a8e65
544,044
def from_epsg(code):
    """Given an integer code, returns an EPSG-like mapping.

    Note: the input code is not validated against an EPSG database.
    """
    if int(code) <= 0:
        raise ValueError("EPSG codes are positive integers")
    return {'init': "epsg:%s" % code, 'no_defs': True}
1680a365ec01ddb81d314a899c6dc818d466bc65
598,977
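Example output for the common WGS84 code:

    print(from_epsg(4326))
    # -> {'init': 'epsg:4326', 'no_defs': True}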
def next_collatz(number):
    """Get the next number in a Collatz series.

    :param number: The previous number in the Collatz series.
    :returns: The next number in the Collatz series.
    """
    if number % 2 == 0:
        number = number // 2
    else:
        number = (3 * number) + 1
    return number
33bc29787dd90b04e0be82dc43c39ecb7ad44710
209,887
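Iterating the function from 6 walks the familiar sequence down to 1:

    n = 6
    seq = [n]
    while n != 1:
        n = next_collatz(n)
        seq.append(n)
    print(seq)  # -> [6, 3, 10, 5, 16, 8, 4, 2, 1]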
def mergeDicts(*_dicts):
    """Take any number of dictionaries as arguments and merge them together."""
    result = {}
    for d in _dicts:
        result.update(d)
    return result
2ed13409bc341d5399c6741833ee9a9b103014aa
166,760
def _insert_statement(name, d):
    """Generate an insert statement.

    ex) insert into foo values (:a, :b, :c, ...)
    """
    keycols = ', '.join(":" + c.strip() for c in d)
    return "insert into %s values (%s)" % (name, keycols)
202c342a84695c5bd485c161e67a3acc14be3313
212,721
def corrcoef(pred, target):
    """Torch implementation of the Pearson correlation coefficient.

    Args:
        pred (torch.Tensor): model prediction
        target (torch.Tensor): model label

    ::References::
        # np.corrcoef in torch from @mdo
        # https://forum.numer.ai/t/custom-loss-functions-for-xgboost-using-pytorch/960
    """
    pred_n = pred - pred.mean()
    target_n = target - target.mean()
    pred_n = pred_n / pred_n.norm()
    target_n = target_n / target_n.norm()
    return (pred_n * target_n).sum()
b2c4c260bc0c750e4d8ebcd6199dd88abb52ae36
326,373
def split_entity_id(entity_id):
    """Split a state entity_id into domain, object_id."""
    return entity_id.split(".", 1)
546b0809b3a9d5767c8c1ca2d2a67cd89593261c
352,285
def endf_float_str(value):
    """Return the ENDF format string of a floating point number.

    Parameters
    ----------
    value : float
        A single floating point value

    Returns
    -------
    valstring : str
        An ENDF format string of input par `value`
    """
    if abs(value) < 1e-9 or abs(value) > 9.999e9:
        raise ValueError("value is too small or too big")
    valstring = "{:>13.6e}".format(value).replace('e', '') \
        .replace('+0', '+').replace('-0', '-')
    # With AMPX-written files we use "-0" instead of "+0" for some reason.
    if '+0' in valstring:
        valstring = valstring.replace('+0', '-0')
    return valstring
502c7e777cc15da0ec0a9c033a536cea03306d40
635,784
def list_order_by(l, firstItems):
    """Given a list and a list of items to be first, return the list in the
    same order except that it begins with each of the first items."""
    l = list(l)
    for item in firstItems[::-1]:  # iterate backwards
        if item in l:
            l.remove(item)
            l.insert(0, item)
    return l
2179a18af22924dc021d86d94437318ce225f045
92,464
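A quick demonstration of the reordering:

    print(list_order_by(['a', 'b', 'c', 'd'], ['c', 'a']))
    # -> ['c', 'a', 'b', 'd']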
import datetime
from typing import Optional
from typing import List


def get_years(
    begin: datetime.datetime,
    end: Optional[datetime.datetime] = None,
) -> List[int]:
    """From the beginning datetime, get the years (as ints) that we are going
    to read."""
    max_year = datetime.datetime.utcnow().year if end is None else end.year
    return list(range(begin.year, max_year + 1))
90bae34ba055a695b04278e78069186626818f82
154,017
def _index_tuple_literal_eval(string: str):
    """Evaluates a string literal of the form '(recipe_num, malt)' (the outer
    delimiters are stripped) and returns a tuple (recipe_num (int), malt (str))."""
    rec_malt = string[1:-1].split(', ')
    rec = int(rec_malt[0])
    malt = ', '.join(rec_malt[1:])
    return rec, malt
ffb8ca14ce16abff69c964016173c7703bbb76ed
140,858
def corrections(mean_onbit_density):
    """Calculate corrections.

    See :func:`similarity` for explanation of corrections.

    Args:
        mean_onbit_density (float): Mean on bit density

    Returns:
        float: S\ :sub:`T` correction, S\ :sub:`T0` correction
    """
    p0 = mean_onbit_density
    corr_st = (2 - p0) / 3
    corr_sto = (1 + p0) / 3
    return corr_st, corr_sto
6a1a8a75d05632026aad96ee68d50279c269c7ba
201,308
from typing import Any


def squeeze_tuple(item: Any) -> Any:
    """Reduces a tuple to a single item only if it consists of a single item.

    Args:
        item: any sequence or a single item.

    Returns:
        a single item if possible, or an input sequence if not.

    >>> from redex import util
    >>> util.squeeze_tuple((1,))
    1
    >>> util.squeeze_tuple((1,2))
    (1, 2)
    """
    return item[0] if isinstance(item, tuple) and len(item) == 1 else item
eb8142c640fb28d9448893ac7398dd0f0749b9ae
54,360
def parse_example(sample, verbose=False):
    """Parse samples into comment, premises, hypothesis, relation and validity."""
    # Split sample into lines, discarding empty ones.
    lines = sample.split('\n')
    lines = [line.strip() for line in lines if line.strip() != '']
    # Get sample comment
    sample_comment = lines[0]
    # Get premises (lines starting with -)
    premises = [line.split('-')[1].strip() for line in lines if line.startswith('-')]
    # Get hypothesis
    hypothesis = lines[-3].split(':')[1].strip()
    # Get relation
    relation = lines[-2].split(':')[1].strip()
    # Get validity
    validity = lines[-1].split(':')[1].strip()
    # Print sample
    if verbose:
        print('\nExample', sample_comment)
        print('Premises:')
        for prem in premises:
            print('-', prem)
        print('Hypothesis:', hypothesis)
        print('Relation:', relation)
        print('Validity:', validity)
    # Return segmented and clean parts of the sample
    return sample_comment, premises, hypothesis, relation, validity
789d9d967447bfb5fe1cd5113fb4b01cd82daddc
137,438
def value_index_map(array):
    """Given an input array, return a dict with key/value pairs (k, i), where
    i is the 0-based index at which value k appears in the input array.

    Assumes array elements are unique.

    Used to get a mapping from pks of a queryset axis to the 0-based index.

    :param array:
    :return: dict
    """
    return {v: i for i, v in enumerate(array)}
aa623b0a6fed762caa005506f4729b079f97fbb2
14,817
def count(cls):
    """Return the number of objects of this type that exist in the database."""
    return cls.objects.count()
dba5d6e08876803a67e3e93b60352e56dcc63eb4
444,204
import torch


def log_softmax(x):
    """Computes the log-softmax activation over the last dimension of x.

    Args:
        x: Tensor, data to compute the softmax activations for.

    Returns:
        log_x_softmax, Tensor same shape as x. log-softmax activations of x
        over the last dimension.
    """
    log_denom = torch.logsumexp(x, dim=(len(x.shape) - 1), keepdim=True)
    log_softmax = x - log_denom
    return log_softmax
26f3349e767d09ebcf1b6a6615a19f2ccdd97313
482,074
def get_note_detail(notes):
    """Iterate over note details from the response and prepare RiskSense context.

    :param notes: note details from the response.
    :return: List of note elements that include the required fields from resp.
    """
    return [{
        'UserID': note.get('user', {}).get('id', ''),
        'UserName': note.get('user', {}).get('name', ''),
        'Note': note.get('note', ''),
        'Date': note.get('date', '')
    } for note in notes]
6e19105d497079287618a455d0df988d904ef51c
474,651
def _update_indices(source_idcs, update_idcs, update):
    """For every element s in source_idcs, change every element u in
    update_idcs according to update, if u is larger than s."""
    if not update_idcs:
        return update_idcs
    for s in source_idcs:
        update_idcs = [u + update if u > s else u for u in update_idcs]
    return update_idcs
a934149dd48e3d61928be92e1cb656d394d7564c
447,483
import hashlib


def sha1_file(filename):
    """Return the hex string representation of the SHA1 checksum of the
    filename."""
    s = hashlib.sha1()
    with open(filename, "rb") as f:
        for line in f:
            s.update(line)
    return s.hexdigest()
b993ac9f025d69124962905f87b1968617bb33f5
1,032
import hashlib


def valid_proof(last_hash, proof):
    """Validates the proof.

    Multi-ouroboros: do the last five characters of the hash of the last
    proof match the first five characters of the hash of the new proof?

    IE: last_hash: ...AE912345, new hash 12345E88...
    """
    guess = str(proof).encode()
    guess_hash = hashlib.sha256(guess).hexdigest()
    return guess_hash[:5] == last_hash[-5:]
3fcd96379ff38e08bb3e760c14dd45a928cb2ba7
576,323
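A brute-force search in the spirit of proof-of-work, assuming the convention above; purely illustrative, and since five hex characters must match it needs on the order of 16**5 (about one million) attempts:

    last_hash = hashlib.sha256(b'previous proof').hexdigest()
    proof = 0
    while not valid_proof(last_hash, proof):
        proof += 1
    # The first five characters of the new hash match the tail of last_hash.
    print(proof, hashlib.sha256(str(proof).encode()).hexdigest()[:5])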
from typing import Dict
from typing import Pattern
import re


def satisfy_regex_match(model: Dict[str, str], pattern: Pattern,
                        field_to_check: str):
    """Check whether the model (job or build) should be included according to
    the user input. The model should be added if the information provided in
    field_to_check (the model name or url, for example) matches the regex
    pattern.

    :param model: model information obtained from jenkins
    :type model: str
    :param pattern: regex pattern that the model name should match
    :type pattern: :class:`re.Pattern`
    :param field_to_check: model field to perform the check
    :type field_to_check: str
    :returns: Whether the model satisfies user input
    :rtype: bool
    """
    return re.search(pattern, model[field_to_check]) is not None
0ff7724cbe640d41bb54c1a188438f71b29f7e0f
214,691
def intcode_six(parameter_list, code_list, i):
    """If the first parameter is zero, set the instruction pointer to the
    second parameter. Returns i."""
    if parameter_list[0] == 0:
        i = parameter_list[1]
    return i
c62910ab47d339b31972ea1afe1a3f31d849ad34
590,865
import networkx as nx


def paga_degrees(adata):
    """Compute the degree of each node in the abstracted graph.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    degrees : list
        List of degrees for each node.
    """
    g = nx.Graph(adata.uns['paga']['confidence'])
    degrees = [d for _, d in g.degree(weight='weight')]
    return degrees
2aec37a80ce5f58b17d5eb85f60fb3b75820dfdc
554,282
def get_upload_path(instance, filename):
    """Return the media path for uploaded images."""
    return f"uploads/images/{filename}"
23821722941e49ff30252bc3146c51011afa34a0
463,090
def send_to_right_side(targets, values):
    """Send the given target values to the right of all other values.

    Example:
        targets = ["b", "x", "c"]
        values = ["a", "b", "c", "x", "y", "z"]
        send_to_right_side(targets, values)  # -> ("a", "y", "z", "b", "x", "c")

    Args:
        targets: Values to send to the right side.
        values: The values of all elements.

    Returns:
        A tuple of the elements of values in the desired permutation.
    """
    target_set = set(targets)
    return tuple([x for x in values if x not in target_set] + list(targets))
5f63bbf32115a26f53b1dd3def5b2dc507c8bc06
358,930
def sort_dict_alphabetically(d: dict) -> dict:
    """sort_dict_alphabetically sorts the content of a dictionary
    alphabetically by keys and returns the result as a new dictionary.

    Parameters
    ==========
    d : dict
        Dictionary which should be sorted.

    Returns
    =======
    dict
        Alphabetically sorted dictionary.
    """
    out = {}
    for k in sorted(d.keys()):
        out[k] = d[k]
    return out
918cb44bd9db6f6f3b10d6e0276f2c5400902ffc
146,237
def urlopen(*args, **kwargs):
    """Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
    the stdlib.
    """
    # Imported inside the function so the module loads lazily, as the
    # docstring promises (a top-level import would defeat the purpose).
    import urllib.request
    return urllib.request.urlopen(*args, **kwargs)
ad10527ae32e2323f85dc266516ceff36aa3497f
160,823
import json


def load_train_config(filename):
    """Load a configuration file."""
    with open(filename, 'r') as f:
        config = json.load(f)
    return config
c60a6f5893cff1008ec7ed20f0afc3cd53bca388
665,011
def laplacian(field, m, s, irho):
    """Spatial discretization for the isotropic acoustic wave equation.

    For a 4th order in time formulation, the 4th order time derivative is
    replaced by a double laplacian:

        H = (laplacian + s**2/12 laplacian(1/m*laplacian))

    Parameters
    ----------
    field : TimeFunction
        The computed solution.
    m : Function or float
        Square slowness.
    s : float or Scalar
        The time dimension spacing.
    irho : Function or float
        Inverse density.
    """
    so = irho.space_order // 2
    Lap = sum([getattr(irho * getattr(field, 'd%s' % d.name)(x0=d + d.spacing/2, fd_order=so),
                       'd%s' % d.name)(x0=d - d.spacing/2, fd_order=so)
               for d in irho.dimensions])  # noqa
    return Lap
c208f262731f4bcbeba55d13da8923b0e4fbd8ae
300,594
def get_comp_name(text):
    """Extract the relevant content of a company name in an html tag.

    Parameters
    ----------
    text: str
        raw content in an html tag

    Returns
    -------
    name: str
        a clean company name after junk texts are filtered
    """
    name = ''
    count = 0
    start = False
    for char in text:
        if count == 2:
            return name
        if start:
            name += char
        if char == '\n':
            start = True
            count += 1
    # Fewer than two newlines seen: return whatever was accumulated instead
    # of falling off the end and returning None, as the original did.
    return name
04859b8c235c306aea7e9724b81c7e8005eb7d2e
383,238
import re


def safe_split(string, sep=','):
    """Safe split using REGEX.

    Splits a string correctly with a separator when it has parenthesis and/or
    brackets using multiple negative lookahead.

    Args:
        string: A string to split into many strings.
        sep: The separator to use.

    Returns:
        A list of strings.

    Example:
        input: "foo, bar [foo, baz], baz (foo, bar)"
        returns: ["foo", "bar [foo, baz]", "baz (foo, bar)"]
    """
    regex = re.escape(sep) + r'\s*(?![^\[\]]*\])(?![^()]*\))'
    return re.split(regex, string)
8214f68a28590c822a4c1e725fc325e9282de4c0
425,163
def pop(trace, instructions, reg):
    """Pop one item off the stack into reg."""
    instructions.append({"trace": trace, "op": "pop", "reg": reg})
    return instructions
76b2a1e9370f228c8e438bf9778434d78a22c0f0
656,934
def nomalize_image(image):
    """Normalize image from [0, 255] to [-1, 1]."""
    image = image / 255.0
    image = 2 * (image - 0.5)
    return image
eb07880036567cdaa44b78531face24dd697745d
663,952
def centralmoment(vi, k):
    """Converts raw distribution moments to central moments.

    Parameters
    ----------
    vi : array
        The first four raw distribution moments
    k : int
        The central moment (0 to 4) to calculate (i.e., k=2 is the variance)

    Returns
    -------
    cm : scalar
        The central moment itself
    """
    if k == 0:
        ans = 1
    elif k == 1:
        ans = 0
    elif k == 2:
        ans = vi[2] - vi[1]**2
    elif k == 3:
        ans = vi[3] - 3*vi[2]*vi[1] + 2*vi[1]**3
    elif k == 4:
        ans = vi[4] - 4*vi[3]*vi[1] + 6*vi[2]*vi[1]**2 - 3*vi[1]**4
    else:
        print('Can only calculate central moments k = 0 to 4. Sorry.')
        ans = None
    return ans
3ff6fa81c6334c1a334345f8293b0eb4c6a40476
118,395
def calc_gc(sequence):
    """Calculates GC percentage from a DNA sequence.

    Does not consider IUPAC ambiguity codes.

    :param sequence:
    :return: float
    """
    if len(sequence) == 0:
        raise ValueError("Sequence must have minimum length 1")
    gc = 0
    for char in sequence:
        # Compare case-insensitively for both bases (the original only
        # upper-cased the G comparison, so a lowercase 'c' was missed).
        if char.upper() in ('G', 'C'):
            gc += 1
    return float(gc) / len(sequence) * 100.0
72d586911142ce8a93a2c19e87b4d55d00d01115
623,382
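Quick checks, including the lowercase case the fix addresses:

    print(calc_gc("GCGC"))  # -> 100.0
    print(calc_gc("ATgc"))  # -> 50.0
    print(calc_gc("AT"))    # -> 0.0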
def find_subtree_indices(doc, dependency_type):
    """This function finds and returns the indices of the entire clause
    (each token) in the subtree to be removed.

    Args:
        doc: spaCy Doc of the clean sentence
        dependency_type: str. Options are "appos", "acl", "relcl", "advcl"

    Return:
        indices_to_remove_subtree: list of indices of the subtree
    """
    # List of indices of clause tokens to be removed in the sentence
    indices_to_remove_subtree = []
    # List of unique spaCy hashes for string tokens in the doc.
    # Position remains the same as in the original doc.
    hash_ids_of_tokens = [token.orth for token in doc]
    # Iterate through the doc to get the dep clause subtree
    for token in doc:
        # Check for the dependency label
        if token.dep_ == dependency_type:
            # Get the indices of the subtree: all tokens of the clause
            for subtree_token in token.subtree:
                # Get the unique hash id for the subtree token
                subtree_token_id = subtree_token.orth
                # Look up the token's index in the doc
                subtree_token_index_in_doc = hash_ids_of_tokens.index(subtree_token_id)
                # Add to the list of indices to be removed
                indices_to_remove_subtree.append(subtree_token_index_in_doc)
    # Return the list of indices
    return indices_to_remove_subtree
31c6b3c7120075f5abf2ddada2113c73177b031b
23,840
import torch


def permute_to_N_HWA_K(retina_tensor: torch.Tensor, num_classes: int) -> torch.Tensor:
    """Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K).

    This is the tensor output from the RetinaNet head per pyramid level.

    Usage:
        >>> permute_to_N_HWA_K(torch.randn(1, 4 * 9, 4, 4), 4).shape
        torch.Size([1, 144, 4])
    """
    assert retina_tensor.dim() == 4, retina_tensor.shape
    N, _, H, W = retina_tensor.shape
    retina_tensor = retina_tensor.view(N, -1, num_classes, H, W)
    retina_tensor = retina_tensor.permute(0, 3, 4, 1, 2)
    retina_tensor = retina_tensor.reshape(N, -1, num_classes)  # Size=(N, HWA, K)
    return retina_tensor
84f6768f38af2367d2c0d73a60282859d993331a
489,332
def get_classification_blob_names(is_training=True):
    """Classification blob names."""
    blob_names = ['rois']
    if is_training:
        # labels_int32 blob: categorical labels
        blob_names += ['labels_int32']
    return blob_names
128802242e1de499b261868b8f8adf28c3e20cf4
485,191
def parse_aunt_line(aunt_line):
    """parse_aunt_line parses a string and returns the aunt id and its
    attributes."""
    aunt_id = int(aunt_line[4:aunt_line.find(':')])
    attributes = aunt_line[aunt_line.find(':') + 1:].split(',')
    aunt = {}
    for attribute in attributes:
        name = attribute[:attribute.find(':')].strip()
        value = int(attribute[attribute.find(':') + 1:].strip())
        aunt[name] = value
    return aunt_id, aunt
1990adc1977260376d10724a527e8a7973874849
254,126
def set_checksum_args(arguments):
    """Argparse parses checksums as {'checksum_sha256': '<sha256_hash>'}.

    Return a list of these arguments in a format the Identifiers Service
    understands:

        "checksums": [
            {"function": "md5", "value": "fobarbas"},
            {"function": "sha256", "value": "foobarbaz"}
        ],

    Note: This modifies the values in 'arguments'.
    """
    checksum_args = [
        {'function': arg_name.replace('checksum_', '').replace('_', '-'),
         'value': arguments.pop(arg_name)}
        for arg_name in list(arguments.keys())
        if arg_name.startswith('checksum') and arguments[arg_name] is not None
    ]
    if checksum_args:
        arguments['checksums'] = checksum_args
    return arguments
1ca553f35b4753c93645c3d0482ded2c74595c02
413,683
def commandify(module_path: str) -> str:
    """Transform an input string into a command name usable in a CLI."""
    # foo.bar.this_key => this-key
    # rsplit keeps only the final path component; the original split(".", 1)
    # kept everything after the first dot, contradicting the example above.
    return module_path.rsplit(".", 1)[-1].replace("_", "-").lower()
b1c1e86fd60e6d6408e59c452c365f99858207a7
137,491
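Checks against the documented example:

    print(commandify("foo.bar.this_key"))  # -> 'this-key'
    print(commandify("THIS_KEY"))          # -> 'this-key'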
def fliplr_joints(_joints, _joints_vis, width, matched_parts):
    """Flip coords.

    joints: numpy array, nJoints * dim, dim == 2 [x, y] or dim == 3 [x, y, z]
    joints_vis: same as joints
    width: image width
    matched_parts: list of pairs
    """
    joints = _joints.copy()
    joints_vis = _joints_vis.copy()

    # Flip horizontal
    joints[:, 0] = width - joints[:, 0] - 1

    # Change left-right parts
    for pair in matched_parts:
        joints[pair[0], :], joints[pair[1], :] = \
            joints[pair[1], :], joints[pair[0], :].copy()
        joints_vis[pair[0], :], joints_vis[pair[1], :] = \
            joints_vis[pair[1], :], joints_vis[pair[0], :].copy()

    return joints, joints_vis
05ec8003a90357b8fb0ad888d100f0b8ac50a0d7
592,372
import requests


def get_user_id_to_team_name(league_id):
    """Gets a map of fantasy player user id to their team name."""
    user_id_to_team_name = {}
    r = requests.get("https://api.sleeper.app/v1/league/%s/users" % league_id)
    user_data = r.json()
    for user in user_data:
        user_id_to_team_name[user['user_id']] = user['display_name']
    return user_id_to_team_name
47ed910a3c888d20e9166d80675fb77bb20e8c3b
213,258
import uuid
import copy


def apply_uuid_to_elements(elements, uuid_to_apply=None):
    """Append a new UUID to the id of each cytoscape element.

    This is used as a workaround to update the source/target of edges, and
    the parent/child relationship of nodes. In Cytoscape.js, these
    relationships are immutable, and a move() function has to be called on
    the element to update the aforementioned properties. However, Dash
    Cytoscape doesn't expose this functionality.
    See https://github.com/plotly/dash-cytoscape/issues/106.
    By providing a new ID, we can avoid this restriction.

    Args:
        elements: a list of Cytoscape elements
        uuid_to_apply: a UUID to append. Defaults to None. If None is
            provided, this function generates a new UUID.

    Returns:
        A list of Cytoscape elements with a UUID appended to their ID fields.
    """
    if uuid_to_apply is None:
        uuid_to_apply = uuid.uuid4()
    out_elements = []
    # The properties of the element that need to have a UUID appended.
    UUID_KEYS = ["id", "source", "target", "parent"]
    for element in elements:
        new_element = copy.deepcopy(element)
        for key in UUID_KEYS:
            if key in new_element["data"]:
                new_element["data"][key] += f"#{uuid_to_apply}"
        out_elements.append(new_element)
    return out_elements
2696213ad04670c816fd8d7023ee4b419c849e15
502,441
def incr_id_after(id, start, n):
    """Perform the id adjustment necessary for adding n lines before start id.

    The exact logic is as follows: suppose start has length k. Find all ids
    with length at least k, where the first k-1 numbers agree with start, and
    the k'th number is greater than or equal to start's. Increment the k'th
    number by n and leave the rest unchanged.
    """
    k = len(start)
    if len(id) >= k and id[:k-1] == start[:k-1] and id[k-1] >= start[k-1]:
        return id[:k-1] + (id[k-1] + n,) + id[k:]
    else:
        return id
1de3e296c3058c5a0279346583b962e7f8370e09
297,175
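Behavior on tuple ids when inserting two lines before id (2, 3):

    print(incr_id_after((2, 5), (2, 3), 2))     # -> (2, 7)
    print(incr_id_after((2, 1), (2, 3), 2))     # -> (2, 1)  (before start, unchanged)
    print(incr_id_after((2, 3, 9), (2, 3), 2))  # -> (2, 5, 9)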
def numéroPlusGrande(tas):
    """numéroPlusGrande(tas) gives the index, in the list of integers tas, of
    the largest element. If the list is empty, the result is -1 by convention.
    Assumes the integers are non-negative.
    """
    # If the pile is empty, return the value -1.
    if tas == []:
        return -1
    # Find the maximum element.
    maxCrêpe = max(tas)
    # tas.index(v) gives the index of the element of tas with value v.
    indexMax = tas.index(maxCrêpe)
    return indexMax
9807978935e4c80fa3de43b03f4c921538d16535
621,462
def group_technologies(df, settings):
    """Group different technologies together based on parameters in the
    settings file.

    An example would be to put a bunch of different technologies under the
    umbrella category of "biomass" or "peaker".

    Parameters
    ----------
    df : dataframe
        Pandas dataframe with a technology_description column.
    settings : dictionary
        User-defined settings loaded from a YAML file. Must have key
        tech_groups.

    Returns
    -------
    dataframe
        Same as the incoming dataframe but with grouped technology types
    """
    if settings.get("group_technologies"):
        df["_technology"] = df["technology_description"]
        for tech, group in settings["tech_groups"].items():
            df.loc[df["technology_description"].isin(group), "_technology"] = tech
        for region, tech_list in (settings.get("regional_no_grouping") or {}).items():
            df.loc[
                (df["model_region"] == region)
                & (df["technology_description"].isin(tech_list)),
                "_technology",
            ] = df.loc[
                (df["model_region"] == region)
                & (df["technology_description"].isin(tech_list)),
                "technology_description",
            ]
        df.loc[:, "technology_description"] = df.loc[:, "_technology"]
        df = df.drop(columns=["_technology"])
    return df
c1425131a6dd3837ca282f160ef0e4f06db22e86
254,372
import socket
import struct


def get_ip_from_long(long_ip):
    """Get IP from a long int.

    Args:
        long_ip: IP in the long int format

    Returns:
        IP in the format x.x.x.x
    """
    return socket.inet_ntoa(struct.pack('!L', long_ip))
f99f010525a4fd68c110448ae09cc6f3cc81361b
176,083
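A spot check on a familiar private address (192*2**24 + 168*2**16 + 1*256 + 1):

    print(get_ip_from_long(3232235777))  # -> '192.168.1.1'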
def import_data(file):
    """Import the data from the named file into a list of rows.

    The file should contain only the data, separated by a space (or change
    the delimiter in split as required).
    """
    data = []
    with open(str(file), 'r') as f:
        for line in f:
            current = line.split()  # enter your own delimiter, e.g. ","
            # Optionally convert fields: current = [int(x) for x in current]
            data.append(current)
    return data
a5c8414d7ffad018e07bd45d46c3b7d0822e822d
50,262
def plastic(diffuse, specular, nonlinear, intior, extior):
    """Plastic material dict.

    Args:
        diffuse ([list]): [rgb values]
        specular ([list]): [rgb values]
        nonlinear ([bool]): [description]
        intior ([float]): [description]
        extior ([float]): [description]

    Returns:
        [dict]: [material dict]
    """
    return {
        "type": "roughplastic",
        "diffuse_reflectance": {
            "type": "rgb",
            "value": diffuse,
        },
        # Pass the arguments through rather than hard-coding them; the
        # original ignored `nonlinear` and always used False.
        'nonlinear': nonlinear,
        'int_ior': intior,
        'ext_ior': extior,
        'specular_reflectance': {
            "type": "rgb",
            "value": specular,
        },
    }
5783a5feca30246f3d5f220ee39c7385ea8725bf
71,095
import pickle


def load_selected_trajectory(traj_file_name):
    """Loads a pickled and saved trajectory from a file."""
    # The with-block closes the file; no explicit close() is needed.
    with open(traj_file_name, 'rb') as traj_file:
        trajectory = pickle.load(traj_file)
    return trajectory
8f6bce4ad7071e7b3947253caabdea361ba2567b
214,210
def get_all_predecessors(graph, node, start_node=0):
    """Find all predecessor nodes of node in graph, given the start_node.

    Parameters
    ----------
    graph: nx.DiGraph
    node: abc.hashable
    start_node: abc.hashable
    """
    predecessors = [node]
    while True:
        # Assumes every node on the path back to start_node has exactly one
        # predecessor; otherwise this raises IndexError or loops forever.
        pre_node = list(graph.predecessors(predecessors[-1]))[0]
        predecessors.append(pre_node)
        if pre_node == start_node:
            break
    predecessors.reverse()
    return predecessors
8f66f46a05ee54e5e5706a215e55a24890fa399a
624,176
def compute_trapped_rain_water(heights: list[int]) -> int:
    """Given n non-negative integers representing an elevation map (height)
    where the width of each bar is 1, compute how much water it can trap
    after raining.

    Notes:
        If `left_max_height` and `right_max_height` are the maximum heights
        of the already-processed left and right elevations, respectively,
        then for each index i: since the amount of water we can trap is
        bounded by `min(left_max_height, right_max_height)`, and since no
        water can be trapped in space already taken up by `heights[i]`, we
        trap exactly `min(left_max_height, right_max_height) - heights[i]`
        water.

        Worked example (bar diagram omitted; per-index values recomputed):
            heights = [2, 0, 1, 0, 3, 1, 0, 2, 2, 0, 4]
            trapped = [0, 2, 1, 2, 0, 2, 3, 1, 1, 3, 0]
            => sum(trapped) == 15

    Args:
        heights: list of non-negative integers [a1, a2, ..., an], where each
            represents a point at coordinate (i, ai), i.e., the height of a
            vertical line at y-coordinate i is ai.

    Returns:
        Sum of areas between heights unbroken.

    Examples:
        >>> compute_trapped_rain_water(heights=[0,1,0,2,1,0,1,3,2,1,2,1])
        6
        >>> compute_trapped_rain_water(heights=[4,2,0,3,2,5])
        9
        >>> compute_trapped_rain_water(heights=[1,0,1])
        1
        >>> compute_trapped_rain_water(heights=[1,1])
        0
        >>> compute_trapped_rain_water(heights=[1])
        0
        >>> compute_trapped_rain_water(heights=[])
        0
    """
    ## EDGE CASES ##
    if not heights:
        return 0

    ## INITIALIZE VARS ##
    l, r = 0, len(heights) - 1
    left_max_height, right_max_height = 0, 0
    trapped_water = 0

    ## TWO POINTERS ##
    while l < r:
        left_height, right_height = heights[l], heights[r]
        left_max_height = max(left_max_height, left_height)
        right_max_height = max(right_max_height, right_height)

        ## MOVE POINTER(S) ##
        # Add `min(left_max_height, right_max_height) - heights[i]`
        if left_max_height <= right_max_height:
            trapped_water += left_max_height - left_height
            l += 1
        else:
            trapped_water += right_max_height - right_height
            r -= 1

    return trapped_water
520060057fc4526927f23ca398d9e0d096b9499e
670,187
def get_title(soup):
    """Analyze "soup" to extract the book's title.

    Args:
        soup -- bs4.BeautifulSoup from http request of book url.

    Return:
        book's title
    """
    title = soup.h1.text
    return str(title)
bcdbf86fe9b353fa3accd49f9b81a582b26d73b3
459,029
def token_sub(request):
    """Returns the sub to include on the user token."""
    return request.param if hasattr(request, 'param') else None
8cb5b0783e7333aa83c65ae8f57414e6f588550e
685,414
def merge_neuron_properties(neuron_df, conn_df, properties=['type', 'instance']):
    """Merge neuron properties to a connection table.

    Given a table of neuron properties and a connection table, append
    ``_pre`` and ``_post`` columns to the connection table for each of the
    given properties via the appropriate merge operations.

    Args:
        neuron_df: DataFrame with columns for 'bodyId' and any properties
            you want to merge
        conn_df: DataFrame with columns ``bodyId_pre`` and ``bodyId_post``
        properties: Column names from ``neuron_df`` to merge onto ``conn_df``.

    Returns:
        Updated ``conn_df`` with new columns.

    Example:

        .. code-block:: ipython

            In [1]: from neuprint import fetch_adjacencies, NeuronCriteria as NC, merge_neuron_properties
               ...: neuron_df, conn_df = fetch_adjacencies(rois='PB', min_roi_weight=120)
               ...: print(conn_df)
               bodyId_pre  bodyId_post roi  weight
            0   880875736   1631450739  PB     123
            1   880880259    849421763  PB     141
            2   910442723    849421763  PB     139
            3   910783961   5813070465  PB     184
            4   911129204    724280817  PB     127
            5   911134009    849421763  PB     125
            6   911565419   5813070465  PB     141
            7   911911004   1062526223  PB     125
            8   911919044    973566036  PB     122
            9  5813080838    974239375  PB     136

            In [2]: merge_neuron_properties(neuron_df, conn_df, 'type')
            Out[2]:
               bodyId_pre  bodyId_post roi  weight  type_pre    type_post
            0   880875736   1631450739  PB     123  Delta7_a  PEN_b(PEN2)
            1   880880259    849421763  PB     141  Delta7_a  PEN_b(PEN2)
            2   910442723    849421763  PB     139  Delta7_a  PEN_b(PEN2)
            3   910783961   5813070465  PB     184  Delta7_a  PEN_b(PEN2)
            4   911129204    724280817  PB     127  Delta7_a  PEN_b(PEN2)
            5   911134009    849421763  PB     125  Delta7_a  PEN_b(PEN2)
            6   911565419   5813070465  PB     141  Delta7_a  PEN_b(PEN2)
            7   911911004   1062526223  PB     125  Delta7_b  PEN_b(PEN2)
            8   911919044    973566036  PB     122  Delta7_a  PEN_b(PEN2)
            9  5813080838    974239375  PB     136       EPG          PEG
    """
    neuron_df = neuron_df[['bodyId', *properties]]
    newcols = [f'{prop}_pre' for prop in properties]
    newcols += [f'{prop}_post' for prop in properties]
    conn_df = conn_df.drop(columns=newcols, errors='ignore')

    conn_df = conn_df.merge(neuron_df, 'left',
                            left_on='bodyId_pre', right_on='bodyId')
    del conn_df['bodyId']

    conn_df = conn_df.merge(neuron_df, 'left',
                            left_on='bodyId_post', right_on='bodyId',
                            suffixes=['_pre', '_post'])
    del conn_df['bodyId']

    return conn_df
666a51c2cd8d068973f103a43892962bd6c14754
539,896
def get_position(headings, name):
    """Get position of an entry in a list.

    Arguments
    ---------
    headings : list
        List with names
    name : str
        Name of entry to find

    Returns
    -------
    position : int
        Position in list
    """
    return headings.index(name)
cc8ea4334300a4c25b1071f02183e2b38b78191c
201,313
def is_correct_educator_name(text):
    """Checks if the text is correct.

    :param text: input text
    :type text: str
    :return: True or False
    :rtype: bool
    """
    return text.replace(".", "").replace("-", "").replace(" ", "").isalnum()
012fc24a552dad0a2a5aa9f7bcf3a17fe3863518
189,791
def to_bool(val):
    """str -> bool

    Convert "true"/"false" strings into the corresponding Python boolean
    literals.
    """
    return val == "true"
5514d5f04911568345db273737647eb3362245a2
189,139
def per_km(context):
    """Returns the selected distance unit."""
    return 'km' if context['request'].user.details.si_units else 'mi'
12b07325cb991e5723c2f6f65cfdadb8531021d4
263,294
def get_crit_loss(crit_fake_pred, crit_real_pred, gp, c_lambda):
    """Return the loss of a critic given the critic's scores for fake and
    real images.

    :param crit_fake_pred: the critic's scores of the fake images
    :param crit_real_pred: the critic's scores of the real images
    :param gp: the unweighted gradient penalty
    :param c_lambda: the current weight of the gradient penalty
    :return: crit_loss: a scalar for the critic's loss, accounting for the
        relevant factors
    """
    # crit_loss = (crit_fake_pred - crit_real_pred + gp * c_lambda).mean()
    crit_loss = crit_fake_pred.mean() - crit_real_pred.mean() + gp * c_lambda
    return crit_loss
4054f38a91dbd7909e1b589a3d3120186122a55b
223,862
def Length(data):
    """Returns the number of samples in a time series."""
    return len(data)
c66df48523926ceb81d4b02015835c02e9492b29
224,045
def evaluate_functions_at_coordinates(list_of_functions, coordinates):
    """
    ===========================================
    |  evaluate_functions_at_coordinates      |
    ===========================================

    Evaluate functions at the given coordinate points. Functions should be
    of the form v = f(x, y, z).
    """
    return [tuple(f(x, y, z) for f in list_of_functions)
            for x, y, z in coordinates]
4f1e97217eb3c741fbd36e6e70e99fbc13c45a70
113,203
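A tiny demonstration with two scalar fields:

    fns = [lambda x, y, z: x + y + z, lambda x, y, z: x * y * z]
    print(evaluate_functions_at_coordinates(fns, [(1, 2, 3), (0, 5, 7)]))
    # -> [(6, 6), (12, 0)]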
def mean(pda):
    """Return the mean of the array."""
    return pda.sum() / pda.size
82070eb25b8fc7b7ec469d1e90c1013ccff2058f
267,190
def assemble_crabcups2_list(l, num_cups=1_000_000):
    """Get a cups list according to part 2 requirements (1 million cups)."""
    out_lst = l.copy()
    max_val = max(l)
    out_lst += list(range(max_val + 1, num_cups + 1))
    assert num_cups == len(out_lst)
    return out_lst
c60cbe8b25108e40538007dd914443863d420844
430,960
def convertLanguage(language, format):
    """Returns the given language converted to the given format as a string.

    :param language: string either as name in English, two letter code
        (ISO 639-1), or three letter code (ISO 639-2/T(B))
    :param format: format of the returned language string:
        - ``xbmc.ISO_639_1``: two letter code as defined in ISO 639-1
        - ``xbmc.ISO_639_2``: three letter code as defined in ISO 639-2/T
          or ISO 639-2/B
        - ``xbmc.ENGLISH_NAME``: full language name in English (default)

    example::

        language = xbmc.convertLanguage(English, xbmc.ISO_639_2)
    """
    return str()
fa8f09b1a03c6ec1506b08390139064a1f57ad1a
251,267
def filter_arch_compatible(data, os_filter):
    """Filter out incompatible versions of node.js from available distributions.

    :param data: list -> json data fetched from nodejs.org
    :param os_filter: str -> filter for json data
    :return: filtered json data after extracting desired fields
    """
    tmp_data = []
    for item in data:
        if os_filter in item['files']:
            tmp_data.append(item)
    if not tmp_data:
        print('No suitable node.js versions/candidates found')
        return None
    return tmp_data
de289f60b3e4e0783ebb4f10aa6e7637cb085add
369,780
import re


def ecm_kw_regex_select(ecm_names_list, list_of_match_str):
    """Identify matching, non-matching ECM names using a list of search terms.

    This function searches a list of ECM names to find all of the ECM names
    that have the words specified by the user as search terms. This function
    is used to identify the ECMs that should be moved from the active to
    inactive list or vice versa. The regex is set up to be case-insensitive.

    Args:
        ecm_names_list (list): A list of ECM names encoded as strings.
        list_of_match_str (list): A list with at least one entry,
            corresponding to the strings specified by the user to be used to
            select ECMs to move from the active to the inactive list, or
            vice versa.

    Returns:
        A list of all of the ECM names that matched with the search string(s)
        and a separate list of all of the ECM names that did not match with
        the search string.
    """
    # If the list is not empty, identify the matching and non-matching
    # entries in the list of ECM names
    if list_of_match_str:
        # Construct the regex search pattern from the list of strings given
        nom_regex = re.compile(r'(?:%s)' % '|'.join(list_of_match_str),
                               re.IGNORECASE).search

        # Construct the inverse regex search pattern from the list of
        # strings given
        nom_regex_inv = re.compile(r'(?:%s)' % '|'.join([
            x.strip("! ") for x in list_of_match_str if "!" in x]),
            re.IGNORECASE).search

        # Construct a list of all of the ECM names that matched the regex
        matches = [ecm for ecm in ecm_names_list if nom_regex(ecm)]

        # Construct a list of all ECM names that matched the inverse regex
        matches_inv = [ecm for ecm in ecm_names_list if nom_regex_inv(ecm)]

        # Add all ECMs that do NOT match the inverse regex search term(s)
        # to the list of matched ECMs
        if len(matches_inv) > 0:
            matches.extend([x for x in ecm_names_list if (
                x not in matches_inv and x not in matches)])

        # Construct a list of all ECM names that were not matched
        non_matches = [ecm for ecm in ecm_names_list if ecm not in matches]

    # If the list is empty, running the above process would result in all of
    # the ECM names matching, which is the opposite of what is desired for
    # the case with no keywords, thus this case of no search terms is
    # handled explicitly
    else:
        matches = []
        non_matches = ecm_names_list

    return matches, non_matches
dcc7dd512db184c7e09a91b0fed41151b0b420bd
645,607
from typing import Dict


def compute_frequencies(text: str, k: int) -> Dict[str, int]:
    """Return the frequency array (count of each k-mer) in a given text.

    Arguments:
        text {str} -- text to search
        k {int} -- k-mer length

    Returns:
        Dict[str, int] -- dictionary of k-mers and their count within text

    Example:
        >>> compute_frequencies("ACGT", 2)
        {'AC': 1, 'CG': 1, 'GT': 1}
    """
    frequency_dict = {}
    for i in range(len(text) - k + 1):
        pattern = text[i:i+k]
        frequency_dict[pattern] = frequency_dict.get(pattern, 0) + 1
    return frequency_dict
40661b9db3b83fe49b9e6a47b182270875d9aded
504,788
def incr(num):
    """Increment its argument by 1.

    :param num: number
    :return: number
    """
    return num + 1
fd6e1868329b056edeaeab2741030f973a4ea704
679,117
import collections


def convertindexlist(l):
    """Convert the list of decoded ASCII header information to a namedtuple.

    Args:
        l (list): decoded header information

    Returns:
        namedtuple: headerinfo
    """
    recordinfo = collections.namedtuple(
        'recordinfo', 'y m d h fc lvl grid name exp prec initval')
    return recordinfo(y=l[0], m=l[1], d=l[2], h=l[3], fc=l[4], lvl=l[5],
                      grid=l[6], name=l[7], exp=l[8], prec=l[9],
                      initval=l[10])
30b2965539297a658c9914f9c4fc0cfbd90f1b78
295,711
from typing import Callable
from typing import Tuple
from typing import Any


def recurse_eval(path: str, data: dict, fn: Callable) -> Tuple[str, Any]:
    """Given a `path` such as `a.b.0.split(' ')`, this function traverses the
    `data` dictionary using the path, stopping whenever a key cannot be found
    in the `data`. `fn` is then applied to the extracted data and the result
    is returned along with the part of the path which was traversed.

    In the following example, `a.b.0` is identified as the path to return
    since `.split()` is not an item in `data`.

    >>> recurse_eval(
    ...     path="a.b.0.split(' ')",
    ...     data={"a": {"b": [{"$eval": "'hey ' * 2"}]}},
    ...     fn=lambda node, _: eval(node["$eval"]) if "$eval" in node else node
    ... )
    ('a.b.0', 'hey hey ')

    Parameters
    ----------
    path
        The path to fetch from in the data
    data
        Dictionary which should be traversed using the path
    fn
        Function to call with the fetched data as parameter

    Returns
    -------
    Tuple[str, Any]
        The path and the value after applying the `fn`
    """
    tmp = data
    current_path = []
    path = path.replace("[", ".[")
    for key in path.split("."):
        original_key = key
        if "[" in key:
            key = key.replace("[", "").replace("]", "").replace('"', "")
        try:
            tmp = tmp[key]
            current_path.append(original_key)
        except TypeError:
            try:
                tmp = tmp[int(key)]
                current_path.append(original_key)
            except ValueError:
                break
        except Exception:  # stop traversing on any other lookup failure
            break
    return ".".join(current_path).replace(".[", "["), fn(tmp, data)
636661654f245869b045a19cf1750d73514e9a28
81,208
def list_arg(raw_value):
    """argparse type for a list of strings."""
    return str(raw_value).split(",")
38e411618a0508c3639802fe67615aa29df8fcb2
684,609
def _tag_in_string(source: str, start: str, end: str) -> bool:
    """Determine if a specific tag is in a string.

    :param source: String to evaluate
    :param start: String that marks the start of a tag
    :param end: String that marks the end of a tag
    :returns: Decision
    """
    if start not in source:
        return False
    if end not in source[source.index(start) + len(start):]:
        return False
    return True
108dfa060113fdaed13b0dafbeaf86715ba7e974
529,737
import hashlib


def caclulate_hash(path: str) -> str:
    """Calculate the md5 hash of the file in binary mode.

    Args:
        path (str): Path to the file to be hashed

    Returns:
        str: The calculated hash
    """
    hash_md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
5d4fd285a0921aaafe5d2387bfb7856d0e5d8621
577,592