Columns:
    content: string (lengths 39 to 9.28k)
    sha1: string (length 40)
    id: int64 (values 8 to 710k)
def get_input_shape(inputs):
    """Gets the shape of the input excluding the batch size"""
    return tuple(inputs.size())[1:]
5e833cf59a50b5397072321166d8173dfde4f2ab
459,860
def dual_averaging(t0=10, kappa=0.75, gamma=0.05):
    """
    Dual Averaging is a scheme to solve convex optimization problems. It
    belongs to a class of subgradient methods which use subgradients (which
    lie in a dual space) to update states (in primal space) of a model. Under
    some conditions, the averages of generated parameters during the scheme
    are guaranteed to converge to an optimal value. However, a
    counter-intuitive aspect of traditional subgradient methods is that "new
    subgradients enter the model with decreasing weights" (see reference [1]).
    The Dual Averaging scheme resolves that issue by weighting all
    subgradients equally when updating parameters, hence the name
    "dual averaging".

    This function implements a dual averaging scheme which is adapted for
    Markov chain Monte Carlo (MCMC) algorithms. To be more precise, we
    replace subgradients by some statistics calculated at the end of MCMC
    trajectories. Following [2], we introduce some free parameters such as
    ``t0`` and ``kappa``, which are helpful and still guarantee the
    convergence of the scheme.

    **References:**

    1. *Primal-dual subgradient methods for convex problems*, Yurii Nesterov
    2. *The No-U-turn sampler: adaptively setting path lengths in Hamiltonian
       Monte Carlo*, Matthew D. Hoffman, Andrew Gelman

    :param int t0: A free parameter introduced in reference [2] that
        stabilizes the initial steps of the scheme. Defaults to 10.
    :param float kappa: A free parameter introduced in reference [2] that
        controls the weights of steps of the scheme. For a small ``kappa``,
        the scheme will quickly forget states from early steps. This should
        be a number in :math:`(0.5, 1]`. Defaults to 0.75.
    :param float gamma: A free parameter introduced in reference [1] which
        controls the speed of the convergence of the scheme. Defaults to 0.05.
    :return: a (`init_fn`, `update_fn`) pair.
    """
    def init_fn(prox_center=0.):
        """
        :param float prox_center: A parameter introduced in reference [1]
            which pulls the primal sequence towards it. Defaults to 0.
        :return: initial state for the scheme.
        """
        x_t = 0.
        x_avg = 0.  # average of primal sequence
        g_avg = 0.  # average of dual sequence
        t = 0
        return x_t, x_avg, g_avg, t, prox_center

    def update_fn(g, state):
        """
        :param float g: The current subgradient or statistics calculated
            during an MCMC trajectory.
        :param state: Current state of the scheme.
        :return: new state for the scheme.
        """
        x_t, x_avg, g_avg, t, prox_center = state
        t = t + 1
        # g_avg = (g_1 + ... + g_t) / t
        g_avg = (1 - 1 / (t + t0)) * g_avg + g / (t + t0)
        # According to formula (3.4) of [1], we have
        #     x_t = argmin{ g_avg . x + loc_t . |x - x0|^2 },
        # hence x_t = x0 - g_avg / (2 * loc_t),
        # where loc_t := beta_t / t, beta_t := (gamma/2) * sqrt(t).
        x_t = prox_center - (t ** 0.5) / gamma * g_avg
        # weight for the new x_t
        weight_t = t ** (-kappa)
        x_avg = (1 - weight_t) * x_avg + weight_t * x_t
        return x_t, x_avg, g_avg, t, prox_center

    return init_fn, update_fn
0c0a44d0f8a73e28e0dd9fc11c31c8343db9cffc
203,558
from typing import Dict
from typing import Tuple


def deserialize_fh(doc: Dict) -> Tuple[str, str]:
    """Get the file identifier and optional target path from an input file
    argument.

    Parameters
    ----------
    doc: dict
        Input file argument value.

    Returns
    -------
    string, string
    """
    try:
        value = doc['value']
        return value['fileId'], value.get('targetPath')
    except KeyError as ex:
        raise ValueError('missing element {}'.format(str(ex)))
2d36cbde4479b2efdb1c031be37d5c384d054273
477,752
from typing import Tuple
from typing import Dict
from typing import Optional


def _prune_tree(
    tree: Tuple[Dict, Dict, Optional[str]]
) -> Tuple[Tuple[Dict, Dict, Optional[str]], int]:
    """
    Remove any branches that don't have any URLs under them.

    :param tree: branch to prune
    :return: A 2-tuple containing (the pruned branch, number of urls below)
    """
    num_urls = 0
    for named_nodes in tree[1]:
        num_urls += len(named_nodes[1])
    if tree[0]:
        to_delete = []
        for nmsp, branch in tree[0].items():
            branch, branch_urls = _prune_tree(branch)
            if branch_urls == 0:
                to_delete.append(nmsp)
            num_urls += branch_urls
        for nmsp in to_delete:
            del tree[0][nmsp]
    return tree, num_urls
bd023e8503fe50e42e676a3af77f24f2b7b9a932
357,747
def air2vac(air):
    """Convert from in-air wavelengths to vacuum wavelengths.
    Based on Allen's Astrophysical Quantities.

    :param air: The in-air wavelengths.
    :returns vac: The corresponding vacuum wavelengths.
    """
    ss = 1e4 / air
    vac = air * (1 + 6.4328e-5 + 2.94981e-2 / (146 - ss**2) +
                 2.5540e-4 / (41 - ss**2))
    return vac
5f66449ce2ea7a442cdf31fccb7f407eed50a0a2
470,389
import re


def count_n_reps_or_n_chars_following(text, n=1, char=""):
    """
    Counts how often characters are repeated n times, or followed by char n
    times.

    text: UTF-8 compliant input text
    n: How often a character should be repeated, defaults to 1
    char: Character which also counts if repeated n times
    """
    if char == "":
        findall_n_reps = re.findall(rf'([\s\S])(?=\1{{{n}}})', text)
        return len(findall_n_reps)
    # Escape characters that have a special meaning in regular expressions.
    if char in '<([{\\^-=$!|]})?*+.>':
        char = f'\\{char}'
    findall_n_chars_following = re.findall(
        rf'([\s\S])(?=\1{{{n}}})|([\s\S])(?={char}{{{n}}})', text)
    return len(findall_n_chars_following)
9d589ceea4092c5d951d92ef9be09bc3d84ad764
492,112
def safe_attribute_compare(this, that, attribute_name):
    """Compares the named attribute on the objects this and that.

    The attribute need not exist on either object.

    Returns True if --
      (a) Neither object has the attribute, OR
      (b) Both objects have the attribute and the value of the attr is equal.

    Returns False if --
      (c) One object has the attribute and the other doesn't, OR
      (d) Both objects have the attribute and the value of the attr differs.
    """
    this_has = hasattr(this, attribute_name)
    that_has = hasattr(that, attribute_name)

    # For starters, both objects must have the attr.
    equal = (this_has == that_has)

    if equal and this_has:
        # Both this and that have the attr. This covers cases (b) and (d).
        equal = (getattr(this, attribute_name) ==
                 getattr(that, attribute_name))
    # else: They're not equal or they're both False. In both cases, there's
    # nothing further to be done; equal already holds the correct value.
    # This is cases (a) and (c).

    return equal
62a137053a06c1bbf80ace284c90e8f8c9b2be74
526,492
def justify_indel(start, end, indel, seq, justify):
    """
    Justify an indel to the left or right along a sequence 'seq'.

    start, end: 0-based, end-exclusive coordinates of 'indel' within the
        sequence 'seq'. Inserts denote the insertion point using start=end
        and deletions indicate the deleted region with (start, end).
    indel: indel sequence, can be insertion or deletion.
    seq: a larger sequence containing the indel. Can be a fragment from the
        genome.
    justify: Which direction to justify the indel ('left', 'right').
    """
    # No justification needed for empty indel.
    if len(indel) == 0:
        return start, end, indel

    if justify == 'left':
        while start > 0 and seq[start - 1] == indel[-1]:
            seq_added = seq[start - 1]
            indel = seq_added + indel[:-1]
            start -= 1
            end -= 1
    elif justify == 'right':
        while end < len(seq) and seq[end] == indel[0]:
            seq_added = seq[end]
            indel = indel[1:] + seq_added
            start += 1
            end += 1
    else:
        raise ValueError('unknown justify "%s"' % justify)
    return start, end, indel
95da38d03f094e18a145d7e8f48ecb10710b6b9e
447,985
def devilry_grade_short(assignment, points):
    """
    Renders a grade in its shortest form - no information about passed or
    failed, only the grade text (e.g.: "passed", "8/10", "A").

    Args:
        assignment: An :class:`devilry.apps.core.models.Assignment` object.
        points: The points to render the grade for.
    """
    return {
        'assignment': assignment,
        'grade': assignment.points_to_grade(points=points),
        'is_passing_grade': assignment.points_is_passing_grade(points=points),
    }
b118083155a253fa57b24022b24fa42163e355d2
534,043
def post_process_human_data(human_data):
    """Remove unwanted space and solve double shad (ཉིས་ཤད་) split cases.

    Args:
        human_data (str): human segmented data

    Returns:
        str: clean human segmented data
    """
    human_data = human_data.replace('། །', '།།')
    # Collapse double spaces to a single space.
    human_data = human_data.replace('  ', ' ')
    return human_data
ea4624b47a068d2de2b7f85e0bf8132bbb91e06e
642,289
def ensure_three_decimal_points_for_milliseconds_and_replace_z(
    datetimestring: str,
) -> str:
    """
    To convert SciHub Datetimes to Python Datetimes, we need them in ISO
    format.

    SciHub Datetimes can have milliseconds of fewer than 3 digits, so we pad
    them with zeros on the right to make 3 digits, as required by `datetime`.
    We also need to replace Z at the end with +00:00.

    :param datetimestring: Str representing a SciHub Datetime
    :returns: Str representing a correctly padded SciHub Datetime
    """
    datetimestring_stripped = datetimestring.replace("Z", "")
    try:
        number_of_decimal_points = len(datetimestring_stripped.split(".")[1])
        if number_of_decimal_points < 3:
            datetimestring_stripped = (
                f"{datetimestring_stripped}"
                f"{(3 - number_of_decimal_points) * '0'}"
            )
    except IndexError:
        datetimestring_stripped = f"{datetimestring_stripped}.000"
    return f"{datetimestring_stripped}+00:00"
0b640f19681f7eed0490bb275be1dda2ae772552
106,668
from sympy.ntheory.modular import crt


def _decipher_rsa_crt(i, d, factors):
    """Decipher RSA using chinese remainder theorem from the information
    of the relatively-prime factors of the modulus.

    Parameters
    ==========

    i : integer
        Ciphertext

    d : integer
        The exponent component

    factors : list of relatively-prime integers
        The integers given must be coprime and the product must equal
        the modulus component of the original RSA key.

    Examples
    ========

    How to decrypt RSA with CRT:

    >>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
    >>> primes = [61, 53]
    >>> e = 17
    >>> args = primes + [e]
    >>> puk = rsa_public_key(*args)
    >>> prk = rsa_private_key(*args)

    >>> from sympy.crypto.crypto import encipher_rsa, _decipher_rsa_crt
    >>> msg = 65
    >>> crt_primes = primes
    >>> encrypted = encipher_rsa(msg, puk)
    >>> decrypted = _decipher_rsa_crt(encrypted, prk[1], primes)
    >>> decrypted
    65
    """
    moduluses = [pow(i, d, p) for p in factors]

    result = crt(factors, moduluses)
    if not result:
        raise ValueError("CRT failed")
    return result[0]
17200b88d545d8c3d16a191e6e607389619694a9
15,190
import ipaddress


def get_bridge_domain(aci_subnets, eip):
    """
    Function to return a bridge domain name from tenant, app and end device
    IP.

    :param aci_subnets: List of aci subnets
    :param eip: ACO end device IP
    :return: Bridge domain name
    """
    for bd_name, networks in aci_subnets.items():
        for network in networks:
            if ipaddress.ip_address(eip) in ipaddress.IPv4Network(
                    network.addr, strict=False):
                return bd_name
    return None
5ae315fa6baa3c1575a87074aa6cf684d4e0d2e7
123,022
def deep_merge(first: dict, second: dict) -> dict:
    """
    Dict deep merge function.
    Recursively merges the second dict into the first and returns it.

    >>> deep_merge({}, {})
    {}
    >>> deep_merge({'key': 'value'}, {'key': 'value'})
    {'key': 'value'}
    >>> deep_merge({'key': 'value'}, {'key': 'new_value', 'second_key': 'second_value'})
    {'key': 'new_value', 'second_key': 'second_value'}
    >>> deep_merge(
    ...     {'key': 'value', 'dict_key': {'nested_key': 'nested_value'}},
    ...     {
    ...         'key': 'value',
    ...         'dict_key': {'nested_key': 'new_nested_value', 'nested_dict_key': {}},
    ...         'flat_key': 'flat_value'
    ...     }
    ... )
    {'key': 'value', 'dict_key': {'nested_key': 'new_nested_value', 'nested_dict_key': {}}, 'flat_key': 'flat_value'}
    """
    for key in second:
        if (key in first and isinstance(first[key], dict)
                and isinstance(second[key], dict)):
            deep_merge(first[key], second[key])
        else:
            first[key] = second[key]
    return first
8306b925f8c7b69044135e0e8387d082553bda4e
633,161
from typing import Union
from pathlib import Path
from typing import Dict
from typing import List
from typing import Tuple
import re


def parse_rules(filename: Union[str, Path]) -> Dict[str, List[Tuple[str, int]]]:
    """
    Parse the input text into a nicely formatted set of rules.

    We assume:
    * Each type of bag appears on the left hand side of a rule
    * The rule list does not contain (directed) cycles
    * Each bag appears only once on the left hand side of a rule

    Input format:
        [BAG TYPE] bags contain (no other bags|[N] [BAG TYPE] bags?[, [N] [BAG TYPE] bags?...]).

    Output format:
        key: Left hand side bag type
        value: List of pairs (bag type, number of bags contained in parent
            bag of this type)
    """
    rules: Dict[str, List[Tuple[str, int]]] = {}
    with open(filename, "rt") as infile:
        for line in infile:
            left, right = line.strip("\n.").split("contain")
            left = left.strip()[: -len(" bags")]
            right = right.strip()
            if right == "no other bags":
                rules[left] = []
            else:
                children = [
                    (bag_type, int(bag_num))
                    for bag_num, bag_type in re.findall(
                        r"(\d+) ([a-z ]+) bags?", right)
                ]
                if not children:
                    raise ValueError(f"Something is wrong in regex for {right}")
                rules[left] = children
    return rules
7865cd9396c7eb0d6189a4763de59ef0ff235357
250,832
from typing import List
from typing import Tuple


def process_results(search_results: List) -> Tuple[dict, int]:
    """
    Prints the search results in an appropriate format and maps them in a
    dictionary.
    """
    print("Showing the most relevant fetched results.\n")
    result_index = {}
    # search_results is a list of tv show objects
    for i, show in enumerate(search_results):
        # map the shows
        result_index[i + 1] = show
        # print the name, year and country (country is given to us as a list)
        if not show["origin_country"]:
            show["origin_country"].append("N/A")
        if not show["first_air_date"]:
            show["first_air_date"] = "N/A"
        print(
            f"{i+1}. {show['name']}, {show['first_air_date'][:4]}, "
            f"{show['origin_country'][0]}"
        )
    return result_index, len(result_index)
61956276f94dccc5d62d116505140fdf4dae8af9
541,795
def parse_key_value_config(config_value):
    """
    Parses out key-value pairs from a string that has the following format:
        key: value, key2: value, key3: value

    :param string config_value: a string to parse key-value pairs from
    :returns dict:
    """
    if not config_value:
        return {}
    key_values_unparsed = config_value.split(',')
    key_values = {}
    for key_value_pair in key_values_unparsed:
        key, value = key_value_pair.strip().split(':')
        key_values[key.strip()] = value.strip()
    return key_values
f00c79d85e71364db58bfb5b91fb2b8654ebe75c
691,436
def create_intersection(whole_dict, sub_dict):
    """
    Reduces a dictionary to have the same keys as another dict.

    :param whole_dict: dict, the dictionary to be reduced.
    :param sub_dict: dict, the dictionary with the required keys.
    :return: dict, the reduced dict.
    """
    reduced_dict = {}
    for parameter in sub_dict.keys():
        if whole_dict.get(parameter, None) is None:
            return {}
        reduced_dict[parameter] = whole_dict[parameter]
    return reduced_dict
da4960dd56f2c0f70cee871946e8a0b740c693d8
553,723
def empty_cells(keyword):
    """``empty-cells`` property validation."""
    return keyword in ('show', 'hide')
2617db5346b269e8c443a292d457dedc76bc7f55
551,288
def get_runner_image_name(base_images_project, test_image_suffix):
    """Returns the runner image that should be used, based on
    |base_images_project|. Returns the testing image if |test_image_suffix|
    is set."""
    image = f'gcr.io/{base_images_project}/base-runner'
    if test_image_suffix:
        image += '-' + test_image_suffix
    return image
1e8544b1441a243dceff9599a50d5deec0fe9fdb
176,893
import json


def GetError(error):
    """Returns a ready-to-print string representation from the http response.

    Args:
        error: A string representing the raw json of the Http error response.

    Returns:
        A ready-to-print string representation of the error.
    """
    status = error.response.status
    code = error.response.reason
    try:
        data = json.loads(error.content)
        message = data['error']['message']
    except ValueError:
        message = error.content
    return ('ResponseError: status=%s, code=%s, message=%s' %
            (status, code, message))
81ccd73acf1f56d2ec1de40f71f61c770f6ea591
433,055
def get_swap_targets(node):
    """
    All pairs of columns that do not have the same types in every cell
    """
    pairs = []
    n_col = node.table.n_col
    for i in range(n_col):
        col_i = node.table.get_col(i)
        for j in range(i + 1, n_col):
            col_j = node.table.get_col(j)
            same = True
            for ci, cj in zip(col_i, col_j):
                if not ci.t == cj.t:
                    same = False
                    break
            if not same:
                pairs.append((i, j))
    return pairs
372d2fed3c2e1544f20f69a3166b5773f954736e
701,233
def get_weights(G):
    """
    Given a graph G, returns a list of weights. If the graph is unweighted,
    returns a list of 1s the same length as the number of edges.
    """
    try:
        # Assumes a 'weight' attribute means the graph is weighted.
        weights = G.es['weight']
    except KeyError:
        # Unweighted means all weights are 1.
        weights = [1 for e in G.es]
    return weights
db092208d20951966f956fe72244c8aa62cde27b
403,193
def is_hcj(character):
    """Test if a single character is a HCJ character.

    HCJ is defined as the U+313x to U+318x block, sans two non-assigned code
    points.
    """
    return 0x3131 <= ord(character) <= 0x318E and ord(character) != 0x3164
c3c8ffc8cd250a9432b41c51c42ea6582e300f69
377,979
def get_sequential(x, layers):
    """Creates a sequential chain from given layers."""
    for layer in layers:
        x = layer(x)
    return x
23f18c095cb39db39b21f9c05ad92235bb097baa
149,233
def increment_dictionary_with_dictionary(dict_a, dict_b):
    """
    Function to add the values from one dictionary to the values from the
    same keys in another.

    Args:
        dict_a: First dictionary to be added
        dict_b: Second dictionary to be added
    Return:
        The combined dictionary
    """
    all_keys = set(dict_a.keys())
    all_keys.update(dict_b.keys())
    return {k: dict_a.get(k, 0) + dict_b.get(k, 0) for k in all_keys}
8d5ee0c905c61a8615e9ab177ed48494387c0c49
617,379
def load_fs_lut(filename):
    """
    Reads a label lookup-table from file. File is expected to define the
    anatomical name and color for each label ID. Each line in the file
    should have the format:

    ID    AnatomicalName    R G B

    Parameters:
        filename (str): File to load.

    Returns:
        dict: Label lookup dictionary.
    """
    label_table = {}
    with open(filename, 'r') as file:
        for line in file:
            line = line.rstrip()
            if not line or line[0] == '#':
                continue
            tokens = line.split()
            sid = int(tokens[0])
            name = tokens[1]
            label_table[sid] = {'name': name}
            if len(tokens) > 2:
                label_table[sid]['color'] = [int(c) for c in tokens[2:5]]
    return label_table
e50f25d21449f607ffb9f3bde37a74107ebe39d3
284,627
from typing import Iterable
from typing import Sequence
from pathlib import Path
from typing import Optional
import csv


def write_list_to_csv(
    data: Iterable[Sequence],
    file_path: Path,
    parents=False,
    exist_ok=True,
    has_header: bool = True,
    headers: Optional[Sequence[str]] = None,
) -> int:
    """
    Writes an iterable of sequences to a file in csv format.

    Parameters
    ----------
    data : Iterable[Sequence]
        Rows to write, optionally beginning with a header row.
    file_path : Path
        Destination csv file.
    parents : bool, optional
        Create missing parent directories, by default False
    exist_ok : bool, optional
        Do not raise if parent directories already exist, by default True
    has_header : bool, optional
        First row of supplied data is the header, by default True
    headers : Optional[Sequence[str]], optional
        Headers to use if not supplied in data, by default None

    Returns
    -------
    int
        Rows saved, not including a header.

    Raises
    ------
    ValueError
        Number of items in a row does not match number of headers.
    """
    if parents:
        file_path.parent.mkdir(parents=parents, exist_ok=exist_ok)

    with file_path.open("w", encoding="utf8", newline="") as file_out:
        writer = csv.writer(file_out)
        iterable_data = iter(data)
        if has_header:
            header_row = next(iterable_data)
            writer.writerow(header_row)
        elif headers is not None:
            header_row = headers
            writer.writerow(header_row)
        else:
            header_row = []

        total_count = 0
        for item in iterable_data:
            if len(header_row) > 0 and len(header_row) != len(item):
                raise ValueError(
                    f"Header has {len(header_row)} but row has {len(item)} items"
                )
            writer.writerow(item)
            total_count += 1
    return total_count
fb9b55c9bc64ce3744fbe8bba75a783eb0368fe1
78,776
def get_db_path(spider_dir, db_id):
    """
    Return path to SQLite database file.

    Args:
        spider_dir: path to SPIDER benchmark
        db_id: database identifier

    Returns:
        path to SQLite database file
    """
    return f'{spider_dir}/database/{db_id}/{db_id}.sqlite'
9069b673df1b3c929c249319339a84eaaf398c33
699,070
def normalize(X, m, s):
    """
    Normalizes (standardizes) a matrix.

    X is the numpy.ndarray of shape (d, nx) to normalize
        d is the number of data points
        nx is the number of features
    m is a np.ndarray of shape (nx,) that contains the mean of features of X
    s is a np.ndarray of shape (nx,) that contains the stdv of features of X

    Returns: The normalized X matrix
    """
    return (X - m) / s
64c537585f8f9f73a2a589ecc93757a2e1290e25
137,505
def get_input(question, answers, default=None):
    """Get User Input.

    Args:
        question (str): Question to ask user (passed directly to input())
        answers (str[]): List of possible answers. Must be all lowercase.
        default (str, optional): If specified, the answer to use if none is
            supplied. Defaults to None.

    Returns:
        str: The answer from the user.
    """
    answer = None
    while not answer or answer not in answers:
        answer = input(question).lower()
        if answer == "" and default:
            answer = default
    return answer
8956a28c2e59eecb3e8f62d27f11a585dff68524
467,325
def read_file_contents(path):
    """
    Reads the contents of the given file.

    Returns:
        A str, the contents of the given file.
    """
    with open(path) as file_handle:
        return file_handle.read()
b3bd16d9525d451d4dbf75558086b8f53bcd7021
269,562
def fixed_asset_turnover(revenue, average_net_fixed_assets):
    """Computes fixed asset turnover.

    Parameters
    ----------
    revenue : int or float
        Revenue
    average_net_fixed_assets : int or float
        Average net fixed assets in period

    Returns
    -------
    out : int or float
        Fixed asset turnover
    """
    return revenue / average_net_fixed_assets
715460872842f83a9ce998369263362d38f297db
605,292
def empty_cache(max_T, labeling_with_blanks):
    """Create empty cache."""
    return [[None for _ in range(len(labeling_with_blanks))]
            for _ in range(max_T)]
e1071ef35fa562f71581391c02d5f8a823025699
287,509
import torch


def euler_matrix(ai, aj, ak, repetition=True):
    """Return homogeneous rotation matrix from Euler angles and axis
    sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles

    Readapted for Pytorch: some tricks going on.
    """
    si, sj, sk = torch.sin(ai), torch.sin(aj), torch.sin(ak)
    ci, cj, ck = torch.cos(ai), torch.cos(aj), torch.cos(ak)
    cc, cs = ci * ck, ci * sk
    sc, ss = si * ck, si * sk
    i, j, k = 0, 1, 2  # indexing

    # Trick to create a batched matrix [..., 3, 3];
    # any suggestion to make the code more readable is welcome!
    M = torch.cat(
        3 * [torch.cat(3 * [torch.zeros(ai.shape)[..., None, None]], -1)], -2)

    if repetition:
        M[..., i, i] = cj
        M[..., i, j] = sj * si
        M[..., i, k] = sj * ci
        M[..., j, i] = sj * sk
        M[..., j, j] = -cj * ss + cc
        M[..., j, k] = -cj * cs - sc
        M[..., k, i] = -sj * ck
        M[..., k, j] = cj * sc + cs
        M[..., k, k] = cj * cc - ss
    else:
        M[..., i, i] = cj * ck
        M[..., i, j] = sj * sc - cs
        M[..., i, k] = sj * cc + ss
        M[..., j, i] = cj * sk
        M[..., j, j] = sj * ss + cc
        M[..., j, k] = sj * cs - sc
        M[..., k, i] = -sj
        M[..., k, j] = cj * si
        M[..., k, k] = cj * ci
    return M
bb3ed6d3972bb5222b2aba59c174c5f5877ab093
290,719
def print_targets(targets, width, indent=0):
    """Print a list of targets."""
    content = ""
    for target in targets:
        content += " " * indent
        content += target.print_name(width)
        content += f"{target.print_help()}\n"
    return content
8e4653c9194261571601309495c7151e6b53d122
290,020
def mangle_name(name, prefix='', postfix=''):
    """
    "Mangles" *name* by putting a *prefix* and *postfix* around it.

    :param name: name to mangle
    :param prefix: *optional* - defaults to '' - prefix to put at the
        beginning of the name to mangle it
    :param postfix: *optional* - defaults to '' - postfix to put at the end
        of the name to mangle it
    :return: prefix + name + postfix
    """
    return prefix + name + postfix
c00aa345e5d8522846a31316eb18b0737e4573d2
256,580
import re


def compress(text):
    """
    Delete all whitespace characters except those followed by an upper or
    lowercase letter.

    :param text: The text to be compressed.
    :return: The compressed text.
    """
    return re.sub(r'\s+(?![a-zA-Z])', '', text)
e5b0a8930a9e33219a5545e9b5628e606b3f08a8
491,883
import collections


def get_duplicates(iterable):
    """Return a set of the elements which appear multiple times in
    iterable."""
    counter = collections.Counter(iterable)
    return {key for key, count in counter.items() if count > 1}
a1c75047da431d9701201852bda3178765048a87
82,438
def rgb_to_hex(rgb):
    """
    Convert RGB tuple to hex triplet
    """
    # Scale (0, 1) floats to (0, 255) integers; %x requires ints, and
    # multiplying by 256 would overflow two hex digits for a value of 1.0.
    rgb = tuple([int(bit * 255) for bit in rgb])
    return '#%02x%02x%02x' % rgb
9ca16d8f9bf1603ac42ab62a631eeabb42d94284
217,130
def user_bool(prompt: str) -> bool:
    """Get a boolean from the user"""
    lookup = {"y": True, "yes": True, "n": False, "no": False}
    options = "[y/n]: "
    value = input(f"{prompt} {options} ").lower().strip()
    while True:
        if value in lookup:
            return lookup[value]
        print(f"Please enter {options}")
        value = input(f"{prompt}: ").lower().strip()
2cff83a980b15ca703e56dcbf85299a70f0a6ae9
479,140
def _create_neighbor_dict(codons):
    """
    Create minimum neighbor distances for each amino acid given the genetic
    code.

    Parameters
    ----------
    codons : dict
        dictionary mapping codons to single-letter amino acid codes

    Returns
    -------
    neighbor_dict : dict
        dictionary mapping all possible amino acid pairs (i,j) and (j,i) to
        their minimum base pair difference. keys are tuples (e.g. ("A","V"),
        ("Q","R")); values are ints.
    """
    neighbor_dict = {}
    codon_list = list(codons.keys())
    for i in range(len(codon_list)):
        a = codon_list[i]
        for j in range(i, len(codon_list)):
            b = codon_list[j]

            # Count number of base-pair differences between these two
            dist = sum([a[k] != b[k] for k in range(len(a))])

            # If this new distance is less than the previously observed
            # distance between this amino acid pair, record the new distance
            try:
                if dist < neighbor_dict[(codons[a], codons[b])]:
                    neighbor_dict[(codons[a], codons[b])] = dist
                    neighbor_dict[(codons[b], codons[a])] = dist

            # If we haven't seen this pair before, record the distance
            except KeyError:
                neighbor_dict[(codons[a], codons[b])] = dist
                neighbor_dict[(codons[b], codons[a])] = dist

    return neighbor_dict
127ea6faf41dba526c3d7bcb43a1095560445c2c
616,291
import itertools


def enumerate_sqs(structure, subl_model, scale_volume=True, skip_on_failure=False):
    """
    Return a list of all of the concrete Structure objects from an abstract
    Structure and concrete sublattice model.

    Parameters
    ----------
    structure : AbstractSQS
        SQS object. Must be abstract.
    subl_model : [[str]]
        List of strings of species names, in the style of ESPEI
        `input.json`. This sublattice model can be of higher dimension than
        the SQS, e.g. a [["Al", "Fe", "Ni"]] for a fcc 75/25 binary SQS will
        generate the following Structures:
        Al0.75Fe0.25, Al0.75Ni0.25, Fe0.75Al0.25, Fe0.75Ni0.25,
        Ni0.75Al0.25, Ni0.75Fe0.25.
        *Note that the ordering of species in the sublattice model does not
        matter!*
    scale_volume : bool
        If True, scales the volume of the cell so the ions have at least
        their minimum atomic radii between them.
    skip_on_failure : bool
        If True, will skip if the sublattice model is lower order and return
        [] instead of raising.

    Returns
    -------
    [PRLStructure]
        List of all concrete PRLStructure objects that can be created from
        the sublattice model.
    """
    if len(subl_model) != len(structure.sublattice_model):
        raise ValueError(
            'Passed sublattice model ({}) does not agree with the passed '
            'structure ({})'.format(subl_model, structure.sublattice_model))
    possible_subls = []
    for subl, abstract_subl in zip(subl_model, structure.sublattice_model):
        subls = itertools.product(subl, repeat=len(abstract_subl))
        possible_subls.append(subls)
    unique_subl_models = itertools.product(*possible_subls)

    # create a list of unique concrete structures with the generated
    # sublattice models
    unique_sqs = []
    unique_configurations_occupancies = []
    for model in unique_subl_models:
        proposed_sqs = structure.get_concrete_sqs(model, scale_volume)
        proposed_config_occupancy = (proposed_sqs.sublattice_configuration,
                                     proposed_sqs.sublattice_occupancies)
        if proposed_config_occupancy not in unique_configurations_occupancies:
            unique_configurations_occupancies.append(proposed_config_occupancy)
            unique_sqs.append(proposed_sqs)
    return unique_sqs
9893a690643f8c731e065518e1fe2382c0e97486
382,620
def filter_true(c):
    """Filter solved conflicts"""
    return c.is_resolved is True
01b1d26dcf27660d5aace962f66e9af5f76269f6
348,139
def ncnv(self, kstop="", dlim="", itlim="", etlim="", cplim="", **kwargs):
    """Sets the key to terminate an analysis.

    APDL Command: NCNV

    Parameters
    ----------
    kstop
        Program behavior upon nonconvergence:

        0 - Do not terminate the analysis if the solution fails to converge.

        1 - Terminate the analysis and the program execution if the solution
            fails to converge (default).

        2 - Terminate the analysis, but not the program execution, if the
            solution fails to converge.

    dlim
        Terminates program execution if the largest nodal DOF solution value
        (displacement, temperature, etc.) exceeds this limit. Defaults to
        1.0E6 for all DOF except MAG and A. Defaults to 1.0E10 for MAG and A.

    itlim
        Terminates program execution if the cumulative iteration number
        exceeds this limit (defaults to infinity).

    etlim
        Terminates program execution if the elapsed time (seconds) exceeds
        this limit (defaults to infinity).

    cplim
        Terminates program execution if the CPU time (seconds) exceeds this
        limit (defaults to infinity).

    Notes
    -----
    Sets the key to terminate an analysis if not converged, or if any of the
    following limits are exceeded for nonlinear and full transient analyses:
    DOF (displacement), cumulative iteration, elapsed time, or CPU time
    limit. Applies only to static and transient analyses (ANTYPE,STATIC and
    ANTYPE,TRANS). Time limit checks are made at the end of each equilibrium
    iteration.

    This command is also valid in PREP7.
    """
    command = f"NCNV,{kstop},{dlim},{itlim},{etlim},{cplim}"
    return self.run(command, **kwargs)
5b02ff73e807f3dc5b6ce620150040ad81acea1f
442,216
def StripDoubleUnderscorePrefixes(text: str) -> str:
    """Remove the optional __ qualifiers on OpenCL keywords.

    The OpenCL spec allows __ prefix for OpenCL keywords, e.g. '__global'
    and 'global' are equivalent. This preprocessor removes the '__' prefix
    on those keywords.

    Args:
        text: The OpenCL source to preprocess.

    Returns:
        OpenCL source with __ stripped from OpenCL keywords.
    """
    # List of keywords taken from the OpenCL 1.2 specification, page 169.
    replacements = {
        "__const": "const",
        "__constant": "constant",
        "__global": "global",
        "__kernel": "kernel",
        "__local": "local",
        "__private": "private",
        "__read_only": "read_only",
        "__read_write": "read_write",
        "__restrict": "restrict",
        "__write_only": "write_only",
    }
    for old, new in replacements.items():
        text = text.replace(old, new)
    return text
01985ef505c81de8cf0ab8fe8b8d1fe2cbd0c197
342,173
def digits_sum(n):
    """
    Returns sum of digits of number n.

    For example: digits_sum(245) = 2 + 4 + 5

    :param n: n
    :return: sum of digits
    """
    return sum(int(ch) for ch in str(n))
12d3f1e35273d6b2d82938a0409f67ec2bea3865
175,403
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds=3):
    """Checks whether all but *max_out_of_bounds* shots are within mosaic
    bounds.

    Parameters
    ----------
    shot_dict : dict
        A dictionary (see czd_utils.scancsv_to_dict()) with coordinates of
        all shots in a .scancsv file:
        {shot: [x_coords, y_coords], ...}
    mosaic_bounds : list
        A list of bounds to a .Align file (see get_mos_bounds()):
        [min_x, max_x, min_y, max_y]
    max_out_of_bounds : int, optional
        Max number of out-of-bounds shots allowed for a 'match' between
        mosaic and .scancsv. The default is 3.

    Returns
    -------
    Boolean
        True or False, depending on whether all but *max_out_of_bounds*
        shots are within mosaic bounds.
    """
    total_out_of_bounds = 0
    min_x, max_x, min_y, max_y = mosaic_bounds
    for eachcoords in shot_dict.values():
        if (not min_x <= eachcoords[0] <= max_x
                or not min_y <= eachcoords[1] <= max_y):
            total_out_of_bounds += 1
    return total_out_of_bounds <= max_out_of_bounds
de36f7f2a32a2a7120236d0bd5e43520de0c7ea5
707,647
def pubkey_from_point_to_bytes(x: int, y: int, compressed: bool = True) -> bytes:
    """
    Constructs pubkey from its x y coordinates
    """
    x_bytes = x.to_bytes(32, byteorder='big')
    y_bytes = y.to_bytes(32, byteorder='big')
    if compressed:
        parity = y & 1
        return (2 + parity).to_bytes(1, byteorder='big') + x_bytes
    else:
        return b'\x04' + x_bytes + y_bytes
27e2252047656e7be29159df22dd76758881ccfb
184,016
import random


def random_list(xs):
    """
    Return a random element of a list.
    """
    return random.choice(xs)
f679b0446e53ab37c430e01df91a6770caedee2b
137,461
def ppebwr_round_probs(L):
    """
    Return 'round probabilities' for use in ppebwr method.
    These are just scaled precinct sizes.
    """
    V = float(sum([x[0] for x in L]))
    return [x[0] / V for x in L]
133894b44ac5e8f03ef795c8ea453d482becb97c
310,806
def head(iterable):
    """
    Gets the first element of the iterable.

    :param iterable: A non-empty iterable. If it is empty, a StopIteration
        error will be raised.
    :returns: The first element of the iterable.
    """
    it = iter(iterable)
    return next(it)
a87f77deefd31b4620c13abf136dbb87ba95e619
582,587
def get_column(square_size, square, i):
    """Returns an i-th column of a square."""
    expected_modulo = i % square_size
    col = []
    # Use a separate loop index so the parameter i is not shadowed.
    for index, item in enumerate(square):
        if index % square_size == expected_modulo:
            col.append(item)
    return col
7d5a4d04408947f3f3d3db1ad8c00a13957c9dd2
558,227
def format_date_c19(date_in):
    """
    Formats "m/d/y" to "yyyymmdd"
    """
    month, day, year = date_in.split('/')
    return '20%s%02i%02i' % (year, int(month), int(day))
d4516c6d02e00b4de8b5a1cfbf760e8ca3085aec
449,126
def _bounds_vars_for_variable(ncfile, ncvar):
    """Return a list of names for a variable and its bounds"""
    variables = [ncvar.varname]
    if "cell_methods" not in ncvar.attrs:
        # no cell methods, so no need to look for bounds
        return variables

    # "cell_methods" is a string attribute comprising a list of
    # blank-separated words of the form "name: method"
    cell_methods = iter(ncvar.attrs["cell_methods"].split())

    # for the moment, we're only looking for a time mean
    for dim, method in zip(cell_methods, cell_methods):
        if not (dim[:-1] == "time" and method == "mean"):
            continue
        bounds_var = ncfile.ncvars["time"].attrs.get("bounds")
        if bounds_var is not None:
            variables.append(bounds_var)
    return variables
89546d31d8ec3921ea3a9ff2af13ee28e0ac8dd5
413,550
def format_time(t, format_spec='dhms'):
    """
    Return a formatted time string describing the duration of the input in
    seconds in terms of days, hours, minutes and seconds.
    """
    if format_spec == '':
        sb, mb, hb, db = True, True, True, True
    else:
        sb = 's' in format_spec
        mb = 'm' in format_spec
        hb = 'h' in format_spec
        db = 'd' in format_spec

    t = round(t, 2)
    if t >= 60 and (mb or hb or db):
        m = t // 60
        s = t - m * 60
        if m >= 60 and (hb or db):
            h = m // 60
            m = m - h * 60
            if h >= 24 and db:
                d = h // 24
                h = h - d * 24
            else:
                d = 0
        else:
            h, d = 0, 0
    else:
        s = t
        m, h, d = 0, 0, 0

    time_str = ""
    if format_spec == '':
        # With an empty format spec, show only the non-zero components.
        db = not d == 0
        hb = not h == 0
        mb = not m == 0
        sb = not s == 0
        if not (db or hb or mb or sb):
            sb = True
    if db:
        time_str += "{:.0f}d ".format(d)
    if hb:
        time_str += "{:.0f}h ".format(h)
    if mb:
        time_str += "{:.0f}m ".format(m)
    if sb:
        time_str += "{:0=4.1f}s".format(s)
    return time_str
405e9e914cca2b620704f82c08826d50af7f30ce
633,802
def sumBase(n: int, k: int) -> int:
    """
    Given an integer n (in base 10) and a base k, return the sum of the
    digits of n after converting n from base 10 to base k. After converting,
    each digit should be interpreted as a base 10 number, and the sum should
    be returned in base 10.

    TC : O(N)
    SC : O(1)

    n : int (integer in base 10)
    k : int (base to be converted to)
    return value : int
    """
    summation = 0
    while n >= k:
        summation = summation + n % k
        n = n // k
    return summation + n
43dde6a93adc53e43becae46d8821a97b9468298
201,805
def _to_hours_mins_secs(time_taken):
    """Convert seconds to hours, mins, and seconds."""
    mins, secs = divmod(time_taken, 60)
    hours, mins = divmod(mins, 60)
    return hours, mins, secs
230d9a8abfe7f44994b245e3a992195fcdc05fdf
448,844
def window_decay(d: float, a: float):
    """
    Only considers customers that are at most distance 'a' from the current
    customer.

    f(d) = 1[d < a]  (indicator function)

    :param d: distance (non-negative finite value)
    :param a: maximum distance
    :return: decay
    """
    return 1 if d < a else 0
eed1c22b88355b92ac50dc1afc950ecbbddc529e
509,337
def _progress_bar(count, total):
    """
    Get a simple one-line string representing a progress bar.

    Arguments:
        count (int): Current count. Starts at 0.
        total (int): Total count.

    Returns:
        pbar_string (str): The string progress bar.
    """
    bar_len = 60
    filled_len = int(round(bar_len * (count + 1) / float(total)))
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    return '[{}] {}/{}'.format(bar, count + 1, total)
31af780be8486145afc5a2a81d73eae3fb50f841
57,292
from functools import reduce


def vec_sum(vec):
    """Return the sum of the elements of a vector."""
    return reduce(lambda x, y: x + y, vec)
2288b47495f416ee7258979c4b07e19362ef1622
279,796
def normalize_commit_message(commit_message):
    """
    Return a tuple of title and body from the commit message
    """
    split_commit_message = commit_message.split("\n")
    title = split_commit_message[0]
    body = "\n".join(split_commit_message[1:])
    return title, body.lstrip("\n")
c93d05b16a8dcc02b030e6e5b2f6b05aaac5232b
663,631
import pickle


def load_network(fpath):
    """
    Utility function to load network found at file path `fpath`.
    """
    with open(fpath, "rb") as f:
        network = pickle.load(f)
    return network
d632fd36517ab1a001725dfe1bc1f4ff271e830d
249,727
def get_gas_color_list(gas_list, gas_color_dict=None):
    """Map each gas name to a preset color in gas_color_dict.

    Using the same color for each gas keeps plots consistent.

    Args:
        gas_list: a list of gas names
        gas_color_dict (optional): color dictionary to map the gases to

    Returns:
        list: a list of color values
    """
    if gas_color_dict is None:
        gas_color_dict = {
            'PM2.5': '#0000ff',
            'PM10': '#660099',
            'O3': '#cc0033',
            'CO': '#cc3300',
            'NO2': '#669900',
            'SO2': '#00ff00',
        }
    return [gas_color_dict[gas] if gas in gas_color_dict.keys()
            else 'royalblue' for gas in gas_list]
f15d26a8e30495b86066b392298175de62680a1e
170,632
def colored_neighbours(pattern, index, color):
    """
    Count the number of neighbours with the color.

    :param pattern: Pattern
    :param index: Index of a Polygon
    :param color: color
    :return: Int
    """
    polygon = pattern.list_polygons[index]
    count = 0
    for neigh_index in polygon.list_neighbours:
        # Look up the neighbour, not the polygon itself.
        other_polygon = pattern.list_polygons[neigh_index]
        if other_polygon.color_index == color:
            count += 1
    return count
10aab0a8595b7edb9507ec34d63ed625241fa619
390,743
from itertools import product


def listCombination(lists) -> list:
    """
    Given a list made up of several lists, return all possible combinations
    of their elements, one drawn from each list.

    :param lists: a list made up of several lists
    :return: all possible combinations of the elements
    """
    return list(product(*lists))
6023cdc205b2780c5cd2cf56113d48a0675b98bf
4,973
def add_to_start(solutions, add_num):
    """
    Gets a list of lists and a number. Returns an updated list in which each
    sub-list has the given number added at its beginning.
    """
    if solutions is None:
        return None
    final_list = []
    for solution in solutions:
        final_list.append([add_num] + solution)
    return final_list
531f7c38b1b55f4675a935c98f1a77ab695d9c3c
446,725
def execute_move(board, move):
    """Executes a move on a board and gives new board as output."""
    from_square = move[0]
    to_square = move[1]
    moving_piece = board[from_square[0]][from_square[1]]
    board[to_square[0]][to_square[1]] = moving_piece
    board[from_square[0]][from_square[1]] = ""
    return board
01007f1ae38e954ffcbf4b6e4ac05dc880021e5e
489,605
def construct_chip_dict(gameweeks, chip_gameweeks):
    """
    Given a dict of form {<chip_name>: <chip_gw>, ...} where <chip_name> is
    e.g. 'wildcard', and <chip_gw> is -1 if chip is not to be played, 0 if
    it is to be considered any week, or gw if it is definitely to be played
    that gw, return a dict
    {<gw>: {"chip_to_play": [<chip_name>],
            "chips_allowed": [<chip_name>, ...]}, ...}
    """
    chip_dict = {}
    # first fill in any allowed chips
    for gw in gameweeks:
        chip_dict[gw] = {"chip_to_play": None, "chips_allowed": []}
        for k, v in chip_gameweeks.items():
            if int(v) == 0:
                chip_dict[gw]["chips_allowed"].append(k)
    # now go through again, for any definite ones, and remove
    # other allowed chips from those gameweeks
    for k, v in chip_gameweeks.items():
        if v > 0 and v in gameweeks:  # v is the gameweek
            # check we're not trying to play 2 chips
            if chip_dict[v]["chip_to_play"] is not None:
                raise RuntimeError(
                    "Cannot play {} and {} in the same week".format(
                        chip_dict[v]["chip_to_play"], k
                    )
                )
            chip_dict[v]["chip_to_play"] = k
            chip_dict[v]["chips_allowed"] = []
    return chip_dict
d0e73f896fb0225cfb23f84744fb1de9b84108ea
462,964
from typing import List


def di_borders(di: List[float]) -> List[int]:
    """Function to return the list of borders based on the directional index
    vector.

    Threshold chosen at 1.96 to have a p-value below 0.05. As the
    directional index is a t-value, a value below -1.96 or above 1.96 will
    have a p-value below 0.05. That way we keep only significant changes.

    Parameters
    ----------
    di : List[float]
        List of the directional index computed for each bin.

    Returns
    -------
    list of int:
        Positions in bins of the detected borders.

    Example
    -------
    >>> di = [0.5, 2., 3., 4., 0.1, -3.2, -3.5, 0.]
    >>> print(di_borders(di))
    []
    >>> di = [0.5, 2., 3., 4., 0.1, -3.2, 4., -2.]
    >>> print(di_borders(di))
    [1, 6]
    """
    # Initiation: use the last value as the previous one, as the genome is
    # considered circular.
    borders = []
    if di[-1] < -1.96:
        negative = True
    else:
        negative = False

    # Iterate on the DI values.
    for i, curr_di in enumerate(di):
        if curr_di >= 1.96 and negative:
            borders.append(i)
            negative = False
        if curr_di <= -1.96:
            negative = True
    return borders
d75c7ec0962ce1cce74e640835ba1a26b218887c
174,784
import collections


def parse_vmx(path):
    """Parse the virtual machine configuration (.vmx) file and return an
    ordered dictionary.
    """
    vmx = collections.OrderedDict()
    with open(path) as the_file:
        for line in the_file:
            line = line.strip().split('=', 1)
            if len(line) > 1:
                vmx[line[0].rstrip()] = line[1].lstrip()
    return vmx
8901f8bbc9e77ba201d5733a9289761290c50fb3
518,265
def apply_transformations(transformations, lines):
    """Applies a list of transformations to a list of lines, generating a
    list of 'frames'. A 'frame' in this case is a list of lines, each
    represented as a dual number ratio."""
    frames = []
    nframes = len(transformations)
    for i in range(nframes):
        frames.append([])
        transformation = transformations[i]
        for line in lines:
            frames[-1].append(transformation @ line)
    return frames
8fc5002532b0e36d5eb82844c6e33d191224d4ae
376,506
import re


def is_valid_node_name(name):
    """
    Return True if the string is a valid Python identifier in lower ASCII
    range, False otherwise.

    The regular expression match pattern is r"^[a-z_][a-z0-9_]*".
    """
    return (isinstance(name, str) and name.isidentifier()
            and re.fullmatch(r"^[a-z_][a-z\d_]*", name) is not None)
628fc8ad2fb0e25ad60b74c5b0e9001d76eb14a0
549,181
import importlib


def get_class_from_qualname(name):
    """
    Resolve a fully qualified class name to a class, attempting any imports
    that need to be done along the way.

    :raises: ImportError or AttributeError if we can't figure out how to
        import it
    """
    tokens = name.split(".")
    module_name = ".".join(tokens[:-1])
    class_name = tokens[-1]

    module = importlib.import_module(module_name)
    class_ref = getattr(module, class_name)
    return class_ref
5ee7ff2113ce9bcd6951b7f6a6906879b42444c2
645,702
def missing_layers(dm_layers, ds_layers):
    """Find missing datamodel-layers in datasets."""
    layers = [i.lower() for i in ds_layers]
    layers = [i for i in dm_layers if i not in layers]
    return layers
ec805b48405e1eb56a04dc28768da0fe1bafee7a
626,397
def title_for(title):
    """
    Create a title from an underscore-separated string.
    """
    return title.replace("_", " ").capitalize()
247ee399af0b7e13eb05f016890da9600a733699
469,467
def format_result(result, info):
    """
    Formats the given resultset.

    @param result: the result to format.
    @param info: return only the header information.
    """
    if info:
        return '\n'.join([
            'Domain: ' + str(result.domain),
            'Request ID: ' + str(result.request_id),
            'State: ' + str(result.state),
            'Comment: ' + str(result.comment),
            'Created: ' + str(result.created)])
    else:
        return str(result)
f95c7ef1ba2634721e681e1b6ef4949b67c4e825
108,914
import hashlib


def file_checksum(fname):
    """
    Calculates the md5 checksum of a file and returns it as a hex string
    (0-f).

    Warning: reads the entire file into memory. See
    https://stackoverflow.com/a/3431835 in case you need a memory-efficient
    variant...
    """
    with open(fname, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
ba3d2eec10f610e6ad2b36910aefcc12fa462472
481,012
def base(code):
    """Return letter code for integer encoded base."""
    return "ACGTN-"[code]
c7eabce74a3665db0e0c1c6f1f079edbee09dffe
137,278
import socket


def get_ip_using_standard_if(host='8.8.8.8'):
    """
    Gets the ip of the 'standard interface'.

    Source: https://stackoverflow.com/a/7335145

    Args:
        host (str): optional ip of a host to connect to. Default is '8.8.8.8'

    Returns:
        own ip address on standard interface
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((host, 9))
        client = s.getsockname()[0]
    except socket.error:
        client = "Unknown IP"
    finally:
        del s
    return client
965fb8c19b212b6a5be746c2d5932e55fb951100
642,220
def table_publish_format(name, timestamp, table, data):
    """Return a list for use in the results publish."""
    table_key = str(str(name) + '&' + str(timestamp))
    return [table_key, [table, data]]
c30b345c0a22d93d41d131e009613d291af1efe5
573,416
import hashlib


def _GenerateTokenHash(token):
    """Returns a MD5 hash of a token for integrity checking."""
    return hashlib.md5(token).hexdigest()
3e67a9b1b5a10674a5600d70caee5ad6b4e5df87
260,289
def findMaxElementsInDict(dictToSearch: dict[str, int]) -> list[str]:
    """Given dictionary in form <str: int>, find strings with highest int
    value."""
    maxOccurences = max(dictToSearch.values())
    return [key for key, value in dictToSearch.items()
            if value == maxOccurences]
6be287ae50a6b72020dbf4bb54c6d3c23d72952d
340,079
def isnumber(x):
    """Is x a number?"""
    return hasattr(x, '__int__')
71328c6d273f484f32db0ae9d1794c8538444dd8
388,016
def _is_cluster_done(cluster):
    """Return True if the given cluster is done running."""
    return bool(cluster['Status']['State'] == 'TERMINATING' or
                cluster['Status']['Timeline'].get('EndDateTime'))
98343b1b2f97d6826903767bed57cd420a4e32d2
162,320
def smooth_array(input, smooth):
    """Smooth an array using an exponential moving average.

    Args:
        input: input data (a pandas Series or DataFrame)
        smooth: smooth factor, 0 <= smooth < 1

    Returns:
        Smoothed array
    """
    return input.ewm(alpha=(1 - smooth)).mean()
dc964700de6fc1df7d638fa9472811231d512a65
103,439
import sqlite3


def open_sqlite(input_db):
    """Open a SQLite database.

    Args:
        input_db: Path to a SQLite database to open

    Returns:
        A connection to a SQLite database
    """
    print("Provided Database: {}".format(input_db))
    return sqlite3.connect(input_db)
7830ec140b4c03efd769f8e025f0a2a85b74303f
89,050
def id(x):
    """Get the memory block address of an array."""
    return x.__array_interface__['data'][0]
75fbdfdbebce5a8308e1206b543b161e8034d375
363,160
def _define_tabledict_keys(header, fields, separator):
    """
    Define the keys for the tabledict dictionary.

    Note: this function is only used by parse_table_from_file().

    :param header: header string.
    :param fields: header content string.
    :param separator: separator character (char).
    :return: tabledict (dictionary), keylist (ordered list with dictionary
        key names).
    """
    tabledict = {}
    keylist = []

    if not header:
        # get the dictionary keys from the header of the file; the first
        # line defines the header, whose elements will be used as
        # dictionary keys
        keys = fields
    else:
        # get the dictionary keys from the provided header
        keys = header.split(separator)

    for key in keys:
        if key == '':
            continue
        if key.endswith('\n'):
            key = key[:-1]
        tabledict[key] = []
        keylist.append(key)

    return tabledict, keylist
6c41aa138597ca5b0915df0409381ea3caa17d94
25,793
def testdat(testdir):
    """Path to the testdat directory"""
    return testdir / "testdat"
d7c278fba718164d50863e3fb353155a1ff00eee
13,483
def _convert_title(title: str) -> str:
    """Converts a title to a filename that can be used."""
    blacklist = ['\\', '/', ':', '*', '?', '"', '<', '>', '|', '\0']
    download_prefix = 'Download '
    # Drop the leading 'Download ' prefix, then strip forbidden characters.
    title = title[len(download_prefix):]
    title = "".join(c for c in title if c not in blacklist)
    return title
48dd4ddddbac6a25a2f6ff2a875243e74c8aeb76
421,466
def indent(txt, n=4, s=" "):
    """Text indenter.

    Args:
        txt (:obj:`str`): The text to indent.
        n (:obj:`int`, optional): Number of s to indent txt. Defaults to: 4.
        s (:obj:`str`, optional): Char to use for indent. Defaults to: " ".

    Returns:
        (:obj:`str`)
    """
    txt = "{t}".format(t=txt)
    tmpl = "{s}{line}".format
    lines = txt.splitlines()
    lines = [tmpl(s=s * n, line=l) for l in lines]
    lines = "\n".join(lines)
    return lines
d22d09e155b2ba64c8fe56c7bbb88e778ccdadf2
602,049
from typing import List


def compare_lists(local_list: List[str], remote_list: List[str]) -> List[str]:
    """
    Compare local and remote lists of files and return the list of local
    files not stored in the remote database.

    :param local_list: list of names of local files
    :param remote_list: list of names of remote files
    :return: list of names of files not stored in remote database
    """
    return list(set(local_list) - set(remote_list))
405a9e7e47292fcb93e4fbf07d09aead3023a14c
661,096
def sanitized(*args):
    """
    Returns a sanitized string uniquely determined by the supplied args, any
    of which can be a string or C{None}. C{None} args are ignored. Any
    spaces within an arg are removed. The text of multiple args is joined
    with an underscore. For example, supplying "Santa Fe" and "New Mexico"
    as args results in "SantaFe_NewMexico".
    """
    parts = []
    for arg in args:
        if arg is None:
            continue
        parts.append(arg.replace(" ", ""))
    if not parts:
        parts.append("US")
    return "_".join(parts)
bf25f9e68889f4157663e0e09ec14d9668e41d6b
612,797
def get_multiple_ids_string(queryset):
    """Returns a string of ids from a queryset for use in a SQL query"""
    querystring = ""
    for counter, modobj in enumerate(queryset):
        querystring += ", %s" % modobj.id if counter > 0 else "%s" % modobj.id
    return querystring
f3fcd462efc939e55a0e972665f7db52400e740c
176,507
def is_bool_or_none(x):
    """Tests if something is a boolean or None."""
    return (x is None) or isinstance(x, bool)
e4f99a998fa65f0f34a0daa30117bcc7b6279ec4
213,073
import re


def validate_job_name(job_name: str) -> bool:
    """
    EMR job name pattern from:
    https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html#EMR-RunJobFlow-request-Name
    """
    pattern = re.compile(
        r"^[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDBFF-\uDC00\uDFFF\r\n\t]*$"
    )
    return pattern.match(job_name) is not None
cec77e24c2f4e9ffde48b27563a400a3f11f8be6
343,787
def extractWholeLine(buf, isFirstLine):
    """
    Return the next complete line of input if available.

    ``buf`` is the accumulated input buffer received from decode-978. The
    goal is to extract complete lines from this data and return them as
    output. ``buf`` is altered to reflect the line being removed.

    We are also careful (using ``isFirstLine``) to ensure that all lines are
    complete lines and that the initial characters we get are not sent until
    the first newline is captured. Clients will only be sent complete lines,
    from beginning to end.

    Args:
        buf (str): Current output buffer.
        isFirstLine (bool): True if we have not output any data. This is
            used to throw out any partial first line before a newline is
            received. After the initial trash data is thrown away, this
            will be False for the rest of the run.

    Returns:
        tuple: Tuple containing:

        * String containing a new line to send to clients, or ``None`` if
          no line available.
        * Updated version of ``buf`` to reflect any changes.
        * Updated version of ``isFirstLine``. Once set to ``False`` will
          remain that way.
    """
    # If we are just starting, wait until the beginning of a new line.
    if isFirstLine:
        # Have not found our first \n
        idx = buf.find('\n')
        if idx == -1:
            # Still don't have our first \n
            return None, buf, True

        # Get rid of any partial line
        if len(buf) == idx + 1:
            buf = ''
        else:
            buf = buf[idx + 1:]

    # See if we have an entire line
    idx = buf.find('\n')
    if idx == -1:
        return None, buf, False

    line = buf[:idx + 1]
    if len(buf) == idx + 1:
        buf = ''
    else:
        buf = buf[idx + 1:]
    return line, buf, False
2c5d02355ad6272db0d8bb2814ee9d8543316b51
323,206
from django.http import HttpResponse


def auth_required(function):
    """
    Decorator to check the agent is authenticated.

    Unlike "login_required", if the agent is not authenticated it fails with
    a 401 error instead of redirecting.
    """
    def wrap(request, *args, **kwargs):
        # Without this check the 401 branch below is unreachable.
        if request.user.is_authenticated:
            return function(request, *args, **kwargs)
        resp = HttpResponse()
        resp.status_code = 401
        return resp
    return wrap
5a67589a041868148df9ae2053c220366fe7bd19
423,657
def identity(x):
    """Return its argument"""
    return x
eaf1c643784540bf4f2c6716c3530412e05abf21
590,592