Columns:
content: string, lengths 39 to 9.28k
sha1: string, length 40
id: int64, values 8 to 710k
def compute_DC(net_dict, w_ext):
    """ Computes DC input if no Poisson input is provided to the microcircuit.

    Parameters
    ----------
    net_dict
        Parameters of the microcircuit.
    w_ext
        Weight of external connections.

    Returns
    -------
    DC
        DC input, which compensates lacking Poisson input.
    """
    DC = (
        net_dict['bg_rate'] * net_dict['K_ext']
        * w_ext * net_dict['neuron_params']['tau_syn_E'] * 0.001
    )
    return DC
df9e577477440af5929bdf07b3da637915c0ee9c
606,010
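A minimal usage sketch for compute_DC above; all parameter values are hypothetical and chosen only to show the expected dictionary layout.

example_net_dict = {
    'bg_rate': 8.0,                        # hypothetical external rate (spikes/s)
    'K_ext': 1600,                         # hypothetical number of external inputs
    'neuron_params': {'tau_syn_E': 0.5},   # hypothetical synaptic time constant (ms)
}
dc = compute_DC(example_net_dict, w_ext=87.8)  # hypothetical weight
print(dc)  # bg_rate * K_ext * w_ext * tau_syn_E * 0.001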
def parse_event(event_string):
    """ Parses the event string into a better format """
    event_year, event = event_string.split(" – ", 1)
    # print(event_year, event)
    return {"year": event_year, "event": event}
082cd8f000eb3f666f4c3bd4f4f5b6a24bddba4c
479,410
def calculate_adjusted_var( kurtosis: float, skew: float, ndp: float, std: float, mean: float ): """Calculates VaR, which is adjusted for skew and kurtosis (Cornish-Fischer-Expansion) Parameters ---------- kurtosis: float kurtosis of data skew: float skew of data ndp: float normal distribution percentage number (99% -> -2.326) std: float standard deviation of data mean: float mean of data Returns ------- float Real adjusted VaR """ # Derived from Cornish-Fisher-Expansion # Formula for quantile from "Finance Compact Plus" by Zimmerman; Part 1, page 130-131 # More material/resources: # - "Numerical Methods and Optimization in Finance" by Gilli, Maringer & Schumann; # - https://www.value-at-risk.net/the-cornish-fisher-expansion/; # - https://www.diva-portal.org/smash/get/diva2:442078/FULLTEXT01.pdf, Section 2.4.2, p.18; # - "Risk Management and Financial Institutions" by John C. Hull skew_component = skew / 6 * (ndp**2 - 1) ** 2 - skew**2 / 36 * ndp * ( 2 * ndp**2 - 5 ) kurtosis_component = (kurtosis - 3) / 24 * ndp * (ndp**2 - 3) quantile = ndp + skew_component + kurtosis_component log_return = mean + quantile * std real_return = 2.7182818**log_return - 1 return real_return
92355737d4cdeef790a9417af84e42545bebc7ba
430,864
import re def replace_entities(entities, pattern): """ Replaces all entity names in a given pattern with the corresponding values provided by entities. Args: entities (dict): A dictionary mapping entity names to entity values. pattern (str): A path pattern that contains entity names denoted by curly braces. Optional portions denoted by square braces. For example: 'sub-{subject}/[var-{name}/]{id}.csv' Accepted entity values, using regex matching, denoted within angle brackets. For example: 'sub-{subject<01|02>}/{task}.csv' Returns: A new string with the entity values inserted where entity names were denoted in the provided pattern. """ ents = re.findall('\{(.*?)\}', pattern) new_path = pattern for ent in ents: match = re.search('([^|<]+)(<.*?>)?(\|.*)?', ent) if match is None: return None name, valid, default = match.groups() default = default[1:] if default is not None else default if name in entities: if valid is not None: ent_val = str(entities[name]) if not re.match(valid[1:-1], ent_val): if default is None: return None entities[name] = default ent_val = entities.get(name, default) if ent_val is None: return None new_path = new_path.replace('{%s}' % ent, str(ent_val)) return new_path
21b9b7065c62124f05ea59815edc394b0b997a16
258,496
import re

def strip_comments(text):
    """
    Remove comment lines (those starting with #) and leading/trailing
    whitespace from ``text``.
    """
    # (?m) enables multiline mode
    return re.sub(r'(?m)^ *#.*\n?', '', text).strip()
34daaa551f4730e8344a7cd571e2b9c3636c1c29
177,543
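A quick check of strip_comments above on a made-up snippet:

sample = "# header comment\nvalue = 1\n  # indented comment\nother = 2\n"
print(strip_comments(sample))
# value = 1
# other = 2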
import re

def row_skipper(file):
    """
    Count how many rows are needed to be skipped in the parsed codebook.

    Parameters:
        file (character): File name of the parsed codebook

    Returns:
        count: The number of lines to be skipped
    """
    with open(file, 'r') as codebook:
        count = 0
        for line in codebook:
            count += 1
            if re.search('(NAME)[\t]+(SIZE)[\t]+(DESCRIPTION)[\t]+(LOCATION)', line):
                count -= 1
                break
    return count
3060ae59b3f9b378d144d8f85090adf5200d35f4
674,988
import json

def pprint(obj):
    """Pretty JSON dump of an object."""
    return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
ec0ba3cd4fde6e72abb3b77882cd8dd96070fb8c
180,766
def mergeFreqProfiles(freqp1, freqp2):
    """Returns a frequency profile from two merged frequency profiles.
    The two frequency profiles must be dictionary types.

    Parameters:
        freqp1: dictionary, i.e. frequency profile
        freqp2: dictionary, i.e. frequency profile

    This function only works with absolute frequency profiles.
    """
    return dict((i, freqp1.get(i, 0) + freqp2.get(i, 0))
                for i in set.union(set(freqp1), set(freqp2)))
7564f59d1091c8eb4e2759ec599c2d14ecfe4b66
496,573
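A small example of mergeFreqProfiles above with two made-up absolute frequency profiles (key order of the result may vary):

profile_a = {'a': 3, 'b': 1}
profile_b = {'b': 2, 'c': 5}
print(mergeFreqProfiles(profile_a, profile_b))  # {'a': 3, 'b': 3, 'c': 5}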
def countNucFreq(dna_seq):
    """ Counts the frequency of each nucleotide in the sequence. """
    tmpseq = dna_seq.upper()
    freq = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    for nucleotide in tmpseq:
        freq[nucleotide] += 1
    return freq
e741ded45494ca18de94094caf91f9f90e9a1356
419,651
def check_recoded(confusion_matrix, count: int) -> int:
    """Check if two variables are recoded based on their crosstab.

    Args:
        confusion_matrix: Crosstab between two variables.
        count: The number of variables.

    Returns:
        Whether the variables are recoded.
    """
    return int(confusion_matrix.values.diagonal().sum() == count)
58464516f067a83cce7248a1ca796a7bf77a779d
183,696
def _any_exist_in(select_from, find_in):
    """
    :param select_from: iterable keys to find
    :param find_in: iterable to search in
    :return: True if any item in the first iterable exists in the second
    """
    for target in select_from:
        if target in find_in:
            return True
    return False
33f12c4bfc1cb394693ab43ff796fcb2ffd7b42e
200,684
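For reference, the same check can be written with the built-in any(); this is only an equivalent sketch, not a change to the function above:

def any_exist_in(select_from, find_in):
    # Same semantics as _any_exist_in above, expressed with a generator.
    return any(target in find_in for target in select_from)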
def format_end2end_prompt(q, ans, info=False):
    """Formats prompt for fine-tuned end-to-end truth/info scores with GPT-3"""
    if info:
        prompt = 'Q: {0}\nA: {1}\nHelpful:'.format(q, ans)
    else:
        prompt = 'Q: {0}\nA: {1}\nTrue:'.format(q, ans)
    return prompt
2a9b923dad333e9f9f078c25a2bbadc0fdbf8b44
370,138
def trainTestSplit(df):
    """
    Split the data set into a train and a test set. The last 7 days are used
    for testing.

    Returns two pandas data frames, input should usually be the output of
    `loadDataset`.
    """
    maxDay = df["date"].max()
    train = df[df["date"] <= maxDay - 7]
    test = df[df["date"] > maxDay - 7]
    return train, test
deb047c7c729f005c8fb2c8653fd901e45cfb4a0
341,395
def _ylab(to_plot): """Returns the y-label for the plot given the type of plot. Parameters ---------- to_plot : string Type of thing to plot. Can be 'pmf', 'cdf', 'fid', or 'wern'. Returns ------- string The y-label for the plot. """ labels_dict = { 'pmf' : "$\\Pr(T_n = t)$", 'cdf' : "$\\Pr(T_n \\leq t)$", 'fid' : "$F_n(t)$", 'wern' : "$W_n(t)$" } return labels_dict[to_plot]
58b7217269bbf2f75cd0c378896ead0cb3bcc1be
25,574
def diamondCoordsForHeight(s):
    """ Diamond with height s
    origin = 0,0
    """
    coords = (
        (  # path
            (s/2, 0),
            (s, s/2),
            (s/2, s),
            (0, s/2),
        ),
    )
    return coords
87912dbd12b0234e5848dd498e2bb64e6d601faa
386,502
def _default_extract_pre(hook, args):
    """Default extract_fn when `timing='pre'`

    Args:
        hook (VariableMonitorLinkHook):
        args (_ForwardPreprocessCallbackArgs):

    Returns (chainer.Variable): First input variable to the link.
    """
    return args.args[0]
32d7df533bbcc597cb4e0a4a00e08c66bb981356
92,309
def _get_iterator(to_iter, progress):
    """
    Create an iterator.

    Args:
        to_iter (:py:attr:`array_like`): The list or array to iterate.
        progress (:py:attr:`bool`): Show progress bar.

    Returns:
        :py:attr:`range` or :py:class:`tqdm.std.tqdm`: Iterator object.
    """
    iterator = range(len(to_iter))
    if progress:
        try:
            # Import here so a missing tqdm is handled by the fallback below;
            # with a module-level import the ModuleNotFoundError branch could
            # never be reached.
            from tqdm import tqdm
            iterator = tqdm(range(len(to_iter)))
        except ModuleNotFoundError:
            print(
                "For the progress bar, you need to have the tqdm package "
                "installed. No progress bar will be shown"
            )
    return iterator
c1dd29a430d2c468e3f89536fef593b7477a04ce
15,917
def _get_list_of_branches(chain):
    """ Returns a list with the names of all the branches of the given TChain """
    return [b.GetName() for b in chain.GetListOfBranches()]
29bdba2365e50838ec4d9bc2c68e247ef13ddb2e
575,912
def parse_metadata(doc_file_path):
    """
    Parses metadata into dictionary

    inputs:
        doc_file_path (string): full filepath to document
    output:
        (dict): dictionary with parsed metadata
    """
    with open(doc_file_path, 'r') as file_name:
        meta_data_raw = file_name.read()
    meta_data_list = [x for x in meta_data_raw.splitlines() if x != '']
    meta_data_dict = {}
    for i in meta_data_list:
        split_string = i.split(':', 1)
        meta_data_dict[split_string[0].strip()] = split_string[1].strip()
    return meta_data_dict
3ad40ec145f3fb3fbd3694facef31f93db84a6f8
266,062
def usage(cmd='', err=''):
    """ Prints the Usage() statement for the program """
    m = '%s\n' % err
    m += ' Default usage is to list CRs linked to a branch(es).\n'
    m += ' '
    m += ' listcrs -s<stream> -n <branch name/CR Number> -l <branch/CR list> \n'
    # addTeamBranch
    return m
05d37974ee74c4499c0a19c3615d51a201a64ed5
66,861
import hashlib

def md5(s):
    """ String to MD5-hash """
    m = hashlib.md5()
    m.update(s.encode('UTF-8', errors='ignore'))
    return m.hexdigest()
7d1c04e17c2cd62eedc843ed88a6e2499fbb6865
484,155
def fields_to_batches(d): """ The input is a dict whose items are batched tensors. The output is a list of dictionaries - one per entry in the batch - with the slices of the tensors for that entry. Here's an example. Input: d = {"a": [[1, 2], [3,4]], "b": [1, 2]} Output: res = [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}]. """ # Make sure all input dicts have same length. lengths = [len(x) for x in d.values()] assert len(set(lengths)) == 1 length = lengths[0] keys = d.keys() res = [{k: d[k][i] for k in keys} for i in range(length)] return res
2840591ad2def849c5b9ffbc5fb59e17776ab0c2
31,194
import csv def load_data(filename): """ Load shopping data from a CSV file `filename` and convert into a list of evidence lists and a list of labels. Return a tuple (evidence, labels). evidence should be a list of lists, where each list contains the following values, in order: - Administrative, an integer - Administrative_Duration, a floating point number - Informational, an integer - Informational_Duration, a floating point number - ProductRelated, an integer - ProductRelated_Duration, a floating point number - BounceRates, a floating point number - ExitRates, a floating point number - PageValues, a floating point number - SpecialDay, a floating point number - Month, an index from 0 (January) to 11 (December) - OperatingSystems, an integer - Browser, an integer - Region, an integer - TrafficType, an integer - VisitorType, an integer 0 (not returning) or 1 (returning) - Weekend, an integer 0 (if false) or 1 (if true) labels should be the corresponding list of labels, where each label is 1 if Revenue is true, and 0 otherwise. """ evidence = [] labels = [] month_index = dict(Jan=0, Feb=1, Mar=2, Apr=3, May=4, June=5, Jul=6, Aug=7, Sep=8, Oct=9, Nov=10, Dec=11) with open(filename) as f: reader = csv.DictReader(f) for row in reader: evidence.append([ int(row["Administrative"]), float(row["Administrative_Duration"]), int(row["Informational"]), float(row["Informational_Duration"]), int(row["ProductRelated"]), float(row["ProductRelated_Duration"]), float(row["BounceRates"]), float(row["ExitRates"]), float(row["PageValues"]), float(row["SpecialDay"]), month_index[row["Month"]], int(row["OperatingSystems"]), int(row["Browser"]), int(row["Region"]), int(row["TrafficType"]), 1 if row["VisitorType"] == "Returning_Visitor" else 0, 1 if row["Weekend"] == "TRUE" else 0, ]) labels.append(1 if row["Revenue"] == "TRUE" else 0) return evidence, labels
1fec64643c93fd07fc5f7c834bb72e9ced1001ce
329,741
import re

def is_variable(text):
    """
    Checks if the given string is a variable in the format ${*}

    Args:
        text (str): The text which should be checked

    Returns:
        bool: True if the text contains a variable
    """
    return re.match(r"(\${.*})", text) is not None
0aae59e670fd6484cdf83bf22642e62d281eff76
495,418
def clamp(value, min, max):
    """
    Clamps a value to keep it within the interval [min, max].

    :param value: value to be clamped.
    :param min: minimum value.
    :param max: maximum value.
    :return: clamped value.
    """
    if value > max:
        return max
    elif value < min:
        return min
    return value
ff4e8a19756e6da85d3b06314c3ad7147c50b97c
273,705
import re

def clean_string(t, keep_dot=False, space_to_underscore=True, case="lower"):
    """Sanitising text:
    - Keeps only [a-zA-Z0-9]
    - Optional to retain dot
    - Spaces to underscore
    - Removes multiple spaces, trims
    - Optional to lowercase

    The purpose is just for easier typing, exporting, saving to filenames.

    Args:
        t (string): string with the text to sanitise
        keep_dot (bool, optional): Keep the dot or not. Defaults to False.
        space_to_underscore (bool, optional): False to keep spaces. Defaults to True.
        case: "lower" (default), "upper" or "keep" (unchanged)

    Returns:
        string: cleaned-up string
    """
    r = ""
    if case == "lower":
        r = str.lower(str(t))
    elif case == "upper":
        r = str.upper(str(t))  # fixed: the uppercased result was previously discarded
    elif case == "keep":
        r = str(t)
    if t:
        if keep_dot is True:
            r = re.sub(r"[^a-zA-Z0-9.]", " ", r)
        else:
            r = re.sub(r"[^a-zA-Z0-9]", " ", r)
        r = r.strip()
        if space_to_underscore is True:
            r = re.sub(" +", "_", r)
        else:
            r = re.sub(" +", " ", r)
    return r
36d72799c5ad4df6f64b3a7ba387adbc45ae645a
667,149
def format_time(seconds):
    """
    Formats a time in seconds

    :param seconds:
    :return: Time formatted as hh:mm:ss
    """
    format_zero = lambda x: str(int(x)) if int(x) > 9 else "0{}".format(int(x))
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return "{}:{}:{}".format(format_zero(h), format_zero(m), format_zero(s))
6b61636229006804bc292f0115abdf05a7836822
504,681
def parse_performance_data(response):
    """Parse metrics response to a map

    :param response: response from Unisphere REST API
    :returns: map with key as metric name and value as a dictionary
        containing {timestamp: value} for the timestamps available
    """
    metrics_map = {}
    for metrics in response["resultList"]["result"]:
        timestamp = metrics["timestamp"]
        for key, value in metrics.items():
            metrics_map[key] = metrics_map.get(key, {})
            metrics_map[key][timestamp] = value
    return metrics_map
613f3029b4b8011b28dbbbdf35fad7be56906a71
664,162
def term_A(P0, e0):
    """Term A in the main equation.

    P0 is the atmospheric pressure at the site (in hPa).
    e0 is the water vapor pressure at the site (in hPa).
    """
    return 0.002357 * P0 + 0.000141 * e0
5cf80199ad432ec8b8b5a34d383c7bfe59a81bd2
71,251
def rsa_decrypt(c: int, d: int, n: int) -> int:
    """
    Implements RSA decryption via the mathematical formula m = (c**d) % n.
    Returns the plain "text", really the integer representation of the value.

    :param c: Ciphertext integer.
    :param d: The private key exponent.
    :param n: The modulus.
    :return: The plaintext integer m.
    """
    m = pow(c, d, n)
    return m
4d44a926060e47a428ad275e88fd3a63a0cab909
602,343
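A toy round trip for rsa_decrypt above using the standard textbook key (p=61, q=53); not secure, purely illustrative:

n, e, d = 3233, 17, 2753              # textbook RSA parameters
message = 65
ciphertext = pow(message, e, n)       # encryption: c = m**e mod n
print(rsa_decrypt(ciphertext, d, n))  # 65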
import unicodedata

def normalize(text: str) -> str:
    """Prepare the `text` for string comparison.

    Replaces composed characters by basic ones and converts to lowercase.
    """
    text = unicodedata.normalize('NFKD', text)
    output = []
    for c in text:
        if not unicodedata.combining(c):
            output += [c]
    return ''.join(output).lower()
db127986bdda48332b5ce56d5b1a782bdf8ca05c
616,145
import math

def get_factor_list(n):
    """
    Use trial division to identify the factors of n.

    1 is always a factor of any integer so is added at the start.
    We only need to check up to n/2, and then add n after the loop.
    """
    factors = [1]
    for t in range(2, math.ceil((n / 2) + 1)):
        if n % t == 0:
            factors.append(t)
    factors.append(n)
    return factors
b7dcf48797bdb7203f9f73ec80dbff87cb097d1f
378,709
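Two quick checks of get_factor_list above; note that n = 1 returns [1, 1], since 1 is added both as the leading factor and as n itself.

print(get_factor_list(28))  # [1, 2, 4, 7, 14, 28]
print(get_factor_list(13))  # [1, 13]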
def _get_figure_size(numaxes):
    """
    Return the default figure size.

    Width: 8 units
    Height: 3 units per subplot, clamped to the range [6, 10] units

    Returns
    -------
    (width, height)
        The figure size in inches.
    """
    figure_width = 8
    figure_height = max(6, min(numaxes * 3, 10))
    return (figure_width, figure_height)
bb6f3a08b974cac2d5da2b69eac8653e9b41411e
27,957
def read_text_file(path_to_file):
    """
    Read a text file and import each line as an item in a list.

    * path_to_file: the path to a text file.
    """
    with open(path_to_file) as f:
        lines = [line.rstrip() for line in f]
    return lines
0265631258341d29c72e2416a273d6595029f93b
688,937
def coords_to_decals_skyviewer(ra, dec):
    """
    Get decals_skyviewer viewpoint url for objects within search_radius of ra, dec
    coordinates. Default zoom.

    Args:
        ra (float): right ascension in degrees
        dec (float): declination in degrees

    Returns:
        (str): decals_skyviewer viewpoint url for objects at ra, dec
    """
    return 'http://www.legacysurvey.org/viewer?ra={}&dec={}&zoom=15&layer=decals-dr5'.format(ra, dec)
3d56c1dee2c23a7bfae9efa59f7a9dc40ebc3d73
299,295
def compute_size(model):
    """ Computes the number of parameters of the model. """
    return sum(p.numel() for p in model.parameters())
6a21fa785588a7a6164a37b3ba390cdadb6a38cc
351,817
def original_quote(node):
    """Return the quote (' or ") found in the first character of the given node.

    :param ast.Node node: the node to check
    :return: a 1-char string, ' or "
    :rtype: str
    """
    quote = node.last_token.string[0]
    return quote if quote in ('"', "'") else None
416daa8557d74699f1870f72246337c3dacff608
382,989
import torch def compute_jacobian(x, y, structured_tensor=False, retain_graph=False): """Compute the Jacobian matrix of output with respect to input. If input and/or output have more than one dimension, the Jacobian of the flattened output with respect to the flattened input is returned if `structured_tensor` is `False`. If `structured_tensor` is `True`, the Jacobian is structured in dimensions `[y_shape, flattened_x_shape]`. Note that `y_shape` can contain multiple dimensions. Args: x (list or torch.Tensor): Input tensor or sequence of tensors with the parameters to which the Jacobian should be computed. Important: the `requires_grad` attribute of input needs to be `True` while computing output in the forward pass. y (torch.Tensor): Output tensor with the values of which the Jacobian is computed. structured_tensor (bool): A flag indicating if the Jacobian should be structured in a tensor of shape `[y_shape, flattened_x_shape]` instead of `[flattened_y_shape, flattened_x_shape]`. Returns: (torch.Tensor): 2D tensor containing the Jacobian of output with respect to input if `structured_tensor` is `False`. If `structured_tensor` is `True`, the Jacobian is structured in a tensor of shape `[y_shape, flattened_x_shape]`. """ if isinstance(x, torch.Tensor): x = [x] # Create the empty Jacobian. output_flat = y.view(-1) numel_input = 0 for input_tensor in x: numel_input += input_tensor.numel() jacobian = torch.Tensor(y.numel(), numel_input) # Compute the Jacobian. for i, output_elem in enumerate(output_flat): if i == output_flat.numel() - 1: gradients = torch.autograd.grad(output_elem, x, retain_graph=retain_graph, create_graph=False, only_inputs=True) else: gradients = torch.autograd.grad(output_elem, x, retain_graph=True, create_graph=False, only_inputs=True) jacobian_row = torch.cat([g.view(-1).detach() for g in gradients]) jacobian[i, :] = jacobian_row if structured_tensor: shape = list(y.shape) shape.append(-1) jacobian = jacobian.view(shape) return jacobian
bd5fd8e3e2b8171680bf059d10fadfe1c39d8899
704,186
import itertools

def dict_product(dicts):
    """
    Create an iterator from a GridSearchCV-like dictionary

    This code is directly taken from stackoverflow:
    http://stackoverflow.com/a/40623158/621449
    """
    return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
d86b7cb96ca8806f32bc7d98271f59a780dcf3f5
295,116
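A small sketch of how dict_product above expands a GridSearchCV-style parameter grid:

grid = {"lr": [0.1, 0.01], "depth": [3, 5]}
for params in dict_product(grid):
    print(params)
# {'lr': 0.1, 'depth': 3}
# {'lr': 0.1, 'depth': 5}
# {'lr': 0.01, 'depth': 3}
# {'lr': 0.01, 'depth': 5}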
def get_model_fields(model):
    """
    Retrieve the field names of a model.

    :param model: The model to extract the field names from
    :return: The names of the fields of a model
    :rtype: tuple of str
    """
    meta = getattr(model, "_meta")
    return tuple(field.name for field in meta.get_fields())
aba1d41e39541f2198f462b75bc8190058da6423
470,614
def choose(n, r):
    """The number of ways of choosing r items from n"""
    if n < 0 or r < 0:
        print('Negative values not allowed')
        return 0
    if r > n:
        print('r must not be greater than n')
        return 0
    combin = 1
    if r > n / 2:
        r1 = n - r
    else:
        r1 = r
    for k in range(r1):
        combin = combin * (n - k) // (k + 1)
    return combin
d9520789c3509a5bbc4d20bf0d0e3ee40f18d489
162,109
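A quick sanity check of choose above against math.comb from the standard library:

import math

print(choose(10, 3), math.comb(10, 3))  # 120 120
print(choose(52, 5), math.comb(52, 5))  # 2598960 2598960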
def select_all_contigs(cxn):
    """Select all contigs."""
    sql = """SELECT * FROM contigs;"""
    return cxn.execute(sql)
b10f5f448ec58d72784d3f7e672564a7bda5c0a3
295,327
import math

def sqrt(theNumber):
    """Returns math.sqrt(theNumber)."""
    return math.sqrt(theNumber)
2aaa505fed81d136c069700e928b8ff77401a01a
292,621
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False): """Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit. """ period = int(max(1, period)) # pylint: disable=unused-argument def _callback(iter_no, sym=None, arg=None, aux=None): """The checkpoint function.""" if (iter_no + 1) % period == 0: mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states) return _callback
007ab00fe2b0ce3b6acca76596ff5b68cd7cb8d6
56,285
def get_branch_name(ref: str, ref_prefix='refs/heads/', clean=True, max_len=15):
    """
    Get and preprocess the branch name from the reference within a CodeCommit event.

    :param ref: As obtained from the CodeCommit event
    :param ref_prefix:
    :param clean:
    :param max_len: Maximal length of the resulting branch_name. If None, the
        returned string will not be shortened
    :return: Branch name. If clean is set to True, replaces "/" by "-" in the
        branch name
    """
    if ref.startswith(ref_prefix):
        branch_name = ref[len(ref_prefix):]
    else:
        raise Exception(f"Expected reference {ref} to start with {ref_prefix}")
    if clean:
        branch_name = branch_name.replace('/', '-').lower()
    if max_len is not None:
        branch_name = branch_name[:max_len]
    return branch_name
48a12bacbdb2b7d51d61cfd941fa343c04073b97
101,136
def offbyK(s1, s2, k):
    """Input: two strings s1, s2 and an integer k
    Process: if both strings are of the same length, the function checks
        whether the number of dissimilar characters equals k
    Output: returns True when the conditions are met, otherwise False
    """
    if len(s1) == len(s2):
        flag = 0
        for i in range(len(s1)):
            if s1[i] != s2[i]:
                flag = flag + 1
        if flag == k:
            return True
        else:
            return False
    else:
        return False
a64c02b85acca64427852fc988ed2f769f750aa7
6,801
import torch

def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
    """
    A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for
    summing log probabilities.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A tensor of arbitrary size.
    dim : int, optional (default = -1)
        The dimension of the tensor to apply the logsumexp to.
    keepdim: bool, optional (default = False)
        Whether to retain a dimension of size one at the dimension we reduce over.
    """
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    if keepdim:
        stable_vec = tensor - max_score
    else:
        stable_vec = tensor - max_score.unsqueeze(dim)
    return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
4c0eb3e03a3f762bcb42d9377fe8f90851400782
533,680
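A numerical check of logsumexp above against torch.logsumexp, including values that would overflow a naive exp-sum-log:

x = torch.tensor([[1000.0, 1000.5], [-3.0, 2.0]])
print(logsumexp(x, dim=-1))        # tensor([1000.9741, 2.0067])
print(torch.logsumexp(x, dim=-1))  # same values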
from typing import Tuple from typing import Optional def id_from_mention(mention: str) -> Tuple[int, Optional[str]]: """Get an ID from a mention Any mention is supported (channel, user...). Args: mention: A mention to be stripped. Returns: A tuple containing the parsed ID and the type of mention. The type is a string with `member`, 'role' or 'channel'. If no type is determined, returns `None` as a type. Raises: `ValueError` if parsing fails. """ if "&" in mention: type_ = "role" elif "@" in mention: type_ = "member" elif "#" in mention: type_ = "channel" else: type_ = None try: return int(mention.rstrip(">").lstrip("<@!#&")), type_ except ValueError: raise ValueError("Cannot parse mention")
6e9c43e33fdcb86366765c9779b8c6c036650886
580,679
def IsZonalGroup(group_ref):
    """Checks if group reference is zonal."""
    return group_ref.Collection() == 'compute.instanceGroups'
4371be5a3e5b96738c756e8d873fdd795d3df068
533,951
def _frange_percent(frame, frange): """Determines what percent completion a task is based on frame and frange Args: frame : (int) The frame to determine what percent complete we are. frange: (<nuke.FrameRange>) A nuke.FrameRange object with frange information. Returns: (int) The percentage of completion. Raises: N/A """ percent = (frame - frange.first()) / float(frange.frames()) return int(percent * 100)
9b6f49c85a74ba0fcbfe8cf1cef514860ef56e99
360,517
def _principals_for_authenticated_user(user):
    """Apply the necessary principals to the authenticated user"""
    principals = []
    if user.is_superuser:
        principals.append("group:admins")
    if user.is_moderator or user.is_superuser:
        principals.append("group:moderators")
    if user.is_psf_staff or user.is_superuser:
        principals.append("group:psf_staff")

    # user must have base admin access if any admin permission
    if principals:
        principals.append("group:with_admin_dashboard_access")

    return principals
288a56e9fdbc726dea5ba9a72efd83d6a0082f48
559,726
def _iter_graded(scores):
    """
    Yield the scores that belong to explicitly graded blocks
    """
    return (score for score in scores if score.graded)
ddb63743932b37fde63d7050564097780af2140c
352,016
import types def className(obj, addPrefix=False): """Return human-readable string of class name.""" if isinstance(obj, str): class_str = obj # don't add prefix -- it's unknown prefix = "" elif isinstance(obj, type): class_str = obj.__name__ if addPrefix: prefix = "Class " else: prefix = "" elif isinstance(obj, types.ModuleType): class_str = obj.__name__ if addPrefix: prefix = "Module " else: prefix = "" else: try: class_str = obj.__class__.__name__ except AttributeError: class_str = str(type(obj)) prefix = "" return prefix + class_str
a5a9d7fb0be0b01700a4f9d02d6532e758d662bd
104,163
def modify(node): """ Modify the branch to NODE, if NODE is a terminal node and the `division_exposure` of NODE differs from the `division`. This creates an intermediate branch, with a single child, such that we have PARENT-INTERMEDIATE-NODE. """ if "children" in node: return node division = node["node_attrs"].get("division", {}).get("value", "") recoded = node["node_attrs"].get("division_exposure", {}).get("value", "") if not division or not recoded or division == recoded: return node ## Make a new node which is the INTERMEDIATE (i.e. a single child, which is `node` with slight modifications) n = { "name": node["name"]+"_travel_history", "node_attrs": { "div": node["node_attrs"]["div"], "num_date": node["node_attrs"]["num_date"], "division_exposure": node["node_attrs"]["division_exposure"] }, "children": [node] } # Story a backup of the original exposure, to put back in later node["node_attrs"]["division_exposure_backup"] = node["node_attrs"]["division_exposure"] # change actual NODE division_exposure to have collection division node["node_attrs"]["division_exposure"] = node["node_attrs"]["division"] return n
983714cc7a4703e0bc2cc84d4c75d2c657719745
621,970
def smooth_series(data, window, method="average"):
    """Apply a moving average or median with a window of size "window"

    Arguments:
        data {Dataframe} -- Pandas dataframe
        window {int} -- size of window to apply

    Keyword Arguments:
        method {str} -- the method applied to smooth the data (default: {"average"})

    Returns:
        Dataframe -- the new dataframe
    """
    if method == "average":
        rolled_df = data.rolling(window=window, axis=1, center=True, win_type=None).mean()
    elif method == "median":
        rolled_df = data.rolling(window=window, axis=1, center=True, win_type=None).median()
    else:
        raise ValueError("Unknown method name")
    return rolled_df.dropna(axis=1)
f09b956c0a899de8b8607e08b05bdbc917e473a7
653,179
import re

def from_camelcase(inStr):
    """Converts a string from camelCase to snake_case

    >>> from_camelcase('convertToPythonicCase')
    'convert_to_pythonic_case'

    Args:
        inStr (str): String to convert

    Returns:
        String formatted as snake_case
    """
    return re.sub('[A-Z]', lambda x: '_' + x.group(0).lower(), inStr)
73b876e2eab1361c97acd5b4828db2de384a30c1
101,174
def createHeaders(X_APIKEY, Content_Type="application/json"): """ Create headers for API calls within this package REQUIRED ARGUMENTS :param X_APIKEY: String - Authentication key for the API OPTIONAL ARGUMENTS :param Content_Type: String - Default to "application/json" :return: JSON Headers for API call """ headers = { 'Content-Type': Content_Type, 'X-APIKEY': X_APIKEY } return headers
0bd937c405f5b2602b2f74f2281de240c45d2dc5
550,905
def _has_rows(rows: list) -> bool:
    """
    Determine if the table has rows.

    Keyword arguments:
    rows -- the rows to check

    Returns:
    True if the table has rows, False otherwise
    """
    return len(rows) > 0
ae8901084583a788a56ce47cbabf33956b939cda
617,404
import pickle

def load_data(pickle_file):
    """
    Load the data from given path.

    Args:
        pickle_file: pickle file of the requested data

    Returns:
        A tuple of (features, labels) for the given `pickle_file`
    """
    with open(pickle_file, mode='rb') as f:
        data = pickle.load(f)
    return data['features'], data['labels']
4c1f1a56114449feda8cd5450fb6535bb2bd4f65
644,694
import re def process_tspi_h(input_string): """Process the file content of tspi.h Parses the input file content and tokenize each function declaration into a 3-tuple as shown in the following example: ***declaration***: TSPICALL Tspi_Context_Create ( TSS_HCONTEXT* phContext // out ); ***output***: ("TSS_RESULT", "Tspi_Context_Create", [("TSS_HCONTEXT*", "phContext")]) Args: input_string: the file content of tspi.h in a string. Returns: A list of 3-tuples of return type, function name, a list of 2-tuple in form of (data type, variable name) """ r = re.compile(r'^TSPICALL[\s\S]*?;', flags=re.M) arr = [] for m in r.finditer(input_string): tokens = m.group(0).splitlines() name = tokens[0].split()[1] arguments = [] for t in tokens[2:-1]: var_type, var_name = t.split()[:2] arguments.append((var_type, var_name.replace(',', ''))) arr.append(('TSS_RESULT', name, arguments)) # For self-test purpose. expected_api_counts = input_string.count('\nTSPICALL') assert expected_api_counts == len(arr) return arr
4708e92a5b31c57548ee9eafeffb5510df3d5fab
301,089
import torch

def is_double_tensor(tensor: torch.Tensor) -> bool:
    """ Returns True if a tensor is a double tensor. """
    if torch.is_tensor(tensor):
        return tensor.type().endswith("DoubleTensor")
    else:
        return False
4ebd29e94f11b23522fcd74bae3e51f0993625ea
393,034
def convert_data_to_ints(data, vocab2int, word_count, unk_count, eos=True): """ Converts the words in the data into their corresponding integer values. Input: data: a list of texts in the corpus vocab2list: conversion dictionaries word_count: an integer to count the words in the dataset unk_count: an integer to count the <UNK> tokens in the dataset eos: boolean whether to append <EOS> token at the end or not (default true) Returns: converted_data: a list of corpus texts converted to integers word_count: updated word count unk_count: updated unk_count """ converted_data = [] for text in data: converted_text = [] for token in text.split(): word_count += 1 if token in vocab2int: # Convert each token in the paragraph to int and append it converted_text.append(vocab2int[token]) else: # If it's not in the dictionary, use the int for <UNK> token instead converted_text.append(vocab2int['<UNK>']) unk_count += 1 if eos: # Append <EOS> token if specified converted_text.append(vocab2int['<EOS>']) converted_data.append(converted_text) assert len(converted_data) == len(data) return converted_data, word_count, unk_count
c415aea164f99bc2a44d5098b6dbcc3d723697a6
5,801
def nested_get(input_dict, keys_list):
    """Get method for nested dictionaries.

    Parameters
    ----------
    input_dict : dict
        Nested dictionary we want to get a value from.
    keys_list : list
        List of keys pointing to the value to extract.

    Returns
    -------
    internal_dict_value :
        The value that keys in keys_list are pointing to.
    """
    internal_dict_value = input_dict
    for k in keys_list:
        internal_dict_value = internal_dict_value.get(k, None)
        if internal_dict_value is None:
            return None
    return internal_dict_value
8c4620795e001524480b39a219cea913493cfad9
167,940
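A short usage sketch for nested_get above with a made-up nested config:

config = {"db": {"connection": {"host": "localhost", "port": 5432}}}
print(nested_get(config, ["db", "connection", "port"]))  # 5432
print(nested_get(config, ["db", "missing", "port"]))     # None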
import numbers

def is_integer(x):
    """True if x is an integer (both pure Python or NumPy).

    Note that Python's bool is considered an integer too.
    """
    return isinstance(x, numbers.Integral)
faff1683bdb1229420bb39b2a39cd8ffff46e313
514,274
import math def step_decay(lr0, s, epochs_drop=1.0): """ Create stepwise decay: Drop learning rate by half (s) every specified iteration. Parameters ---------- lr0 : float initial learning rate s: float decay rate, e.g. 0.5, choose lower s than for other decays epochs_drop: float step size Returns ------- step_decay_fn: float stepwise decay """ # initial_lrate = 0.1 # drop = 0.5 # epochs_drop = 1.0 def step_decay_fn(steps_per_epoch): return lr0 * math.pow(s, math.floor((1 + steps_per_epoch) / epochs_drop)) return step_decay_fn
bb42852f0276082fe6516a6a5c961f7d5a0f359e
341,172
import math def ordinal(number): """ Converts integer number (1, 2, 3 etc) to an ordinal number representation (1st, 2nd, 3rd etc) https://stackoverflow.com/a/20007730/297131 :param number: An integer number >>> [ordinal(n) for n in range(0,32)] ['0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th', '13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th', '21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th', '29th', '30th', '31st'] """ return "%d%s" % (number,"tsnrhtdd"[(math.floor(number/10)%10!=1)*(number%10<4)*number%10::4])
f040efacafab71d6797bd7d002f7888f27c7e973
528,743
def reverse_index_1d(state):
    """The inverse of index_1d. From a row number in the transition matrix
    return the row and column in the grid world.

    Parameters
    ----------
    state : int
        An integer representing the row in the transition matrix.

    Returns
    -------
    row : int
        The row in the grid world.
    column : int
        The column in the grid world.
    """
    max_cols = 9
    row = state // max_cols  # integer division so the row stays an int in Python 3
    col = state % max_cols
    return row, col
8bac5aa86ac4ff3fee441065b3d58fbd2f1c089a
543,613
def find_first_slice_value(slices, key):
    """For a list of slices, get the first value for a certain key."""
    for s in slices:
        if key in s and s[key] is not None:
            return s[key]
    return None
6b454b6464f3ba2ecaa9d2a4e663233178ae013e
478,012
import operator
import itertools

def find_contiguous_ids(job_ids):
    """Return the contiguous job ids in the given list.

    Returns
    -------
    contiguous_job_ids : str
        The job ids organized in contiguous sets.
    """
    contiguous_job_ids = []
    for k, g in itertools.groupby(enumerate(job_ids), lambda x: x[0] - x[1]):
        group = list(map(operator.itemgetter(1), g))
        if len(group) == 1:
            contiguous_job_ids.append(str(group[0]))
        else:
            contiguous_job_ids.append('{}-{}'.format(group[0], group[-1]))
    return ','.join(contiguous_job_ids)
33f4b4838e76751083f176b71999508c06164b5c
287,182
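A quick example of the grouping done by find_contiguous_ids above:

print(find_contiguous_ids([1, 2, 3, 7, 9, 10]))  # '1-3,7,9-10'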
import random

def random_int(min=1, max=100):
    """Generate a random integer between min and max"""
    return random.randint(min, max)
f5af0521613a7327b343c8a060231f0454e7a645
564,149
import random
import string

def rand_string(L):
    """
    :param L: Integer, length of the string to randomly generate
    :return: String (of the given length L) of random characters
    """
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=L))
07c4b5e57b5803d3dedc8ff43179036f49db42b1
253,957
def get_table(table):
    """
    Updates table styling

    :param table: an html table
    :return: the newly styled table
    """
    table['class'] = "usa-table"
    return table
bc25990a83fae2c9a6da106f7af25e1ef87a39d5
568,965
def __patronymics(name, ltr):
    """
    Auxiliary function for generating suffixes for name
    1. name - part of name
    2. ltr - 'е' for 'евич' / 'евна' or 'о' for 'ович' / 'овна' suffixes
    3. return list of generated patronymics
    """
    return [name + ltr + 'вич', name + ltr + 'вна']
f9f3568334247ad495e95ab48f749d86da6a6a70
434,529
def _filter_slabs(provisional, max_size): """ Filters the repeat slabs from the list of all the zero dipole slabs. Creates lists of large and repeat slabs if any are present for the warnings Args: provisional (`list`): All zero dipole slabs generated with SlabGenerator max_size (`int`): The maximum number of atoms in the slab specified to raise warning about slab size. Returns: list of dictionaries with slabs, list of repeat slabs, list of slabs larger than the max_size """ # Iterate though provisional slabs to extract the unique slabs unique_list, unique_list_of_dicts, repeat, large = ([] for i in range(4)) for slab in provisional: if slab['slab'] not in unique_list: unique_list.append(slab['slab']) unique_list_of_dicts.append(slab) # For large slab size warning atoms = len(slab['slab'].atomic_numbers) if atoms > max_size: large.append('{}_{}_{}_{}'.format(slab['hkl'], slab['slab_thickness'], slab['vac_thickness'], slab['slab_index'])) # For repeat slabs warning else: repeat.append('{}_{}_{}_{}'.format(slab['hkl'], slab['slab_thickness'], slab['vac_thickness'], slab['slab_index'])) return unique_list_of_dicts, repeat, large
4993bc6d89e0d09840c30ce54119cc54e9ca42f2
667,839
def head(mention):
    """Compute head of a mention.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('head', HEAD), where HEAD is the (lowercased) head of the mention.
    """
    return "head", mention.attributes["head_as_lowercase_string"]
2fe6333bc3711ea82055ae0ec7334ce26f36e593
549,537
def from_tensor(tensor_item):
    """
    Transform from tensor to a single numerical value.

    :param tensor_item: tensor with a single value
    :return: a single numerical value extracted from the tensor
    """
    value = tensor_item.item()
    if value == -1:
        return None
    return value
9981215be4eb6d40838a50d147af2368ca854cd8
625,595
import jinja2 def jinja(source, environ, destination=None): """ Render a Jinja configuration file, supports file handle or path """ close_source = close_destination = False if type(source) is str: source = open(source, "r") close_source = True if type(destination) is str: destination = open(destination, "w") close_destination = True result = jinja2.Template(source.read()).render(environ) if close_source: source.close() if destination is not None: destination.write(result) if close_destination: destination.close() return result
511bd806167767ce8cb5d9323a8e6702a16e8631
124,252
def parr_to_pdict(arr, measures_optimized): """Convert BO parameter tensor to parameter dict""" if measures_optimized: d = { 'p_stay_home': arr[0].tolist(), } return d else: d = { 'betas': { 'education': arr[0].tolist(), 'social': arr[1].tolist(), 'bus_stop': arr[2].tolist(), 'office': arr[3].tolist(), 'supermarket': arr[4].tolist(), }, 'beta_household': arr[5].tolist(), } return d
50623f2aae87a5326b4524f249bf28eb093aa7c0
685,643
def _MasterUpgradeMessage(name, server_conf, cluster, new_version): """Returns the prompt message during a master upgrade. Args: name: str, the name of the cluster being upgraded. server_conf: the server config object. cluster: the cluster object. new_version: str, the name of the new version, if given. Raises: NodePoolError: if the node pool name can't be found in the cluster. Returns: str, a message about which nodes in the cluster will be upgraded and to which version. """ if cluster: version_message = 'version [{}]'.format(cluster.currentMasterVersion) else: version_message = 'its current version' if not new_version and server_conf: new_version = server_conf.defaultClusterVersion if new_version: new_version_message = 'version [{}]'.format(new_version) else: new_version_message = 'the default cluster version' return ('Master of cluster [{}] will be upgraded from {} to {}.' .format(name, version_message, new_version_message))
1c646f57d97ef3b5c639aed816190f0f6d7bb488
359,936
def active_users(engine, reverse=True):
    """Given a SimilarityEngine instance, returns a list of users sorted by like
    activity (the number of likes per user), in descending or ascending order,
    respectively, according to the True/False status of the :reverse flag, which
    defaults to True (for descending order).

    Strictly speaking it sorts on the tuple (like-activity, username) to guarantee
    reproducibility.
    """
    return sorted(engine.users(), key=lambda u: (len(engine.lookup(u)), u), reverse=reverse)
46cb9f9b16c2dba32be19af7dc83c8571f59ebf0
377,530
from typing import List

def generate_reason(total_points: int, message: str, recipients: List[str]) -> str:
    """
    :param total_points: the amount of points that should be distributed
    :param message: the message used when awarding points
    :param recipients: a list of email addresses for users who should receive points
    :return: the reason string that can be used with the bonusly api
    """
    if total_points <= 0:
        raise ValueError("Must enter positive number of points!")
    points_per_member = total_points // len(recipients)
    # give as many people points as possible
    if points_per_member == 0 and total_points > 0:
        recipients = recipients[:total_points]
        points_per_member = 1
    recipients_str = ' @'.join(recipients)
    return f"+{points_per_member} @{recipients_str} {message}"
17185f706949bc92f04c66a65a734badb242d9dd
632,438
import json def hdelk_html(schm: dict, header: str = "Schematic", display_customizations: str = "") -> str: """ Generates HTML with schematic using HDElk :param schm: schematics data in tool_render format (actually that is content for HDElk's graph variable) :param header: string to be written in header of file :return: whole HTML page content as string """ result = f"""<!DOCTYPE html> <html> <body> <h1>{header}</h1> <script src="./js/hdelk/elk.bundled.js"></script> <script src="./js/hdelk/svg.min.js"></script> <script src="./js/hdelk/hdelk.js"></script> <div id="simple_diagram"></div> <script type="text/javascript"> {display_customizations} var simple_graph = {json.dumps(schm, indent=2)} hdelk.layout( simple_graph, "simple_diagram" ); </script> </body> </html>""" return result
a0137c1f2af7c18b5516d9a20afca78b3c5068b5
432,001
def set_gauss(fwhm):
    """
    Compute the sigma value, given Full Width Half Max.
    Returns an operand string

    Parameters
    ----------
    fwhm : float

    Returns
    -------
    op_string : string
    """
    sigma = float(fwhm) / 2.3548
    op_string = "-kernel gauss %f -fmean -mas " % sigma + "%s"
    return op_string
77d63299a5bc93ccaff19980f98d75a51f554b7d
562,938
from typing import Dict
import inspect

def find_all_avail_tasks() -> Dict:
    """
    Finds and returns the list of callable tasks.

    Within FabSim3, each function that is wrapped by @task can be called from
    the command line.

    Returns:
        array of function object
    """
    f_globals = inspect.stack()[1][0].f_globals
    avail_tasks = {
        k: v
        for k, v in f_globals.items()
        if callable(v) and hasattr(v, "__wrapped__")
    }
    return avail_tasks
b295141eaaf2e2b7a8eb6f844d02ad3dfba72048
521,820
def ReadTxtName(rootdir):
    """
    Read the configuration file that describes the dataset, which consists of
    filenames and labels.
    """
    filenames = []
    labels = []
    with open(rootdir, 'r', encoding="utf-8") as file_to_read:
        while True:
            line = file_to_read.readline()
            if not line:
                break
            # strline = line.decode('gbk')
            part = line.strip().split(',')
            filename = part[0]
            label = part[1:-1]
            filenames.append(filename)
            labels.append(label)
    return filenames, labels
ce7b8dcbf1363b2b1d4093e1f83bd4eb8b6950c7
197,095
def update_hypnogram_cycles(hypno, cycles): """ Add cycle identifiers to hypnogram based on cycle detections Parameters ---------- hypno : pd.DataFrame Hypnogram dataframe obtained through load_hypno(). cycles : pd.DataFrame Tabulated estimates for onsets, offsets and durations of detected cycles. Returns ------- hypno : pd.DataFrame Hypnogram dataframe with updated cycle metadata """ # Iterate over each cycle for k, cycle in cycles.iterrows(): # Add onset, offset, mode of offset and durations to hypnogram start = cycle["Onset_idx"] end = cycle["Offset_idx"] hypno.loc[start:end, "Cycle_Num"] = cycle["Cycle"] hypno.loc[start:end, "Cycle_Duration"] = cycle["Duration"] hypno.loc[start:end, "Cycle_Offset_Mode"] = cycle["Offset_Mode"] return hypno
e65c52f1d12c08c00db2463ae5050064bdb366c5
477,527
def _contains_training_quant_op(graph_def):
    """Checks if the graph contains any training-time quantization ops."""
    training_quant_ops = {
        "FakeQuantWithMinMaxVars",
        "FakeQuantWithMinMaxVarsPerChannel",
        "QuantizeAndDequantizeV2",
        "QuantizeAndDequantizeV3",
    }
    return any(
        op in node_def.name
        for op in training_quant_ops
        for node_def in graph_def.node
    )
cdb908e9eed7a63f53a6a93ff02e40224d9205f2
297,147
def pickCompat(obj, keys):
    """
    Forming dict from given one and list of keys

    @param {dict} obj
    @param {list[str]} keys
    @return {dict}
    """
    result = {}
    for key in keys:
        result[key] = obj.get(key)
    return result
89ca34d9cd3c47d4fdce833931321a945f5e886a
151,785
def shift_left_bit_length(x: int) -> int:
    """
    Shift 1 left bit length of x

    :param int x: value to get bit length
    :returns: 1 shifted left bit length of x
    """
    return 1 << (x - 1).bit_length()
854e79309125c60c6e5975685078809fb4c016a4
705,029
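In effect, shift_left_bit_length above rounds a positive integer up to the next power of two:

print(shift_left_bit_length(5))  # 8
print(shift_left_bit_length(8))  # 8
print(shift_left_bit_length(9))  # 16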
def s2d(kv):
    """Turn a string k=v1,k2=v2 into a dict"""
    return dict([s.split('=') for s in kv.split(",")])
1fc6defdd970791cea2423f8d0aa2c1feed95ceb
205,839
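A quick example of s2d above; note that all values remain strings:

print(s2d("host=localhost,port=5432"))  # {'host': 'localhost', 'port': '5432'}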
def seqToGenbankLines(seq):
    """ chunk sequence string into lines each with six parts of 10bp, return as a list

    >>> seqToGenbankLines("aacacacatggtacacactgactagctagctacgatccagtacgatcgacgtagctatcgatcgatcgatcgactagcta")
    ['aacacacatg gtacacactg actagctagc tacgatccag tacgatcgac gtagctatcg', 'atcgatcgat cgactagcta']
    """
    # first chunk into 10bp parts
    parts = [seq[i:i+10] for i in range(0, len(seq), 10)]
    # put into lines of 6*10 bp
    lines = []
    for i in range(0, len(parts), 6):
        lines.append(" ".join(parts[i:i+6]))
    return lines
f0e290cf3d666980edc18acc50523f45ab18e24a
23,126
def _get_data_list(data, key): """get key's value as list from request arg dict. If the value type is list, return it, otherwise return the list whos only element is the value got from the dict. Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []} _get_data_list(data, 'a') == ['b'] _get_data_list(data, 'b') == [5] _get_data_list(data, 'd') == [] _get_data_list(data, 'e') == [] Usage: Used to parse the key-value pair in request.args to expected types. Depends on the different flask plugins and what kind of parameters passed in, the request.args format may be as below: {'a': 'b'} or {'a': ['b']}. _get_data_list forces translate the request.args to the format {'a': ['b']}. It accepts the case that some parameter declares multiple times. """ if key in data: if isinstance(data[key], list): return data[key] else: return [data[key]] else: return []
07c0759b99105b0a913d343a9e6c7e88e4229d36
551,523
import logging def infer_header(input_path, expected_lines, method_name=None): """ Method that infers the length of the header of a given file from the number of lines and of expected lines. Parameters ---------- input_path : file or string File or filename to read. expected_lines : int Number of expected lines in the input file. method_name : string, optional A string indicating the name of the method being evaluated. If provided will be used when logging an error. Default is None. Returns ------- header_len : int The length of the header. Raises ------ ValueError If more lines are expected than those in the file. """ # Autodetect header of input as (num_lines_in_input - expected_lines) num_lines = sum(1 for _ in open(input_path)) header_len = num_lines - expected_lines if header_len < 0: raise ValueError('Exception, not enough lines in input file! Expected {} lines, obtained {}.' .format(expected_lines, num_lines)) elif header_len > 0: if method_name is not None: logging.warning('Output of method `{}` contains {} more lines than expected. Will consider them part ' 'of the header and ignore them... Expected num_lines {}, obtained lines {}.' .format(method_name, header_len, expected_lines, num_lines)) else: logging.warning('Output contains {} more lines than expected. Will consider them part ' 'of the header and ignore them... Expected num_lines {}, obtained lines {}.' .format(header_len, expected_lines, num_lines)) # Return the header length return header_len
b3c590f9d55b0e3f80e06fa2a3d1541a195c6a5c
603,501
def springForce(k, x, p):
    """ Calculate the spring force """
    return -k * x**(p - 1)
e5aeacece84e53168be2d4c0938a13e855d0a168
537,337
def my_linear_model(B, x):
    """Linear function for the regression.

    Args:
        B (1D array of 2): Input 1D polynomial parameters (0=constant, 1=slope)
        x (array): Array which will be multiplied by the polynomial

    Returns
    -------
    An array = B[1] * (x + B[0])
    """
    return B[1] * (x + B[0])
775bb4b36005c2704ecf163e2df1e3c19231959a
99,087
import six import re def _escape_non_ascii(unicode_obj): """ Escape non-printable (or non-ASCII) characters using Java-compatible Unicode escape sequences. This function is based on code from the JSON library module shipped with Python 2.7.3 (json/encoder.py, function py_encode_basestring_ascii), which is Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights Reserved. See the file LICENSE included with PyjProperties for the full license terms. If that file is not available, then please see: https://www.python.org/download/releases/2.7.3/license/ Differences to the aforementioned original version of py_encode_basestring_ascii(): - Always tries to decode str objects as UTF-8, even if they don't contain any UTF-8 characters. This is so that we always return an unicode object. - Only processes non-printable or non-ASCII characters. Also _always_ replaces these characters with Java-compatible Unicode escape sequences (the original function replaced e. g. newlines with "\n" etc.). - Does not wrap the resulting string in double quotes ("). :type unicode_obj: unicode :param unicode_obj: The source string containing data to escape. :rtype : unicode :return: A unicode object. This does not contain any non-ASCII characters anymore. """ def replace(match): s = match.group(0) n = ord(s) if n < 0x10000: return u'\\u{0:04x}'.format(n) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) return u'\\u{0:04x}\\u{1:04x}'.format(s1, s2) # Just to be sure: If we get passed a str object, then try to decode it as UTF-8. if isinstance(unicode_obj, six.binary_type): unicode_obj = unicode_obj.decode('utf-8') return re.sub( six.text_type(r'[^ -~]'), replace, unicode_obj )
91291bae5cd9845dea58f9366908d41726239b49
497,532
def clean_cases(text):
    """
    Makes text all lowercase.

    :param text: the text to be converted to all lowercase.
    :type: str
    :return: lowercase text
    :type: str
    """
    return text.lower()
9b0c931336dbf762e5e3a18d103706ddf1e7c14f
708,013
def get_data_from_txt(path: str) -> list:
    """ Read each line of the file and store the lines in a list. """
    lines = []
    with open(path, encoding="utf-8", mode="r") as f:
        line = f.readline().strip()
        while line:
            lines.append(line)
            line = f.readline().strip()
    return lines
f47d902387450869634af6a3efff44e972ecb2a1
462,469
def check_insertion_order(metadict_inst, expected):
    """
    Ensure that the keys of `metadict_inst` are in the same order as the keys
    in `expected`. The keys case is ignored.
    """
    def normalise_keys(lst_pairs):
        return [key.lower() for key, _ in lst_pairs]

    assert normalise_keys(metadict_inst.items()) == normalise_keys(expected)
a6ec3e30b2c743ae8c03441884325a8c1d22346b
677,793
def final_score_calculator(score_list):
    """Returns final Levenshtein Word Score

    Iterates through all the scores for the various queries. Final score is
    calculated by averaging the ratio of the score with the word length of the
    ground truth. A given score is capped at 1.
    """
    final_score = 0.0
    for item in score_list:
        word_len = len(item[0])
        curr_score = item[1]
        curr_final = float(curr_score / word_len)
        if curr_final > 1:
            # Cap error score at 1
            curr_final = 1
        final_score += curr_final
    return float(final_score / len(score_list))
f7a61d71e522385850b505bd08431824987ad20d
167,191