content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def getconfirmation_input(action):
    """Ask the user on stdin to confirm an action.

    Prompts repeatedly until a recognized answer is given.

    INPUTS: action as str, description of the action
    OUTPUT: Returns boolean, True to proceed, False to not proceed
        (empty input defaults to "no").
    """
    while True:
        user_input = input(f"Confirm to proceed with '{action}'? [y/N]: ")
        # Membership tests replace the original bitwise-| chains, which
        # only worked by accident on booleans and always evaluated both sides.
        if user_input in ("y", "Y"):
            return True
        if user_input in ("", "n", "N"):
            return False
        print(f"Invalid input '{user_input}' >> Expecting [y/Y/n/N].")
7d54bf72ff35afc2f180f5c9509dbdf0812118d0
46,549
def _split_feature_trait(ft): """Feature is up to first '_'. Ex. 'line_color' => ['line', 'color']""" ft = ft.split('_', 1) return ft if len(ft)==2 else ft+[None]
913a520599e7ea1f5880bd172345b41ee8d248ea
484,096
import time
def get_timestamp() -> str:
    """Return a custom time stamp string: YYYY-MM-DD_HHMM (local time)."""
    # strftime replaces the fragile manual unpacking of time.localtime()
    # into a positional format string; it also fixes the "YYY" doc typo.
    return time.strftime("%Y-%m-%d_%H%M")
990070e0e40bb7c4a7ef880a75bc7cc0b2667e97
451,861
def cys_noncys_filter(peptides):
    """Split peptides into cys-containing and cys-free subsets.

    Adds a 'cys_rank' flag column (1 if the sequence contains 'C', else 0),
    then keeps only peptides whose protein contributes at least one cys
    AND one non-cys peptide. Note: mutates `peptides` by adding 'cys_rank'.

    Returns (cys_peptides, noncys_peptides) as DataFrames.
    """
    peptides['cys_rank'] = [int('C' in seq) for seq in peptides['Sequence']]
    has_cys = peptides['cys_rank'] == 1
    cys_peptides = peptides[has_cys]
    noncys_peptides = peptides[peptides['cys_rank'] == 0]
    # Proteins represented in both subsets
    shared = set(cys_peptides['Proteins']) & set(noncys_peptides['Proteins'])
    cys_peptides = cys_peptides[cys_peptides['Proteins'].isin(shared)]
    noncys_peptides = noncys_peptides[noncys_peptides['Proteins'].isin(shared)]
    return cys_peptides, noncys_peptides
faf33e2cb669f040d90f8967ca2f6a9491e7fea5
322,622
def should_scrolling_continue(rule_conf):
    """
    Tells about a rule config if it can scroll still or should stop the scrolling.

    Scrolling stops once the scrolling cycle reaches a positive
    'max_scrolling_count'; a missing or non-positive count means no limit.

    :param: rule_conf as dict
    :rtype: bool
    """
    # Default both values to 0 so a partially-populated config cannot raise
    # a TypeError by comparing None against integers.
    max_scrolling = rule_conf.get('max_scrolling_count', 0) or 0
    cycle = rule_conf.get('scrolling_cycle', 0) or 0
    stop_the_scroll = 0 < max_scrolling <= cycle
    return not stop_the_scroll
764ab6fb95e1228d488e57aa28136a1270e64b31
61,603
def presplit_textwrap(words, margin=79, *, two_spaces=True):
    """
    Combines "words" into lines and returns the result as a string.

    "words" should be an iterator containing pre-split text.
    "margin" specifies the maximum length of each line.
    If "two_spaces" is true, words that end in sentence-ending
    punctuation ('.', '?', and '!') will be followed by two spaces,
    not one.

    Elements in "words" are not modified; any leading or trailing
    whitespace will be preserved.  This is used for the "don't reformat
    indented 'code'" feature: "code" lines start with '\n', and the last
    one ends with '\n' too.
    """
    words = iter(words)
    col = 0  # current column on the line being built
    lastword = ''
    text = []
    for word in words:
        l = len(word)
        if not l:
            # An empty word acts as an explicit line break.
            lastword = word
            col = 0
            text.append('\n')
            continue
        if two_spaces and lastword.endswith(('.', '?', '!')):
            space = "  "
            len_space = 2
        else:
            space = " "
            len_space = 1
        if (l + len_space + col) > margin:
            # Word plus separator would overflow: wrap, unless already
            # at the start of a line.
            if col:
                text.append('\n')
                col = 0
        elif col:
            # Mid-line: emit the separator before the word.
            text.append(space)
            col += len_space
        text.append(word)
        col += len(word)
        lastword = word
    return "".join(text)
f34a1be537b8c5548a29c086f9861b85e95b8c4a
429,257
def get_stat_value_from_game(game, pfr_stat_id):
    """
    Function that extracts a specific stat from a set of game stats

    :param game: Table Row extracted by BeautifulSoup containing all player's stats for single game
    :param pfr_stat_id: PFR string element ID for the stat we want to extract
    :return: Extracted stat for provided game
    """
    # Locate the <td> cell whose data-stat attribute names the stat.
    # NOTE(review): find() returns None when the cell is absent, which
    # would raise AttributeError below -- confirm callers only pass rows
    # that contain the requested stat.
    data_stat_rec = game.find("td", {"data-stat": pfr_stat_id})
    # renderContents() yields bytes; strip whitespace, then decode to str.
    stat_val = data_stat_rec.renderContents().strip()
    return stat_val.decode("utf-8")
4b072a017d74d74279fdbc71bc4798787659dee1
277,328
def annualize(df, period_label='fqtr', period_val=4):  # Tested [Y]
    """Return only the rows of ``df`` whose ``period_label`` column equals
    ``period_val``.

    Used for selecting only 4th quarter observations from Compustat in
    Sauder's 2018 Capstone Project.

    Args:
        df (pd.DataFrame): DataFrame containing observations to be subset.
        period_label (str or int): Default="fqtr". Column to filter on.
        period_val (str or int): Default=4. Value of the column to select by.

    Returns:
        pd.DataFrame with only the matching observations.
    """
    keep = df[period_label] == period_val
    return df[keep]
4cf36403c7e83ac9e81e3d2b764a00ae918ac666
135,364
import aiohttp
import base64
async def render(eqn: str, **kwargs) -> bytes:
    """
    Render LaTeX using Matthew Mirvish's "TeX renderer slave microservice thing".

    Returns raw image data or raises ValueError if an error occurred.
    """
    # The equation source rides along with any extra renderer options.
    kwargs["source"] = eqn
    async with aiohttp.request("POST", "http://tex-slave/render", json=kwargs) as resp:
        result = await resp.json()
        if result["status"] != "ok":
            # The service reports either an internal error or a reason string.
            if "internal_error" in result:
                raise ValueError(f"Internal error: `{result['internal_error']}`")
            raise ValueError(result["reason"])
        data = result["result"]
        # The image comes back base64-encoded.
        return base64.b64decode(data)
c53a8cd5639f854301fafea72902f11427ca6dab
680,379
from typing import Any
def get_category(parameter_meta: Any, input_name: str, category_key: str = "category", fallback_category: str = "other") -> str:
    """Look up the category of an input in a parameter_meta mapping.

    :param parameter_meta: A dictionary containing the parameter_meta
        information.
    :param input_name: The key in parameter_meta to look up.
    :param category_key: If parameter_meta[input_name] is a dict, the key
        holding the category.
    :param fallback_category: Returned when no category can be found.
    :return: The category of the input.
    """
    try:
        entry = parameter_meta.get(input_name, {})
        return entry.get(category_key, fallback_category)
    except AttributeError:
        # Either parameter_meta or the entry is not dict-like
        # (e.g. the entry is a plain description string).
        return fallback_category
6eba16b078f0bf5ae5fabf0e46621d2fe3aadc9b
627,095
def _find_name_from_blame(blame_line): """ Finds the name of the committer of code given a blame line from git Args: blame_line: A string from the git output of the blame for a file. Returns: The username as a string of the user to blame """ blame_info = blame_line[blame_line.find('(')+1:] blame_info = blame_info[:blame_info.find(')')] blame_components = blame_info.split() name_components = blame_components[:len(blame_components)-4] return ' '.join(name_components)
4cc985b97ade4ff6a94ac218207bfd557b6cdef4
181,200
def sign(x: float) -> int:
    """Returns 1 if x is positive, 0 if x is 0, and -1 otherwise"""
    if x > 0:
        return 1
    if x < 0:
        return -1
    return 0
435d5d73e130cde9ab4674dc1eae1ce2833cad1e
538,158
def state_to_int(p, statelist):
    """
    Converts array of fermion-configuration into integer

    Treats ``statelist`` as the digits of a base-4 number, most
    significant digit first, over p['N'] sites.

    Args:
        p - dictionary that contains the relevant system parameters
        statelist - fermion configuration

    Returns:
        out - integer corresponding to state
    """
    n_sites = p['N']
    total = 0
    for position, occupation in enumerate(statelist):
        total += occupation * 4 ** (n_sites - position - 1)
    return int(total)
c07cba94e669e12fd82e6f6f46163661405f99ca
672,677
import re
def get_svg_file_d_attribute(path2file: str):
    """Returns d attribute content

    Reads the SVG file at ``path2file`` and returns the list of matches
    for the single expected ``<path d="...">`` attribute; raises
    ValueError when the file does not contain exactly one.
    """
    with open(path2file, "r") as svg_file:
        svg_text = svg_file.read()
    d_contents = re.findall(r'<path d=\"(.*?)\"', svg_text)
    if len(d_contents) != 1:
        raise ValueError("For now it should be one d attribute in svg.")
    return d_contents
40628da5da62b72b06f917ea26f2c6b5ba044640
139,538
def isnonempty(value):
    """
    Return whether the value is not empty

    Examples::

        >>> isnonempty('a')
        True
        >>> isnonempty('')
        False

    :param value: string to validate whether value is not empty
    """
    return not (value == '')
0250cb455d8f77027d5cde9101a24683950bbdb2
709,289
def is_list_view(path, method, view):
    """
    Return True if the given path/method appears to represent a list view.
    """
    if hasattr(view, 'action'):
        # Viewsets carry an explicit action we can inspect directly.
        return view.action == 'list'
    if method.lower() != 'get':
        return False
    # A path whose final component is a parameter ('{pk}' etc.) is a
    # detail view rather than a list view.
    components = path.strip('/').split('/')
    if components and '{' in components[-1]:
        return False
    return True
536d06d2a0a93e93745c42d81ab95db731c472ad
259,639
def recup_mot_masque(mot_complet, lettres_trouvees):
    """Return the word, masked in whole or in part.

    Builds a copy of ``mot_complet`` (str) where every letter that is
    not yet in ``lettres_trouvees`` (list) is replaced by '*'.
    """
    return "".join(
        lettre if lettre in lettres_trouvees else "*"
        for lettre in mot_complet
    )
24ad5813ceb71763f80426c6e7dd9e24b4139109
304,085
def flatten_dict(to_flatten):
    """Flattens nested dictionaries, removing keys of the nested elements.

    Useful for flattening API responses for prefilling forms on the
    dashboard.
    """
    flattened = {}
    for key, value in to_flatten.items():
        if not isinstance(value, dict):
            flattened[key] = value
        else:
            # Nested dicts contribute their own flattened items directly;
            # the parent key is dropped.
            flattened.update(flatten_dict(value))
    return flattened
a70c888fae129c92e93e02bbcea2aedfa3e5e5cb
97,412
def is_dict(obj):
    """Return True when ``obj`` is a dict (or a dict subclass)."""
    return isinstance(obj, dict)
7424fa383aedacffaf0118efb5f55af6317d39bb
663,067
def pull_service_id(arn):
    """Pull the ECS service id from a full service ARN.

    Returns everything after the first 'service/' marker; an ARN without
    the marker is returned unchanged.
    """
    _, marker, tail = arn.partition('service/')
    return tail if marker else arn
2358f6ff9a8286ad7b19b45054daa01b8c62a5b6
388,902
def determine_output_hash(crate_root, label):
    """Generates a hash of the crate root file's path.

    Args:
        crate_root (File): The crate's root file (typically `lib.rs`).
        label (Label): The label of the target.

    Returns:
        str: A string representation of the hash.
    """
    combined = hash(crate_root.path) + hash(repr(label))
    # hash() may be negative; keep the output non-negative.
    return repr(abs(combined))
87c1d5017672e8f0789c88bffcf928f1fd9f5030
118,891
def iscode(c):
    """
    Tests if argument type could be lines of code, i.e. list of strings.

    An empty list is accepted; otherwise only the first element is
    checked, mirroring the original heuristic.
    """
    # isinstance replaces the dated type(x) == type(...) comparisons and
    # also accepts subclasses of list/str.
    if not isinstance(c, list):
        return False
    return not c or isinstance(c[0], str)
e60da6c05922ff1e67db15fa4caa1500a8f470c7
1,259
def find_event_producer(tenant, producer_id=None, producer_name=None):
    """
    searches the given tenant for a producer matching either the id or name

    The id lookup (coerced to int) is tried first, then the name lookup;
    None is returned when no producer matches.
    """
    if producer_id:
        wanted = int(producer_id)
        for candidate in tenant.event_producers:
            if candidate.get_id() == wanted:
                return candidate
    if producer_name:
        for candidate in tenant.event_producers:
            if candidate.name == producer_name:
                return candidate
    return None
209cf3610e4f18fb5e06d50768d3bbc72472aed7
315,162
def generate_colorbar_label(standard_name, units):
    """ Generate and return a label for a colorbar. """
    pretty_name = standard_name.replace('_', ' ')
    return f"{pretty_name} ({units})"
a42c97ec673c882aabaeb54090e253a6bb46d645
685,570
def _is_numeric(v): """ Returns True if the given value is numeric. :param v: the value to check. :return: True if the value is numeric, False if not. """ try: float(v) return True except ValueError: return False
689cbeb32c0c552769d43d055ddb458dd64896c0
370,973
def _get_even_or_odd_nodes(graph, mod): """ Helper function for get_even_nodes. Given a networkx object, return names of the odd or even nodes Args: graph (networkx graph): determine the degree of nodes in this graph mod (int): 0 for even, 1 for odd Returns: list[str]: list of node names of odd or even degree """ degree_nodes = [] for v, d in graph.degree(): if d % 2 == mod: degree_nodes.append(v) return degree_nodes
4bec651d747ebfccb74ca7c2db815947a7f57393
303,047
def erized(syllable: str) -> bool: """Whether the syllable contains erhua effect. Example -------- huar -> True guanr -> True er -> False """ # note: for pinyin, len(syllable) >=2 is always true # if not: there is something wrong in the data assert len(syllable) >= 2, f"inavlid syllable {syllable}" return syllable[:2] != "er" and syllable[-2] == 'r'
8c9cf6889788e6b3789d53bbe5eaff0d7d4778d6
298,698
def _PrefixMatches(prefix, possible_matches): """Returns the subset of possible_matches that start with prefix. Args: prefix: str, The prefix to match. possible_matches: [str], The list of possible matching strings. Returns: [str], The subset of possible_matches that start with prefix. """ return [x for x in possible_matches if x.startswith(prefix)]
2cbf221c00eb32da2c81e6650180c9fc5f2b3058
182,140
def url_to_repo_org(url):
    """Extract owner and repository from GitHub url.

    :param url: a GitHub repository URL such as
        'https://github.com/owner/repo'
    :return: (owner, repository) tuple of strings
    :raises RuntimeError: when the URL is not a github.com URL
    """
    # check that the upstream_repo is a github repo
    if 'github.com' not in url:
        # Fixed the implicitly-concatenated message, which previously read
        # "non-GitHubrepositories" with no space.
        raise RuntimeError(
            'Extraction of repository and owner info from non-GitHub '
            'repositories is not yet supported!'
        )
    parts = url.replace('https://github.com/', '').split('/')
    return parts[0], parts[1]
ddce17a0785ce0e84420e6565fa7be3a7bb95cd8
460,945
from datetime import datetime
def get_database_backup_statement(filename, dbname, as_username='postgres'):
    """Returns the line to be executed to create a database dump file from
    postgresql.

    Arguments:
        filename: Name of the file to create.
        dbname: Name from the database.
        as_username: Name from the username to use when connecting to
            postgresql.
    """
    # The redirect target now interpolates the filename argument; the
    # previous template contained a literal placeholder and never used
    # the `filename` keyword it was passed. The unused `now` local was
    # also dropped.
    statement = 'sudo -u {username} pg_dump {dbname} > {filename}'.format(
        username=as_username,
        dbname=dbname,
        filename=filename
    )
    return statement
6ee739ff573f0c668fb7459d04ad74370370eccf
561,553
from typing import List
from typing import Counter
def get_characters(lang_data: List[str]) -> Counter:
    """
    Return the character counts over the language corpus.

    Parameters
    ----------
    lang_data : List[str]
        A list of all paragraphs

    Returns
    -------
    characters : Counter
        Maps each character to its number of occurrences.
    """
    # maps the character to the count
    characters = Counter()  # type: Counter
    for paragraph in lang_data:
        # update() counts in place; the original `characters += Counter(p)`
        # rebuilt the whole accumulator for every paragraph, which is
        # quadratic in the number of distinct characters.
        characters.update(paragraph)
    return characters
77aad0cb4146cb9bf64eae217c2dc65364f6e081
635,558
def sort_list(player_data_list):
    """Sort list based on qualifer.

    Args:
        player_data_list: player data list

    Returns:
        player data list properly sorted (by the last element of each entry)
    """
    ordered = list(player_data_list)
    ordered.sort(key=lambda entry: entry[-1])
    return ordered
7164d2851ca6c7557e8a9e10c45a25243254180d
702,560
def _read_file(path): """Read lines from a file. Arguments: path: string file filepath Returns: data: [string] list of string lines from file """ with open(path) as f: data = f.read().splitlines() return data
5f581af8bf92f4b43c7e8e6461b1c22ee77a3d25
187,249
def indent_all_lines(text, number_of_spaces=3):
    """Indent all lines in a string by a certain number of spaces"""
    pad = " " * number_of_spaces
    return "\n".join(pad + line for line in text.split("\n"))
558c63664885789b33dad9383d18e794b6664e90
199,447
import pytz
def ensure_utc(time, tz='UTC'):
    """
    Normalize a time to UTC.

    A tz-naive input is first assumed to be in ``tz`` (UTC by default);
    the returned datetime always carries the UTC tzinfo.
    """
    aware = time if time.tzinfo else time.replace(tzinfo=pytz.timezone(tz))
    return aware.replace(tzinfo=pytz.utc)
a293707517d2f5345e23c502cfb0d9cd69be912a
440,283
def reactions(mu, states):
    """Executes Michaelis-Menten chemical reactions.

    :Arguments:
        mu : int
            Index of the equation
        states : NumPy array
            Current states of the system (E, S, ES, P)

    :Returns:
        NumPy array : Updated state vector (E, S, ES, P)
    """
    E, S, ES, P = 0, 1, 2, 3  # species indices for readability
    if mu == 1:
        # E + S -> ES, only if both reactants are available
        if states[E] > 0 and states[S] > 0:
            states[E] -= 1
            states[S] -= 1
            states[ES] += 1
    elif mu == 2:
        # ES -> E + S
        if states[ES] > 0:
            states[E] += 1
            states[S] += 1
            states[ES] -= 1
    elif mu == 3:
        # ES -> E + P
        if states[ES] > 0:
            states[E] += 1
            states[ES] -= 1
            states[P] += 1
    else:
        raise ValueError('Reaction mu = %d does not exist.' % mu)
    return states
4c74dd9c67a3cbc8c3550dfac16edd95db8d630a
559,755
def get_experiment_time_series(time, data, exp):
    """Returns time and data slice between experiment start and end times."""
    start = exp.runs[0].start_time
    end = exp.runs[-1].end_time
    in_window = (time >= start) & (time <= end)
    return time[in_window], data[in_window]
2d29343b8dc9e3936b4bdc45f5f1f8526ae0f460
496,423
import math
def ellipse_perimeter(maj_ax, min_ax):
    """Estimate the perimeter of an ellipse with semi-major axis
    maj_ax and semi-minor axis min_ax.

    Formula reference:
    https://www.universoformulas.com/matematicas/geometria/perimetro-elipse/
    """
    axis_sum = maj_ax + min_ax
    H = ((maj_ax - min_ax) / axis_sum) ** 2
    correction = 1 + 3 * H / (10 + (4 - 3 * H) ** .5)
    return math.pi * axis_sum * correction
323c78f1b39cf4480c30a8da3d338af1f1249f26
378,208
import tempfile import csv def _create_row_delete_csv(row_id_vers_iterable): """ creates a temporary csv used for deleting rows :param row_id_vers_iterable: an iterable containing tuples with format: (row_id, row_version) :return: filepath of created csv file """ with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as temp_csv: csv_writer = csv.writer(temp_csv) csv_writer.writerow(("ROW_ID", "ROW_VERSION")) csv_writer.writerows(row_id_vers_iterable) return temp_csv.name
2f010bf1bccc57a808f8fc988233bf44c00ac95a
400,724
import random
def id_generator(size=8):
    """Randomly generate an id of length N (size) we can recognize.

    Think Italian or Japanese or Native American: consonants and vowels
    alternate, starting with a consonant.

    Modified from:
    `Stackoverflow <http://stackoverflow.com/questions/2257441>`_ and
    `ActiveState <http://code.activestate.com/recipes/526619/>`_.

    Args:
        size (int, optional): length of id, number of characters

    Returns:
        str: randomly generated string

    Examples:
        .. code-block:: python

            >>> id_generator()
            'palevedu'
            >>> id_generator(3)
            'sun'
    """
    vowels = 'aeiou'
    consonants = 'bdfghklmnprstvw'
    letters = []
    for position in range(size):
        pool = vowels if position % 2 else consonants
        letters.append(random.choice(pool))
    return ''.join(letters)
b351bad76cbeb10dc16b5ea169f0f194a4a13966
373,039
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None):
    """Flatten a nested dict into a single level.

    Keys of nested dicts are joined to their parent key with ``separator``
    (e.g. ``{'a': {'b': 1}}`` -> ``{'a.b': 1}``), recursing up to
    ``max_recursion_depth`` levels (None means unlimited). Only string
    keys are supported.

    Inputs
    ------
    original_dict: dict
        the dictionary to flatten
    separator: string, optional
        the separator item in the keys of the flattened dictionary
    max_recursion_depth: positive integer, optional
        the number of recursions to be done. None is infinte.

    Output
    ------
    the flattened dictionary

    Notes
    -----
    When the recursion limit is reached the (sub)dict is returned as-is,
    so deeper dicts survive unflattened as values.
    """
    if max_recursion_depth is not None and max_recursion_depth <= 0:
        # Depth exhausted: hand back the input unchanged (same object).
        return original_dict
    remaining_depth = None if max_recursion_depth is None else max_recursion_depth - 1
    flat = {}
    for key in original_dict:
        value = original_dict[key]
        if isinstance(value, dict):
            nested = flatten_hierarchical_dict(value, separator, remaining_depth)
            for subkey in nested:
                flat[key + separator + subkey] = nested[subkey]
        else:
            flat[key] = value
    return flat
0cade4607073bcaf71e42244666f1f044f2ad2fa
342,217
import pathlib
def root_dir() -> pathlib.Path:
    """Returns root directory (two levels above this file)"""
    this_file = pathlib.Path(__file__)
    return this_file.parent.parent
af2edceba8c0e23a1ca901c6d8aa4ec3a5c67e0d
578,496
def remove_blank_chars(text):
    """
    (str) -> str

    Removes superfluous blank characters from text, leaving at most a
    single space behind where there was more than one (space or newline)

    >>> remove_blank_chars('Happy \n \n Birthday')
    "Happy Birthday"

    :param text: text from which to remove superfluous blanks
    :returns: text with superfluous blanks removed
    """
    collapsed = text.replace('\n', ' ')
    # Repeatedly halve runs of spaces until none remain.
    while collapsed.find('  ') != -1:
        collapsed = collapsed.replace('  ', ' ')
    return collapsed
afc2901c557f4fc9b688683fd1487dac7d93bbae
348,960
def f3(x):
    """Evaluate the estimate x**3+x**2+x using Horner's scheme."""
    return ((x + 1) * x + 1) * x
b52f928f69c7f937cc07996ccb016e54539b426e
214,880
def figsize(rows=1, cols=1):
    """Default figsize for a plot with given subplot rows and columns."""
    # NOTE(review): the first tuple element (matplotlib's figure *width*)
    # scales with `rows` and the second (height) with `cols`, which looks
    # transposed -- confirm this is intentional before changing it, since
    # existing plots depend on the current shape.
    return (7 * rows, 5 * cols)
1269f7e6400f903249b3de5857f355aad9a53d46
36,664
def get_environment_id(request):
    """Get the environment ID from request.

    :param request: The falcon request object.
    :type request: :obj:`falcon.request`
    :return: The ID of the environment.
    :rtype: str
    """
    environment_id = request.get_param("id")
    return environment_id
03885f8b6c51ba68530ad411b0be58f778f0b0ef
422,874
def sort_hyps(hyps):
    """Return a list of Hypothesis objects ordered from highest to lowest
    average log probability."""
    return sorted(hyps, key=lambda hyp: -hyp.avg_log_prob)
62ccc090b26185460b5554b1c53f5e437787df99
341,827
from typing import Union
def sign(x: Union[float, int], zero_is_positive: bool = False) -> int:
    """
    Gets the sign of a number.

    Args:
        x (Union[float, int]): The value whose sign is wanted.
        zero_is_positive (bool): When True, 0 is treated as positive.

    Raises:
        TypeError: Raised if x is None.
        TypeError: Raised if x is not a float or int.

    Returns:
        int: -1 if x < 0, 0 if x == 0, 1 if x > 0
             (1 for x == 0 when zero_is_positive is True).
    """
    if x is None:
        raise TypeError("Argument x mustn't be None!")
    if not isinstance(x, (float, int)):
        raise TypeError(
            f"Expected argument x to be a float or int, instead it is {type(x)}."
        )
    if x < 0:
        return -1
    if x > 0:
        return 1
    return 1 if zero_is_positive else 0
36ebeb78591f9c251f6dc28e2f34f2aca9da5bd5
478,524
def extract_keywords(lst_dict, kw):
    """Extract the value associated to a specific keyword in a list of
    dictionaries. Returns the list of values extracted from the keywords.

    Parameters
    ----------
    lst_dict : python list of dictionaries
        list to extract keywords from
    kw : string
        keyword to extract from dictionary
    """
    values = []
    for entry in lst_dict:
        values.append(entry[kw])
    return values
28b97459dca558e245fa552a18711d5f5ce7a802
674,782
def initialize_LIP_dict(LIP_feature_collection):
    """
    Initialize the dictionary which contains the LIP fraction remaining
    for all LIPs (every LIP starts at 1).

    Parameters
    ----------
    LIP_feature_collection : feature collection
        feature collection of LIPs

    Returns
    -------
    LIP_fracs : dictionary with keys = LIP Ids, values = LIP fraction
        remaining
    """
    return {
        feature.get_feature_id().get_string(): 1
        for feature in LIP_feature_collection
    }
e105b84781105599bc92a6e1eece9b4f8ef2e4e9
695,307
import typing
def interpolate_between_two_points(x0: float, y0: float, x1: float, y1: float, proportion: float) -> typing.Tuple[float, float]:
    """
    Linearly interpolate between (x0, y0) and (x1, y1).

    ``proportion`` is the fraction of the distance from the first point
    to the second; 0 and 1 return the endpoints exactly.
    """
    if proportion == 0:
        return x0, y0
    if proportion == 1:
        return x1, y1
    dx = x1 - x0
    dy = y1 - y0
    return x0 + dx * proportion, y0 + dy * proportion
e5800de30cfb3938bf1b3a8900d8b09d6e99e927
169,573
import math
import warnings
import torch
def trunc_normal(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    """
    Fills the input tensor in place with values drawn from a truncated
    normal distribution. The values are effectively drawn from the normal
    distribution (`N(mu, var)`) with values outside (`[a, b]`) redrawn
    until they are within the bounds. The method used for generating the
    random values works best when `a <= mean <= b`.

    Args:
        tensor (torch.Tensor): an n-dimensional tensor
        mean (float): the mean of normal distribution
        std (float): the standard deviation of the normal distribution
        a (float): the minimum cutoff value
        b (float): the maximum cutoff value

    Returns:
        [torch.Tensor]: the same tensor, filled in place
    """
    def norm_cdf(x):
        """Computes the standard normal cumulative distribution function"""
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using truncated uniform distribution
        # and then using the inverse CDF for the normal distribution.
        # Get upper and lower CDF values
        lower = norm_cdf((a - mean) / std)
        upper = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with the values from [lower, upper]
        # and then translate to [2*lower-1, 2*upper-1].
        tensor.uniform_(2 * lower - 1, 2 * upper - 1)

        # Use inverse CDF transform for normal distribution
        # to get truncated standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
52872e638240bae45a380f19af822dbe09af81ee
335,302
def block(content, lang=''):
    """Returns a codeblock (triple-backtick fenced, optional language)."""
    return "```{}\n{}```".format(lang, content)
a358c1bc4015fe0a5d03375661c25e1bc6eb8874
400,002
def content_is_changed(remote_md: str, local_md: str) -> bool:
    """Return whether remote wiki page contents differ from the local wiki
    page contents, ignoring leading/trailing whitespace.

    Args:
        remote_md (str): Remote wiki page content
        local_md (str): Local wiki page content

    Returns:
        bool: True if contents are different, else False.
    """
    return not (remote_md.strip() == local_md.strip())
0f799138bbeae54541737bc9ddce6db2633549ca
440,247
def quals2ints(quals, base=33):
    """Convert an iterable of quality characters to phred-scale ints.

    Args:
        quals: The qualities.
        base: The offset of the first quality value (Old Illumina = 64,
            new Illumina = 33).

    Returns:
        A tuple of integer qualities.
    """
    # The docstring promises a tuple; the original returned a lazy
    # generator, which breaks len()/indexing and silently exhausts after
    # one pass. A tuple is backward compatible for iterating callers.
    return tuple(ord(q) - base for q in quals)
885671b6a71c9936740090b264caa301c45178c7
521,427
from typing import List
def make_code_block(text: str, language: str = "gdscript") -> str:
    """Returns the text formatted as a reStructured code block"""
    lines: List[str] = text.split("\n")
    body = "\n".join("    " + line for line in lines)
    return ".. code-block:: {}\n\n{}\n".format(language, body)
881e0159b56ea767c5bb8c880b1c8584182b38f8
65,933
def orientation(u, v, w):
    """Computes the orientation (+/-1) of a basis via the sign of the
    scalar triple product u . (v x w); zero counts as positive."""
    triple_product = u.dot(v.cross(w))
    return 1 if triple_product >= 0 else -1
1b6d99c1f38ca944d9a535ce48e065ff821dbaa9
641,119
def infer_free_values(A, b): """ Infer the indices of fixed values in an optimization vector. Parameters ---------- A : np.ndarray The constraint matrix. b : np.ndarray The constraint values. Returns ------- fixed : list The list of fixed indices. """ # find locations of b == 0, since pmf values are non-negative, this means they are identically zero. free = [i for i, n in enumerate(A[b == 0, :].sum(axis=0)) if n == 0] while True: # now find rows of A with only a single free value in them. those values must also be fixed. fixed = A[:, free].sum(axis=1) == 1 new_fixed = [[i for i, n in enumerate(row) if n and (i in free)][0] for i, row in enumerate(A) if fixed[i]] free = list(sorted(set(free) - set(new_fixed))) if not new_fixed: break return free
ba3313f96387c6e55a29220b748a61d226548c0f
56,809
def isFloat(input: str):
    """Check whether the input can be parsed as a float.

    :param input: the string (or other value) to test
    :return: True if float(input) succeeds; False otherwise
    """
    try:
        float(input)
    except (ValueError, TypeError):
        # Narrowed from a bare except so real failures (KeyboardInterrupt,
        # SystemExit, etc.) are no longer swallowed.
        return False
    else:
        return True
2c9dfa29caadbf4f759bc7b700a7f5ae2491f0e4
571,317
from datetime import datetime
def parse_steam_date(steam_date: str):
    """
    Parse a textual release date from Steam.

    Tries the full "Jan 2, 2020" form first, then the month-year
    "Jan 2020" form; returns None when neither matches.
    """
    for fmt in ("%b %d, %Y", "%b %Y"):
        try:
            return datetime.strptime(steam_date, fmt).date()
        except ValueError:
            continue
    return None
afcf28ab9a1fc814bf7f700ec90be6e29a16070e
451,655
def parse_and_validate_longitude(request):
    """Extract and Check the validity of longitude.

    Args:
        request: HTTP request.

    Returns:
        longitude (float) if valid.

    Raises:
        ValueError: if longitude is not float, or outside range [-180, 180].
    """
    raw = request.rel_url.query.get("long")
    lon = float(raw)
    # Keep the two-sided comparison so NaN (which fails both tests)
    # passes through exactly as before.
    out_of_range = lon > 180 or lon < -180
    if out_of_range:
        raise ValueError
    return lon
3a9d191be67e3b091ddcb7354bb8fc0ff2985b43
644,998
import copy
def injected_cube_position(example_dataset_ifs_crop):
    """
    Inject a fake companion into an example cube.

    Parameters
    ----------
    example_dataset_ifs_crop : fixture
        Taken automatically from ``conftest.py``.

    Returns
    -------
    dsi : VIP Dataset
    injected_position_yx : tuple(y, x)
    """
    dsi = copy.copy(example_dataset_ifs_crop)
    # we chose a shallow copy, as we will not use any in-place operations
    # (like +=). Using `deepcopy` would be safer, but consume more memory.

    # ground truth: (radial distance, theta, flux) -- flux of 50 in all
    # channels
    gt = (30, 0, 50)

    dsi.inject_companions(gt[2], rad_dists=gt[0], theta=gt[1])

    return dsi, dsi.injections_yx[0], gt
4a04deb841d440a4ae9b54180ab0a57d3533edba
615,173
def _decode_field(s, prev=None): """ Decode a single field according to the Lightstreamer encoding rules. 1. Literal '$' is the empty string. 2. Literal '#' is null (None). 3. Literal '' indicates unchanged since previous update. 4. If the string starts with either '$' or '#', but is not length 1, trim the first character. 5. Unicode escapes of the form uXXXX are unescaped. Returns the decoded Unicode string. """ if s == "$": return "" elif s == "#": return None elif s == "": return prev elif s[0] in "$#": s = s[1:] return s.decode("unicode_escape")
caee5dac6aaf14782d9c6c6d2b8bb865434d7ae2
208,563
import random
def setRandomParameters(net, seed=None, randFunc=random.random):
    """
    Sets parameters to random values given by the function randFunc
    (by default, uniformly distributed on [0,1) ).
    """
    random.seed(seed)
    # NOTE(review): randFunc is called with the number of parameters, but
    # the default random.random takes no arguments and would raise
    # TypeError here -- the default was presumably meant to be a function
    # returning a vector of that length (e.g. a numpy/scipy RNG). Confirm
    # with callers before relying on the default.
    net.setOptimizables( randFunc(len(net.GetParameters())) )
    return net.GetParameters()
628425843b0c683362ef8f479c410c1c68a4333c
105,116
def identity_abs(aseq, bseq):
    """Compute absolute identity (# matching sites) between sequence strings.

    Positions where BOTH sequences carry a gap character ('-' or '.')
    are skipped entirely.
    """
    assert len(aseq) == len(bseq)
    matches = 0
    for a, b in zip(aseq, bseq):
        if a in '-.' and b in '-.':
            continue
        if a == b:
            matches += 1
    return matches
9a58a5ddb1e47f0dd35434ddfc9e31a0a21e072d
543,575
def get_task_id(headers, body):
    """
    Across Celery versions, the task id can exist in a couple of places:
    the message headers are checked first, then the message body.
    """
    # Renamed from `id`, which shadowed the builtin of the same name.
    task_id = headers.get('id', None)
    if task_id is None:
        task_id = body.get('id', None)
    return task_id
e0d1fe891f3d6f6313cebb9fcb74bc3222b9b1ca
513,401
from datetime import datetime
def start_and_end_of_the_month(dt: datetime):
    """Get first of month and first of next month for a given datetime."""
    start = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    if start.month < 12:
        end = start.replace(month=start.month + 1)
    else:
        # December rolls over into January of the next year.
        end = start.replace(year=start.year + 1, month=1)
    return start, end
9f1001c325d04a4754b666477292788f620596f7
34,047
import re def _get_filename_from_response(response): """Gets filename from requests response object Args: response: requests.Response() object that contains the server's response to the HTTP request. Returns: filename (str): Name of the file to be downloaded """ cd = response.headers.get("content-disposition") if not cd: return None file_name = re.findall("filename=(.+)", cd) if len(file_name) == 0: return None return file_name[0]
45f5440390ad7279283ad5f50f7433b5ba7602cf
84,341
def filter_notrailingslash(host):
    """
    Removes a trailing slash from URLs (or anything, really)

    Usage:
        {{ 'www.example.com/' | notrailingslash }}

    Output:
        'www.example.com'
    """
    return host[:-1] if host.endswith("/") else host
d81e96afb90c92e969536473821e05adb10d436d
497,175
def _rst_escape_string(string: str) -> str: """ Escape special characters. :param str string: Input string :return: String with escaped characters """ return string.replace("*", "\\*")
5eb4844649aa1f77eaf54055ec419f4e032c6de4
350,798
def totalLinks(n):
    """Gives the number of possible links for a linkograph of size n."""
    # The number of possible links is the triangle number for n-1,
    # i.e. C(n, 2).
    return int(n * (n - 1) / 2)
d473432b6b40e3db4e7e898128a9a45a8ce798cc
475,113
def to_full_html_document(html_template_chunk: str) -> str:
    """
    Convert a HTML chunk into a full, valid html document.
    """
    pieces = [
        "<!DOCTYPE html>",
        "<html>",
        "<body>",
        html_template_chunk,
        "</body>",
        "</html>",
    ]
    return "\n".join(pieces) + "\n"
7069b7d07ac8df5a3b02058b5fd98260ee1bef9c
155,769
def map_users_to_team_name(users):
    """
    Maps user_id to team_name

    :param users: https://docs.sleeper.app/#getting-users-in-a-league
    :return: Dict {user_id:team_name}
    """
    users_dict = {}
    # Maps the user_id to team name for easy lookup
    for user in users:
        try:
            users_dict[user["user_id"]] = user["metadata"]["team_name"]
        except (KeyError, TypeError):
            # No custom team name set (missing key or metadata is None):
            # fall back to the display name. The former bare `except:` also
            # swallowed KeyboardInterrupt/SystemExit.
            users_dict[user["user_id"]] = user["display_name"]
    return users_dict
4420d8304d0c06a6ac25ac6d7abcc2e199c15c8b
524,215
def acc(observed_seq, hidden_seq):
    """
    Compute the accuracy of the hidden labels: the fraction of positions
    where the hidden sequence matches the observed one.
    """
    matches = observed_seq == hidden_seq
    return matches.mean()
4f5553881bbebc57083c2a95d93d329f81e4e062
199,387
def uint256_from_str(s):
    """Convert bytes to uint256.

    Interprets the first 32 bytes of *s* as a little-endian unsigned
    256-bit integer (the same layout the old struct-unpack loop decoded).
    """
    # int.from_bytes replaces the manual 8x32-bit unpack-and-shift loop.
    return int.from_bytes(s[:32], "little")
6ea6fc552dd39a5dfccc35bc57548da607b51ecd
231,440
from pathlib import Path def _getsourcetype(infiles): """ Determine the type of fieldmap estimation strategy. Example ------- >>> _getsourcetype(["path/some_epi.nii.gz"]) 'epi' >>> _getsourcetype(["path/some_notepi.nii.gz"]) 'magnitude' """ fname = Path(infiles[0]).name return "epi" if fname.endswith(("_epi.nii.gz", "_epi.nii")) else "magnitude"
009ef9833cc074bae7e17f5413a18194439d8c53
481,374
def rotate_layer(layer: tuple, rotation_diff: int) -> tuple:
    """Rotate layer by given rotation angle.

    Expects a 4-tuple ``(x, y, rotation, distribution)``; the rotation
    wraps modulo 180 for "Normal" distributions and modulo 360 otherwise.
    """
    first, second, rotation, distribution = layer
    period = 180 if distribution == "Normal" else 360
    return (first, second, (rotation + rotation_diff) % period, distribution)
41355468101de138d49700b5d595115b74201d93
84,684
import urllib3
import re


def _request_exc_message(exc):
    """
    Return a reasonable exception message from a
    :exc:`request.exceptions.RequestException` exception.

    The approach is to dig deep to the original reason, if the original
    exception is present, skipping irrelevant exceptions such as
    `urllib3.exceptions.MaxRetryError`, and eliminating useless object
    representations such as the connection pool object in
    `urllib3.exceptions.NewConnectionError`.

    Parameters:

      exc (:exc:`~request.exceptions.RequestException`): Exception

    Returns:

      string: A reasonable exception message from the specified exception.
    """
    if exc.args:
        # requests wraps the underlying cause as the first positional arg.
        if isinstance(exc.args[0], Exception):
            org_exc = exc.args[0]
            if isinstance(org_exc, urllib3.exceptions.MaxRetryError):
                # MaxRetryError itself is noise; its .reason attribute holds
                # the actual failure (e.g. NewConnectionError).
                reason_exc = org_exc.reason
                message = str(reason_exc)
            else:
                message = str(org_exc.args[0])
        else:
            message = str(exc.args[0])

        # Eliminate useless object repr at begin of the message.
        # Matches either "(<obj repr>, 'text')" (group 2) or
        # "<obj repr>: text" (group 3) and keeps only the text part.
        m = re.match(r'^(\(<[^>]+>, \'(.*)\'\)|<[^>]+>: (.*))$', message)
        if m:
            message = m.group(2) or m.group(3)
    else:
        message = ""
    return message
ad4ed8a70b8a19607a19d7f1345d0e1c3da11b6a
592,873
def int_to_tuple(number):
    """
    Change the int into a tuple if applicable. If the arg is already a
    tuple, return it directly.

    Args:
        number (int or tuple): The number to convert in tuple if not
            already a tuple.

    Returns:
        tuple: The number formatted into a tuple.
    """
    # Guard clause: anything that is not an int passes through untouched.
    if not isinstance(number, int):
        return number
    return (number,)
2e06f86e9dcd6630c369431441ef8876f241e390
562,640
def get_base_req(req):
    """Get the name of the required package for the given requirement.

    *req* may be a plain string or a tuple whose first element is the
    requirement string; everything after the first ``:`` is discarded.
    """
    name = req[0] if isinstance(req, tuple) else req
    # partition() never raises when ':' is absent — the whole string is kept.
    return name.partition(":")[0]
03642dedb41cda6841e6a63e3c9c50e2e8676234
645,411
import ast def _parse_answer_text(answer_text_str): """Parsing `answer_text` field to list of answers. The original code is from https://github.com/google-research/tapas. Args: answer_text_str: A string representation of a Python list of strings. For example: "[u'test', u'hello', ...]" Returns: answer_texts: A list of answers. """ try: answer_texts = [] for value in ast.literal_eval(answer_text_str): answer_texts.append(value) return answer_texts except SyntaxError: raise ValueError("Unable to evaluate %s" % answer_text_str)
c22198a012048eb483f6b0fed2f6dc4373f6ba6d
413,116
def num_examples_per_epoch(split):
    """Returns the number of examples in the data set.

    Args:
      split: name of the split, "train" or "validation".

    Raises:
      ValueError: if split name is incorrect.

    Returns:
      Number of example in the split.
    """
    normalized = split.lower()
    if normalized.startswith('train'):
        return 100000
    if normalized.startswith('validation'):
        return 10000
    raise ValueError('Invalid split: %s' % split)
05362a25d28838994d3cf0b6dbc3818cff6a7e37
662,135
def _stmt_from_rule(model, rule_name, stmts): """Return the INDRA Statement corresponding to a given rule by name.""" stmt_uuid = None for ann in model.annotations: if ann.predicate == 'from_indra_statement': if ann.subject == rule_name: stmt_uuid = ann.object break if stmt_uuid: for stmt in stmts: if stmt.uuid == stmt_uuid: return stmt
1447b1f8a54129928ce949fb915c1185e1f2aed7
237,106
def _remove_extension(path, extension): """ Removes the extension from a path. """ return path[:-len(extension)] if path.endswith(extension) else path
e819949ec396f70029df40e4a4aa3469e28ea613
634,361
import json


def load_bigbird_config(config_filepath):
    """Load bigbird config from original tf checkpoint. (pretrain_config.json)"""
    # Keys copied verbatim from the tf config when present.
    config_key_lst = [
        "attention_probs_dropout_prob",
        "attention_type",
        "block_size",
        "pad_token_id",
        "bos_token_id",
        "eos_token_id",
        "sep_token_id",
        "gradient_checkpointing",  # Originally `use_gradient_checkpointing`
        "hidden_dropout_prob",
        "hidden_size",
        "initializer_range",
        "intermediate_size",
        "max_position_embeddings",
        "num_attention_heads",
        "num_hidden_layers",
        "num_random_blocks",
        "pad_token_id",
        "rescale_embeddings",
        "type_vocab_size",
        "use_bias",
        "vocab_size",
    ]
    # Keys whose name differs in the tensorflow config file.
    tf_aliases = {
        "gradient_checkpointing": "use_gradient_checkpointing",
        "num_random_blocks": "num_rand_blocks",
        "rescale_embeddings": "rescale_embedding",
    }
    config = {
        "position_embedding_type": "absolute",
        "tokenizer_class": "BertTokenizer",  # NOTE Remove this one if you use other tokenizer.
    }
    with open(config_filepath, "r", encoding="utf-8") as f:
        tf_config = json.load(f)
    for config_key in config_key_lst:
        if config_key in tf_config:
            config[config_key] = tf_config[config_key]
        elif config_key in tf_aliases:
            # Translate to the legacy tf name; a missing alias raises the
            # same bare KeyError the original dict access produced.
            config[config_key] = tf_config[tf_aliases[config_key]]
        else:
            raise KeyError(f"{config_key} not in tensorflow config!!")
    return dict(sorted(config.items()))
1623d2d9f8327b2fba936c999a6b34e7a7ad8bf8
390,649
def get_remote_address(request):
    """
    :param: request: request object of sanic
    :return: the ip address of given request (or 127.0.0.1 if none found)
    """
    # Prefer the proxy-aware address; fall back to the socket-level IP.
    if request.remote_addr:
        return request.remote_addr
    return request.ip
039dbc86fb5af1cda79ff5d42882ddadc17e5da8
351,379
def shortest_paths(graph, vertex_key):
    """Uses Dijkstra's algorithm to find the shortest path from
    `vertex_key` to all other vertices.  If we have no lengths, then each
    edge has length 1.

    :return: `(lengths, prevs)` where `lengths` is a dictionary from key
      to length.  A length of -1 means that the vertex is not connected to
      `vertex_key`.  `prevs` is a dictionary from key to key, giving for
      each vertex the previous vertex in the path from `vertex_key` to that
      vertex.  Working backwards, you can hence construct all shortest
      paths.
    """
    # -1 marks "not yet reached"; the start vertex has distance 0.
    shortest_length = { k : -1 for k in graph.vertices }
    shortest_length[vertex_key] = 0
    candidates = {vertex_key}
    done = set()
    # The start vertex is its own predecessor.
    prevs = {vertex_key:vertex_key}
    while len(candidates) > 0:
        # Linear scan for the frontier vertex with the smallest tentative
        # distance (O(V) per pop; a heap would make this O(log V)).
        next_vertex, min_dist = None, -1
        for v in candidates:
            dist = shortest_length[v]
            if min_dist == -1 or dist < min_dist:
                min_dist = dist
                next_vertex = v
        candidates.discard(next_vertex)
        done.add(next_vertex)
        # Relax every edge out of the settled vertex.
        for v in graph.neighbours(next_vertex):
            edge_index, _ = graph.find_edge(next_vertex, v)
            dist = min_dist + graph.length(edge_index)
            current_dist = shortest_length[v]
            # Unreached (-1) or longer paths are replaced by this one.
            if current_dist == -1 or current_dist > dist:
                shortest_length[v] = dist
                prevs[v] = next_vertex
            if v not in done:
                candidates.add(v)
    return shortest_length, prevs
f2ac9abf9292364099748475988d4ee1dbeb4b23
699,744
def set_show_viewport_size_on_resize(show: bool) -> dict:
    """Paints viewport size upon main frame resize.

    Parameters
    ----------
    show: bool
            Whether to paint size or not.
    """
    params = {"show": show}
    return {
        "method": "Overlay.setShowViewportSizeOnResize",
        "params": params,
    }
d0d1f4ede8995b660f9d1abaabe18a5fdadc8d0b
66,574
def generate_hexagonal_board(radius=2):
    """
    Creates a board with hexagonal shape. The board includes all the
    fields within radius from center of the board. Setting radius to 0
    generates a board with 1 hexagon.

    Returns a (2*radius+1) x (2*radius+1) list of 0/1 ints, where 1 marks
    a cell inside the hexagon.
    """
    def hex_distance(a, b):
        # Axial-coordinate hex distance. The sum of the three absolute
        # deltas is always even, so floor division is exact — unlike the
        # old int(...)/2, which produced a float.
        return (abs(a[0] - b[0]) + abs(a[1] - b[1])
                + abs(a[0] + a[1] - b[0] - b[1])) // 2

    width = height = 2 * radius + 1
    board = [[0] * height for _ in range(width)]
    center = (radius, radius)
    for x in range(width):
        for y in range(height):
            board[x][y] = int(hex_distance((x, y), center) <= radius)
    return board
5471628fe0cc6601412fcbce61cb8be9325641f1
448,061
def samps2ms(samples: float, sr: int) -> float:
    """Convert a sample count to milliseconds at sampling rate *sr*."""
    # Same operation order as seconds-then-milliseconds to keep float
    # results bit-identical.
    seconds = samples / sr
    return seconds * 1000.0
49e07ee02984bf0e9a0a54715ef6b6e5a3c87798
709,572
import gzip


def return_filehandle(open_me):
    """return file handle for gz compressed or text file"""
    # Magic byte signatures keyed to a compression type.
    signatures = {
        b"\x1f\x8b\x08": "gz",
        # '\x42\x5a\x68': 'bz2',
        # '\x50\x4b\x03\x04': 'zip'
    }
    probe_len = max(len(sig) for sig in signatures)
    # Read just enough of the header to compare against every signature.
    with open(open_me, "rb") as probe:
        header = probe.read(probe_len)
    for sig, kind in signatures.items():
        if header.startswith(sig) and kind == "gz":
            return gzip.open(open_me, "rt")
    return open(open_me)
6e6f39706a29613332b22c438ad0d16630310cc1
527,928
def isolatedfile_to_state(filename):
    """For a '.isolate' file, returns the path to the saved '.state' file."""
    return f"{filename}.state"
8cd2456f2ec167ae1462c3b85fda2dfd52408dd7
58,151
import math


def sunset_hour_angle(lat, dec):
    """
    Calculate the sunset hour angle for a given latitude and solar
    declination (Allen et al. 1998).

    Parameters
    ----------
    lat : float
        Latitude (rad).
    dec : float
        Solar declination (rad).

    Returns
    -------
    float
        Sunset hour angle (rad).
    """
    # cos(ws) = -tan(lat) * tan(dec); acos raises ValueError when the
    # product leaves [-1, 1] (polar day / polar night).
    cos_ws = -math.tan(lat) * math.tan(dec)
    return math.acos(cos_ws)
c6bf293c513bf7a54a9fc85de8f6b7360bc602f8
246,497
def get_class_dict(class_tree):
    """Get dictionary of article ids and their hyperpartisan class.

    Args:
        class_tree (xml.etree.ElementTree): class xml document tree

    Returns:
        dict: dictionary of article ids and their hyperpartisan class
    """
    return {
        article.get("id"): article.get("hyperpartisan")
        for article in class_tree.xpath("/articles/article")
    }
977d3ddac76cd476c68d1c87ac8ef08993d2c787
218,929
def switch_players(current_player):
    """
    Toggle between player one and player two.

    Returns the toggled value 0 or 1.
    """
    # int() makes the return match the documented 0/1 contract (the old
    # code returned a bool); bools compare equal to 0/1 so callers are
    # unaffected.
    return int(not current_player)
3e55c55fcc052b9e2321d00c7e919daf28cbfbee
149,379
import hashlib


def sha512(data):
    """Compute the SHA-512 hash of *data* and return the raw digest bytes."""
    hasher = hashlib.sha512()
    hasher.update(data)
    return hasher.digest()
f7c40d5e8de82ce208be4f32518e6cc5252c0374
298,857
import csv


def read_list_csv(filename):
    """
    Use Python's native CSV reader to load a CSV file as a list of dicts
    (one plain dict per row, keyed by the header fields).
    """
    # newline="" is the documented way to open files for the csv module;
    # without it, quoted fields containing newlines can be mangled.
    with open(filename, newline="") as f:
        reader = csv.DictReader(f, skipinitialspace=True)
        # dict(row) replaces the redundant per-item dict comprehension.
        return [dict(row) for row in reader]
c85638b379297389dc9ce3fe88cb784f178424fa
169,662
def nosuperuser_settings(settings):
    """Return settings that have default superuser users disabled."""
    # Mutates the passed-in settings object and hands it back for chaining.
    setattr(settings, "NSOT_NEW_USERS_AS_SUPERUSER", False)
    return settings
23259f6e5e6954ee0133359f4b9a1f0472885ad3
265,559
def cache_key(package_name):
    """Return cache key for a package."""
    return f"package_monitor.cache:{package_name}"
a5e06477da3f58749e9d63a57fede7f1baff41c2
388,331
from typing import Union
from typing import Tuple
import math


def geotile_to_lat_lon(
        tile: Union[str, Tuple[int, int, int]],
        offset: Tuple[float, float] = (.5, .5),
) -> Tuple[float, float]:
    """
    Convert an elasticsearch geotile key to a latitude/longitude tuple

    Specific implementation is adapted from bing-map's
    `quadtile example code
    <https://docs.microsoft.com/en-us/bingmaps/articles/bing-maps-tile-system`__.

    :param tile:
        - a **string** in the form ``<zoom>/<x>/<y>``
        - or a **tuple** containing ``zoom``, ``x`` and ``y`` as integers

    :param offset: A float tuple that defines the offset inside the
        map-tile in range ``[0, 1]``. Defaults to the center of the
        tile: ``(.5, .5)``

    :return: tuple of latitude and longitude as float
    """
    if isinstance(tile, tuple):
        zoom, tile_x, tile_y = tile
    else:
        zoom, tile_x, tile_y = (int(part) for part in tile.split("/"))

    # Normalize tile coordinates (plus in-tile offset) to [0, 1].
    tiles_per_axis = 2 ** zoom
    frac_x = (tile_x + offset[0]) / tiles_per_axis
    frac_y = (tile_y + offset[1]) / tiles_per_axis

    # Inverse web-mercator projection.
    longitude = 360. * frac_x - 180.
    latitude = 90. - 360. * math.atan(math.exp((frac_y - .5) * 2 * math.pi)) / math.pi
    return latitude, longitude
e93277846cafd61963cb5656b484c6460bbf446c
508,010