Columns: content (string, lengths 39 to 9.28k), sha1 (string, length 40), id (int64, values 8 to 710k)
def tokenize(s):
    """Convert a string into a list of tokens."""
    return s.replace('(', ' ( ').replace(')', ' ) ').split()
db2f27ab9512bc200f80f7cd10e0678301be47f0
386,086
def distance_from_speed_and_time(movement_speed, movement_time):
    """Returns distance from speed and time"""
    return movement_time * movement_speed * 1000 / 3600
8cc1a9745c4f03ffe47e6e872a04435beb82a15f
643,908
import hashlib

def getIdHash(id):
    """Return md5 prefix based on id value"""
    m = hashlib.new('md5')
    m.update(id.encode('utf8'))
    hexdigest = m.hexdigest()
    return hexdigest[:5]
a9e8d67fae494cd2eaac41b6258be69ed10b667a
15,860
def nbr(value=0):
    """
    Format a decimal number in the Brazilian standard, using a comma to
    separate the integer part from the decimal part and a period to
    separate thousands groups.

    :param value: the value to be formatted
    :return: the formatted value
    """
    num = f'{value:,}'
    r1 = num.replace(',', ' ')
    r2 = r1.replace('.', ',')
    r = r2.replace(' ', '.')
    return r
73f2e97968207279af4ea4f6f859bcd7c87f3e95
412,008
def _spl_call_(spline, x):
    """Use `TSpline` as a function

    >>> spline = ...
    >>> value  = spline(10)

    - see ROOT.TSpline
    - see ROOT.TSpline3
    - see ROOT.TSpline5
    """
    return spline.Eval(x)
2d624fb64b876c14c1739eb5ff61b70da15d1cab
452,353
from typing import Dict

def to_label_selector(tags: Dict[str, str]) -> str:
    """Convert tags to label selector to embed in query to K8s API server."""
    label_selector = ""
    for k, v in tags.items():
        if label_selector != "":
            label_selector += ","
        label_selector += "{}={}".format(k, v)
    return label_selector
faa9bf49f879a455893ee10220262fa9c240b203
259,219
def flip_bbox_xy(bbox):
    """ flips x and y positions for all parts of a bounding box """
    return (bbox[1], bbox[0], bbox[3], bbox[2])
36fc75c2d28eab62e7b216bd347db90454a52e82
103,593
import ctypes

def pycapsule_new(ptr, name, destructor=None) -> ctypes.py_object:
    """
    Wraps a C function pointer into an XLA-compatible PyCapsule.

    Args:
        ptr: A CFFI pointer to a function
        name: A binary string
        destructor: Optional PyCapsule object run at destruction

    Returns a PyCapsule (ctypes.py_object)
    """
    return ctypes.pythonapi.PyCapsule_New(ptr, name, None)
dbfb92a8e60af149aecdcefac66aeb5befc05517
58,774
def get_sorted_data(data: list, sort_by: str, reverse=True) -> list:
    """Get sorted data by column and order

    Parameters
    ----------
    data : list
        Data stored in list of dicts
    sort_by : str
        Sort data by specific column
    reverse : bool, optional
        Flag to determine order of sorting (False - asc, True - desc), by default True

    Returns
    -------
    list
        Sorted data stored in list of dicts
    """
    return sorted(data, key=lambda k: (k[sort_by] is not None, k[sort_by]), reverse=reverse)
3e501d4a1b654412c3af25778804b085b6e82001
532,091
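A brief usage sketch for get_sorted_data above; the sample records and the "views" column are made-up assumptions, not part of the original. The (is not None, value) sort key pushes rows with a None value to the end under the default descending order.

# assumes get_sorted_data from the entry above is in scope
rows = [{"views": 3}, {"views": None}, {"views": 10}]
print(get_sorted_data(rows, sort_by="views"))                 # [{'views': 10}, {'views': 3}, {'views': None}]
print(get_sorted_data(rows, sort_by="views", reverse=False))  # [{'views': None}, {'views': 3}, {'views': 10}]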
def splice_before(base, search, splice, post_splice="_"):
    """Splice in a string before a given substring.

    Args:
        base: String in which to splice.
        search: Splice before this substring.
        splice: Splice in this string; falls back to a "." if not found.
        post_splice: String to add after the spliced string if found.
            If only a "." is found, ``post_splice`` will be added before
            ``splice`` instead. Defaults to "_".

    Returns:
        ``base`` with ``splice`` spliced in before ``search`` if found,
        separated by ``post_splice``, falling back to splicing before
        the first "." with ``post_splice`` placed in front of ``splice``
        instead. If neither ``search`` nor ``.`` are found, simply
        returns ``base``.
    """
    i = base.rfind(search)
    if i == -1:
        # fallback to splicing before extension
        i = base.rfind(".")
        if i == -1:
            return base
        else:
            # turn post-splice into pre-splice delimiter, assuming that the
            # absence of search string means delimiter is not before the ext
            splice = post_splice + splice
            post_splice = ""
    return base[0:i] + splice + post_splice + base[i:]
f8f5bf3c2355c38d16157836863e501cbc846d40
700,787
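A hedged usage sketch for splice_before above; the file names are invented values chosen to exercise the direct-match, extension-fallback, and not-found paths.

# assumes splice_before from the entry above is in scope
print(splice_before("image.tif", ".tif", "mask"))   # 'imagemask_.tif'  (search found)
print(splice_before("image.tif", "stack", "mask"))  # 'image_mask.tif'  (fallback to ".")
print(splice_before("image", "stack", "mask"))      # 'image'           (nothing found)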
from typing import Counter

def check_permutation(string1, string2):
    """
    this is equivalent to having the same number of letters in both strings
    -> doable Pythonically with a one liner
    """
    return Counter(string1) == Counter(string2)
97ee2cd00d258c4ee74b863cdd76633b93249013
194,121
import tempfile, os, shutil

def createSampleAssembly(workdir, template_dir, sa_xml):
    """Create a sampleassembly folder in the workdir using files in the template
    directory and the given sampleassembly.xml file.

    Return the path to the copied sampleassembly.xml in the new directory.
    """
    d = tempfile.mkdtemp(dir=workdir)
    for fn in os.listdir(template_dir):
        p = os.path.join(template_dir, fn)
        if os.path.isfile(p):
            shutil.copy(p, d)
            continue
    sa_dest = os.path.join(d, 'sampleassembly.xml')
    shutil.copy(sa_xml, sa_dest)
    return sa_dest
3ee793ba7f2b2173959d23719047ff7ebe5e74fa
649,990
def conn_links(nodes, links, node_idx):
    """
    Returns the first and last pixels of links connected to node_idx.
    """
    link_ids = nodes['conn'][nodes['idx'].index(node_idx)]
    link_pix = []
    for l in link_ids:
        link_pix.extend([links['idx'][l][-1], links['idx'][l][0]])
    return link_pix
38011de0bd825fa20d544a7a243eb0ebcdc36c8a
521,812
def get_repo_url(repository):
    """
    Returns the URL for the repository passed as parameter

    :param repository: object representing Repository model
    :return: str - concatenated URL
    """
    url_data = {
        'protocol': repository.protocol.lower(),
        'user': ''.join((repository.user, '@')) if repository.user else '',
        'host': repository.host.lower(),
        'port': ''.join((':', str(repository.port))) if repository.port else '',
        'repo': ''.join(('/', repository.repo_remote_path))
    }
    return '{protocol}://{user}{host}{port}{repo}'.format(**url_data)
7594d34de8b27f11243eba3b0a97913a3d1e8771
217,203
import re

def flatten(char_sequences, indices=False):
    """
    Function to split and flatten character sequences

    Args:
        char_sequences (list[str]): character sequences
        indices (bool): True to return a list of indices

    Returns:
        flat (list[str]): flattened character sequences
        indices (list[int]): optional list of indices
    """
    flat = []
    if indices:
        indices = []
        for i, speech in enumerate(char_sequences):
            split = re.split(r"\r\n|\n|\r|\n\r", speech)
            for j, segment in enumerate(split):
                if segment != "":
                    flat.append(segment)
                    indices.append([i, j])
        # return both lists
        return indices, flat
    else:
        for speech in char_sequences:
            split = re.split(r"\r\n|\n|\r|\n\r", speech)
            for segment in split:
                if segment != "":
                    flat.append(segment)
        return flat
03e1bbbc30be7c7a013cc348473e4eee554a4e63
394,885
def compass_angle(data: list, angles: tuple = (0.0, 360.0)) -> list:
    """
    Filter out images that do not lie within compass angle range

    :param data: The data to be filtered
    :type data: list
    :param angles: The compass angle range to filter through
    :type angles: tuple of floats
    :return: A feature list
    :rtype: list
    """
    if len(angles) != 2:
        raise ValueError("Angles must be a tuple of length 2")
    if angles[0] > angles[1]:
        raise ValueError("First angle must be less than second angle")
    if angles[0] < 0.0 or angles[1] > 360.0:
        raise ValueError("Angles must be between 0 and 360")
    return [
        feature
        for feature in data
        if angles[0] <= feature["properties"]["compass_angle"] <= angles[1]
    ]
2e9d9bbcdbdd84b6d782d83b1898faf9c11e9434
414,449
import time

def parse_date(value, dateformat='%Y-%m-%d'):
    """Parse a string into a date"""
    return time.strptime(value.strip(), dateformat)
87f382c74365e8a273355a6efe56ff94def5f9c0
653,956
def clean_empty(data):
    """Remove empty entries in a nested list/dictionary of items,
    deep removing nested empty entries.
    """
    if isinstance(data, dict):
        cleaned = {k: clean_empty(v) for k, v in data.items()}
        return {k: v for k, v in cleaned.items() if v}
    if isinstance(data, list):
        return [v for v in map(clean_empty, data) if v]
    return data
e72a16879022908e5a425c2215225519a0442dc0
447,273
def raw_string(txt):
    """
    Python automatically converts escape characters (i.e. \\n), which causes
    problems when inputting latex strings since they are full of backslashes.
    This function returns a raw string representation of text.

    Parameters
    ----------
    txt : string
        string that possibly contains escape characters

    Returns
    -------
    new_txt : string
        same as 'txt' but without any escape characters
    """
    escape_dict = {'\a': r'\a', '\b': r'\b', '\c': r'\c', '\f': r'\f',
                   '\n': r'\n', '\r': r'\r', '\t': r'\t', '\v': r'\v',
                   '\'': r'\'', '\"': r'\"'}
    # I used to have '\1' thru '\9' in the list above, but for some strange
    # reason '\a' and '\7' both get mapped to '\x07'. Thus, when someone inputs
    # '\alpha', it gets mapped to '\x07lpha'. If '\7' is listed in the escape
    # dictionary after '\a', then '\x07lpha' gets translated to '\\7lpha', which
    # LaTeX cannot understand. As far as I know, LaTeX never starts a command
    # with a number, so I just got rid of all the numbers in the escape
    # dictionary.
    new_txt = ''
    for char in txt:
        try:
            new_txt += escape_dict[char]
        except KeyError:
            new_txt += char
    return new_txt
4aa92dd88ca0f26849709f039885d13f8c4f3874
511,571
from typing import List
from typing import Tuple

def longest_path(dir_str: str) -> str:
    """
    To find the longest path to any dir/file.
    Can be easily modified to get the longest path to just a file

    :param dir_str: Directory string containing the directory structure
    :return: longest directory path (number of characters) to any file
    """
    def util(dirs: List[str], prefix: str) -> Tuple[str, int]:
        nonlocal index, l
        if not dirs:
            return "", 0
        max_len = 0
        return_str = ""
        while index < l:
            cur = dirs[index]
            if cur.startswith(prefix):
                cur = cur.lstrip(prefix)
                index += 1
                sub_str, sub_len = util(dirs, prefix + "\t")
                if sub_len + len(cur) + 1 > max_len:
                    if sub_len:
                        max_len = sub_len + len(cur) + 1
                        return_str = cur + "/" + sub_str
                    else:
                        max_len = len(cur)
                        return_str = cur
            else:
                break
        return return_str, max_len

    if not dir_str:
        return ""
    all_dirs = dir_str.split("\n")
    index: int = 0
    l: int = len(all_dirs)
    return util(all_dirs, "")[0]
16929fe87a77503a23370b24af17a2d4b4316057
690,133
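A small usage sketch for longest_path above; the tab-indented directory string is an assumed example input in the usual "longest absolute file path" convention.

# assumes longest_path from the entry above is in scope
tree = "dir\n\tsubdir\n\t\tfile.ext"
print(longest_path(tree))  # 'dir/subdir/file.ext'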
from typing import Any
import re

def format_safe(template: str, **kwargs: Any) -> str:
    """
    Works similarly to `template.format(**kwargs)`, except that unmatched fields
    in `template` are passed through untouched.

    >>> format_safe('{a} {b}', a='123')
    '123 {b}'
    >>> format_safe('{a} {b[4]:3f}', a='123')
    '123 {b[4]:3f}'

    To avoid variable expansion, precede the field with a hash, e.g.

    >>> format_safe('#{a} {b}', a='123')
    '{a} {b}'
    """
    result = template

    for key, value in kwargs.items():
        find_pattern = re.compile(
            rf"""
            (?<!\#)           # don't match if preceded by a hash
            {{                # literal open curly bracket
            {re.escape(key)}  # the field name
            }}                # literal close curly bracket
            """,
            re.VERBOSE,
        )
        # we use a function for repl to prevent re.sub interpreting backslashes
        # in repl as escape sequences.
        result = re.sub(
            pattern=find_pattern,
            repl=lambda _: str(value),  # pylint: disable=cell-var-from-loop
            string=result,
        )

        # transform escaped sequences into their literal equivalents
        result = result.replace(f"#{{{key}}}", f"{{{key}}}")

    return result
56e37d8cfcb60eb9851e732fbd45b6fda053bd04
544,505
import string

def expanded_chars(ch, cset):
    """
    Add extra characters or ASCII punctuation marks to initial character set

    :param ch: initial character set
    :type ch: str
    :param cset: extra characters
    :type cset: list
    :return: final character set
    :rtype: str
    """
    if len(cset) > 0:
        # Add extra characters
        for i in cset:
            ch += i
    else:
        # Add ASCII punctuation marks
        ch += string.punctuation
    return ch
10151ef7e3586d343064308ad6e092681b4e34c8
155,796
def get_lr_warmup_schedule(lr_final, schedule_end_step, schedule_start_step=0,
                           lr_init=1e-10):
    """Creates a learning rate warm-up schedule.

    The learning rate is increased linearly during the steps.

    Arguments:
        lr_final: A float representing the value of the learning rate at the
            end of the warm-up.
        schedule_start_step: An integer representing the step (either iteration
            or epoch) after which to start applying the warm-up.
        schedule_end_step: An integer representing the step (either iteration
            or epoch) after which we stop applying the warm-up, and the
            learning rate reaches its final value `lr_final`.
        lr_init: A float representing the value of the learning rate at the
            beginning of the warm-up.

    Returns:
        lr_schedule: A function mapping (step, lr) to the warmed-up learning rate.
    """
    total_steps = schedule_end_step - schedule_start_step
    total_lr_diff = lr_final - lr_init

    def lr_schedule(step, lr):
        if schedule_start_step <= step < schedule_end_step:
            lr = lr_init + total_lr_diff * step / total_steps
            # print('Step ', step, ' lr=', lr)
        return lr

    return lr_schedule
0ec4cefaf7903a6d8305c8dafefc90bd64d5f82c
473,963
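A quick sketch of how the returned schedule might be called during training; the step values and the target learning rate of 0.1 are assumptions for illustration.

# assumes get_lr_warmup_schedule from the entry above is in scope
warmup = get_lr_warmup_schedule(lr_final=0.1, schedule_end_step=100)
print(warmup(0, 0.0))    # ~1e-10  (start of the warm-up)
print(warmup(50, 0.0))   # ~0.05   (halfway through the linear ramp)
print(warmup(200, 0.1))  # 0.1     (outside the warm-up window, the lr passed in is returned)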
def get_outputs_with_response_from_dialog(utterances, response, activated=False):
    """
    Extract list of dictionaries with already formatted outputs of different skills
    from the full dialog whose replies contain `response`.
    If `activated=True`, skill also should be chosen as `active_skill`;
    otherwise, empty list.

    Args:
        utterances: utterances, the first one is user's reply
        response: target text to search among bot utterances
        activated: if target skill should be chosen by response selector on previous step or not

    Returns:
        list of dictionaries with formatted outputs of skill
    """
    result = []
    skills_outputs = []
    for uttr in utterances:
        if "active_skill" in uttr:
            final_response = uttr["text"]
            for skop in skills_outputs:
                # need to check text-response for skills with several hypotheses
                if response in skop["text"]:
                    if activated and skop["text"] in final_response and skop:
                        result.append(skop)
                    else:
                        if not activated and skop:
                            result.append(skop)
        elif "hypotheses" in uttr:
            skills_outputs = uttr["hypotheses"]
    return result
56fed08be53c49d006cdcf6b1e0a49fadcb1901c
541,079
import torch

def kl_loss(mean1, logvar1, mean2, logvar2):
    """
    KL divergence of two multivariate normal distributions with diagonal covariance.

    :param mean1: mean of distribution 1
    :param logvar1: logarithm of the covariance diagonal of distribution 1
    :param mean2: mean of distribution 2
    :param logvar2: logarithm of the covariance diagonal of distribution 2
    :return: KL divergence of distribution 1 and 2
    """
    result = -0.5 * torch.sum(logvar1 - logvar2
                              - torch.pow(mean1 - mean2, 2) / logvar2.exp()
                              - torch.exp(logvar1 - logvar2) + 1, 1)
    return result.mean()
e10a971de84254faf09e46ff210e964929cc8d69
258,724
def _field_name(name, prefix=None):
    """
    Util for quick prefixes

    >>> _field_name(1)
    '1'
    >>> _field_name("henk")
    'henk'
    >>> _field_name("henk", 1)
    '1henk'
    """
    if prefix is None:
        return str(name)
    return "%s%s" % (prefix, name)
03efaaca97ceffb1da958032f6d5ec65509c079f
664,127
def trailing_ones_str(bin_str: str):
    """
    This method returns the trailing ones of a binary number formatted as string.

    :param bin_str: The binary number as str.
    :return: The trailing ones as int.
    """
    # Only strings are allowed
    if not isinstance(bin_str, str):
        raise TypeError("String value expected")
    # Right trim
    bin_str = bin_str.rstrip()
    # Remove a possible trailing "b" from the binary str
    if len(bin_str) > 0 and bin_str[-1] == "b":
        bin_str = bin_str[:-1]
    # Count trailing ones
    ones = 0
    for index in range(len(bin_str)):
        c = bin_str[len(bin_str) - index - 1]
        if c == "1":
            ones += 1
        else:
            break
    return ones
eb7b3e3154af8a2964678651456f98fa72cbd879
587,103
def load_alpha_square(alphafile):
    """loads the ADFG(V)X matrix from a file

    Args:
        alphafile (str): contents of the matrix file

    Raises:
        ValueError: raised if matrix is not square

    Returns:
        list of list of str: adfgvx translation matrix
    """
    alpha_square = []
    for line in alphafile:
        split_line = [i.strip() for i in line.split(',')]
        alpha_square.append(split_line)
    # make sure matrix is square
    if not all(len(i) == len(alpha_square) for i in alpha_square):
        raise ValueError("Input file is not 5x5 or 6x6")
    return alpha_square
6ef1465ef414d9796b7d2473bd86e4e68d86ea2b
557,315
def calculate_padding(base_image_height, base_image_width,
                      target_image_height, target_image_width):
    """
    Calculate the padding values for the given images so that they are of the same size

    Parameters
    ----------
    base_image_height : int
        The height of the baseline image
    base_image_width : int
        The width of the baseline image
    target_image_height : int
        The height of the target image
    target_image_width : int
        The width of the target image

    Returns
    -------
    base_left : int
        Left padding value for Baseline Image
    base_right : int
        Right padding value for Baseline Image
    base_top : int
        Top padding value for Baseline Image
    base_bottom : int
        Bottom padding value for Baseline Image
    target_left : int
        Left padding value for Target Image
    target_right : int
        Right padding value for Target Image
    target_top : int
        Top padding value for Target Image
    target_bottom : int
        Bottom padding value for Target Image
    """
    delta_height = max(base_image_height, target_image_height) - \
        min(base_image_height, target_image_height)
    delta_width = max(base_image_width, target_image_width) - \
        min(base_image_width, target_image_width)

    target_top = target_left = target_bottom = target_right = 0
    base_top = base_left = base_bottom = base_right = 0

    # both images are of same size
    if(delta_height == 0 and delta_width == 0):
        return (base_left, base_right, base_top, base_bottom,
                target_left, target_right, target_top, target_bottom)

    top, carry = divmod(delta_height, 2)
    bottom = top + carry
    left, carry = divmod(delta_width, 2)
    right = left + carry

    # Assign the top, bottom, left, right padding values to make
    # the images the same size
    if(base_image_height > target_image_height):
        target_top = top
        target_bottom = bottom
    elif(base_image_height < target_image_height):
        base_top = top
        base_bottom = bottom

    if(base_image_width > target_image_width):
        target_left = left
        target_right = right
    elif(base_image_width < target_image_width):
        base_left = left
        base_right = right

    return (base_left, base_right, base_top, base_bottom,
            target_left, target_right, target_top, target_bottom)
dc744d686b9e94088653d2fa96c6e0615d427f91
509,040
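A worked example for calculate_padding above; the image dimensions are arbitrary assumed values. The smaller dimension on each axis receives the padding, split evenly between the two sides.

# assumes calculate_padding from the entry above is in scope
# baseline is 100x200 (height x width), target is 120x180
print(calculate_padding(100, 200, 120, 180))
# -> (0, 0, 10, 10, 10, 10, 0, 0)
#    baseline gets 10px top/bottom, target gets 10px left/right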
def iops_to_kiops(number: float) -> float:
    """
    Convert iops to k-iops.

    Parameters
    ----------
    number : float
        A ``float`` in iops.

    Returns
    -------
    float
        Returns a ``float`` of the number in k-iops.
    """
    return round(number * 1e-3, 3)
c3e57ef09f0f2694fd13b61fcbfcf6dea8e8719e
57,860
def remove_small_bboxes(bboxes, min_size):
    """
    Remove bounding boxes which contain at least one side smaller than
    the minimum size.

    Args:
        bboxes (:obj:`nn.Tensor[N, 4]`): Bounding boxes to be computed. They are
            expected to be in ``(x1, y1, x2, y2)`` format.
        min_size (float): The minimum size of bounding boxes.

    Returns:
        :obj:`nn.Tensor[K]`: Indices of the bounding boxes that have both
        sides larger than ``min_size``.
    """
    ws, hs = bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]
    keep = (ws >= min_size) & (hs >= min_size)
    keep = keep.nonzero(as_tuple=False).squeeze(1)
    return keep
290bba038a36047b7ac05ffe2e0d980ca7721f04
429,527
import random

def generate_portfolio_inputs(tickers):
    """
    tickers: list of ticker str

    Returns tuple of lists for random balance and target allocation
    """
    allocations = []
    balances = []
    for i in range(len(tickers)):
        a = random.random()
        b = round(random.uniform(1.0, 10000.0), 2)
        allocations.append(a)
        balances.append(str(b))
    allocations = [round(i / sum(allocations) * 100, 4) for i in allocations]
    return allocations, balances
fd17ef96643bafb29f59fdedc881125572d0e429
229,362
def bytes_to_bits(inbytes, width=None):
    """Convert bytes/bytearray/sequence of int to tuple of bits."""
    bitstr = ''.join('{:08b}'.format(_b) for _b in inbytes)
    bits = tuple(_c == '1' for _c in bitstr)
    return bits[:width]
6e489c5a359e7a6ba31e6ccb7b4a64e73109213d
284,511
def apk(y_true, y_pred, k=10, normalize='min'):
    """
    Computes the average precision at k.

    This function computes the average precision at k between two lists of items.

    Parameters:
        y_true: list
            A list of elements that are to be predicted (order doesn't matter)
        y_pred: list
            A list of predicted elements (order does matter)
        k: int, optional
            The maximum number of predicted elements
        normalize: normalization mode
            'k': normalize by top k
            'm': normalize by number of relevant documents (length of y_true)
            'min': normalize by min(m, k)

    Returns:
        score : double
            The average precision at k over the input lists
    """
    assert normalize in ['k', 'm', 'min'], "normalize should be in ['k', 'm', 'min']"

    if not y_true or not y_pred:
        return 0.0

    if len(y_pred) > k:
        y_pred = y_pred[:k]

    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(y_pred):
        if p in y_true:
            num_hits += 1.0
            p_at_k = num_hits / (i + 1.0)
            score += p_at_k * 1  # (p@k * rel(k))

    if normalize == 'k':
        score = score / k
    elif normalize == 'm':
        score = score / len(y_true)
    else:
        score = score / min(len(y_true), k)

    return score
6b711c47c19972399288ba3761f0a8a460a9a9ac
449,981
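A small worked example for apk above with assumed item lists; the hits land at ranks 1 and 3, giving precisions 1/1 and 2/3, so the 'min'-normalized score is (1 + 2/3) / 3.

# assumes apk from the entry above is in scope
print(apk([1, 2, 3], [1, 4, 2], k=3))                 # ~0.5556
print(apk([1, 2, 3], [1, 4, 2], k=3, normalize='m'))  # same value here, since m == min(m, k) == 3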
def lremove(string, prefix):
    """
    Remove a prefix from a string, if it exists.

    >>> lremove('www.foo.com', 'www.')
    'foo.com'
    >>> lremove('foo.com', 'www.')
    'foo.com'
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    else:
        return string
390022b2e0e14a9d0a0869a4865711ccb6814016
380,998
def mk_int(s):
    """
    Function to change a string to int or 0 if None.

    :param s: String to change to int.
    :return: Either returns the int of the string or 0 for None.
    """
    try:
        s = s.strip()
        return int(s) if s else 0
    except:
        return s
7d1c4133c0571b25ef4f161a1bd6b1e094ebec7e
691,984
def lookupOrInfer(pd, pi, word):
    """
    Lookup a word, take the first pronunciation. If not known, infer a
    pronunciation and return that.
    """
    pronunciations = pd.findPronunciations(word)
    if len(pronunciations) <= 0:
        pronunciation = pi.pronounce(word)
    else:
        pronunciation = pronunciations[0]
    return pronunciation
277138ad5ab1da69ee7ab05f8422383aa76af4e4
388,154
def calc_start_at(page, page_length):
    """Calculates the starting point for pagination. Pages start at 1.

    Args:
        page: page number
        page_length: length of previous pages
    """
    return (int(page) - 1) * int(page_length) + 1
d8a471a5477731d07616c60ef6cd8e384b15502d
192,821
def _and(arg1, arg2):
    """Boolean and"""
    return arg1 and arg2
3456e68c2d06dc212ddff43869bb760b85245729
682,550
def td_format(td_object):
    """
    Python format timedelta to string
    https://stackoverflow.com/a/13756038
    """
    seconds = int(td_object.total_seconds())
    periods = [
        ('year',   60*60*24*365),
        ('month',  60*60*24*30),
        ('day',    60*60*24),
        ('hour',   60*60),
        ('minute', 60),
        # ('second', 1)
    ]

    strings = []
    for period_name, period_seconds in periods:
        if seconds > period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            has_s = 's' if period_value > 1 else ''
            strings.append("%s %s%s" % (period_value, period_name, has_s))

    return ", ".join(strings)
040683cf458ed1e531f3b2dcf73459482f37a798
278,741
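A usage sketch for td_format above; the timedelta value is an assumption. Note that the period list stops at minutes, so sub-minute remainders are dropped.

# assumes td_format from the entry above is in scope
from datetime import timedelta
print(td_format(timedelta(days=400)))  # '1 year, 1 month, 5 days'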
def render_with_errors(bound_field):
    """
    Usage: {{ field|render_with_errors }} as opposed to {{ field }}.
    If the field (a BoundField instance) has errors on it, and the associated
    widget implements a render_with_errors method, call that; otherwise, call
    the regular widget rendering mechanism.
    """
    widget = bound_field.field.widget
    if bound_field.errors and hasattr(widget, "render_with_errors"):
        return widget.render_with_errors(
            bound_field.html_name,
            bound_field.value(),
            attrs={"id": bound_field.auto_id},
            errors=bound_field.errors,
        )
    else:
        return bound_field.as_widget()
d1e5cffabac9834c5cde5c94227a2510baf1768d
114,287
def is_vowel(char: str) -> bool:
    """
    Checks if the character is an Irish vowel (aeiouáéíóú).

    :param char: the character to check
    :return: true if the input is a single character, and is an Irish vowel
    """
    vowels = "aeiouáéíóú"
    return len(char) == 1 and char.lower()[0] in vowels
ae01631cc618c98c80e0cf9664486915f64edf66
468,605
def get_list_of_hyperparams_for_kernel(kernel_type):
    """
    Returns the list of hyper-parameters for each kernel.

    It returns a 3-tuple of lists of equal length. The first is a list of strings
    describing each hyper-parameter. The second is a list of integers indicating the
    size/dimensionality of each hyper-parameter. If any element in this list is 'dim',
    it means the dimensionality is the dimensionality of the input space. The third is
    a list of strings indicating if they are float or integral. If it's a list instead
    of a string, then it means the hyper-parameter is discrete with the elements in
    the list denoting possible values.
    """
    if kernel_type.lower() == 'se':
        return ['scale', 'dim_bandwidths'], [1, 'dim'], ['float', 'float']
    elif kernel_type.lower() == 'matern':
        return (['scale', 'dim_bandwidths', 'nu'], [1, 'dim', 1],
                ['float', 'float', [0.5, 1.5, 2.5]])
    elif kernel_type.lower() == 'espse':
        return ['esp_order', 'scale', 'dim_bandwidths'], [1, 'dim', 1], \
               ['float', 'float', 'int']
    elif kernel_type.lower() == 'espmatern':
        return (['esp_order', 'scale', 'dim_bandwidths', 'nu'], [1, 'dim', 1, 1],
                ['float', 'float', [0.5, 1.5, 2.5], 'int'])
    elif kernel_type.lower() == 'expdecay':
        raise NotImplementedError('Not implemented this function for ExpDecayKernel yet!')
    else:
        raise ValueError('Unidentified kernel type %s.' % (kernel_type))
74a1c5180a54a30afc92711f66410c58fbda70c2
515,519
def int_to_bytes(x):
    """Changes an unsigned integer into bytes."""
    return x.to_bytes((x.bit_length() + 7) // 8, 'big')
5f441a6a5767d8cd1292e8976a24b0c9c4ca157e
22,653
def file_allowed(filename, allowed_extensions):
    """Does filename have the right extension?"""
    return "." in filename and filename.rsplit('.', 1)[1] in allowed_extensions
b095b2b39095e580bb352661b161daf54752d417
279,872
import torch

def psnr(prediction: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
    """
    Function computes the Peak Signal to Noise Ratio
    PSNR = 10 * log10(max[y]**2 / MSE(y, y'))
    Source: https://github.com/ChristophReich1996/CellFlowNet

    :param prediction: (torch.Tensor) Prediction
    :param label: (torch.Tensor) Label
    :return: (torch.Tensor) PSNR value
    """
    assert prediction.numel() == label.numel(), \
        'Prediction tensor and label tensor must have the same number of elements'
    return 10.0 * torch.log10(prediction.max() ** 2
                              / (torch.mean((prediction - label) ** 2) + 1e-08))
5f143801eb2b1f93eff5ea5d7804b5e8f4f7fd38
540,792
def to_boto3_tags(tagdict):
    """
    Converts a tag dict to the list format expected by boto3 functions
    """
    return [{'Key': k, 'Value': v} for k, v in tagdict.items() if 'aws:' not in k]
d417b3a8929018d68ddef7108d907c9712f11d78
120,295
import torch

def pose_square(pose):
    """Converts pose matrix of size 3x4 to a square matrix of size 4x4"""
    pose_sh = pose.shape
    if pose_sh[2] == 3:
        pose_row = torch.tensor([0., 0., 0., 1.])
        if pose.is_cuda:
            pose_row = pose_row.to(pose.device)
        pose_row = pose_row.repeat(pose_sh[0], pose_sh[1], 1, 1)
        pose = torch.cat((pose, pose_row), 2)
    return pose
24005bcbd5ad3c436be7baa646100bbda805a865
549,145
def round_robin(w) -> bool:
    """Implements a round-robin association strategy: each IoT device is
    associated with the AP at the device's index modulo the number of APs.

    Return:
        Returns True on successful association, False otherwise.
    """
    m = len(w.aps)
    i = 0
    for device in w.iots:
        if device.do_associate(w.aps[i % m]) == False:
            return False
        i += 1
    return True
03cb6ea5eac30ff2cad676d9e55388403cd955df
689,318
def valid_xml_char_ordinal(c):
    """Filters out certain bytes so that XML files contain valid characters.

    The XML standard defines a valid character as:
    Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]

    Args:
        c: Character to be checked

    Returns:
        true if character codepoint in valid range
    """
    codepoint = ord(c)
    # conditions ordered by presumed frequency
    return (
        0x20 <= codepoint <= 0xD7FF or
        codepoint in (0x9, 0xA, 0xD) or
        0xE000 <= codepoint <= 0xFFFD or
        0x10000 <= codepoint <= 0x10FFFF
    )
32ae643ec970f00d4e65fd511de09bc8370ae9c6
683,780
import re

def replace_all(repls, str):
    """
    Applies replacements as described in the repls dictionary on input str.

    :param repls: Dictionary of replacements
    :param str: The string to be changed
    :return: The changed string
    """
    return re.sub('|'.join(re.escape(key) for key in repls.keys()),
                  lambda k: repls[k.group(0)], str)
43fbfee772e777e819f3559790fa744f0c6d45d9
91,122
from typing import List
from typing import Dict
import re

def parse_header_links(value: str) -> List[Dict[str, str]]:
    """
    Returns a list of parsed link headers, for more info see:
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link

    The generic syntax of those is:

    ::

        Link: < uri-reference >; param1=value1; param2="value2"

    So for instance:

    Link; '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;'

    would return

    ::

        [
            {"url": "http:/.../front.jpeg", "type": "image/jpeg"},
            {"url": "http://.../back.jpeg"},
        ]

    .. note::
        Stolen code from httpx _utils.py (private method)

    :param value: HTTP Link entity-header field
    :return: list of parsed link headers
    """
    links: List[Dict[str, str]] = []
    replace_chars = " '\""
    value = value.strip(replace_chars)
    if not value:
        return links
    for val in re.split(", *<", value):
        try:
            url, params = val.split(";", 1)
        except ValueError:
            url, params = val, ""
        link = {"url": url.strip("<> '\"")}
        for param in params.split(";"):
            try:
                key, value = param.split("=")
            except ValueError:
                break
            link[key.strip(replace_chars)] = value.strip(replace_chars)
        links.append(link)
    return links
536d3f2b477666c076ac29312f3acfe63f40e324
703,049
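A quick sketch of parse_header_links above on a GitHub-style pagination header; the URLs are made-up.

# assumes parse_header_links from the entry above is in scope
header = '<https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=5>; rel="last"'
print(parse_header_links(header))
# [{'url': 'https://api.example.com/items?page=2', 'rel': 'next'},
#  {'url': 'https://api.example.com/items?page=5', 'rel': 'last'}]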
import torch

def conv1x1(in_planes: int, out_planes: int, stride: int = 1):
    """
    1x1 convolution without padding.

    Args:
        in_planes (int): The number of input channels.
        out_planes (int): The number of output channels.
        stride (int): The convolution stride. Controls the stride for the
            cross-correlation.

    Returns:
        :obj:`torch.nn.Conv3d`: A convolution layer.
    """
    return torch.nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride,
                           bias=False)
f09e75dfda34801a355501dc4de76f392c861eda
566,542
def isWord(wordList, word):
    """
    Determines if word is a valid word.

    wordList: list of words in the dictionary.
    word: a possible word.

    returns True if word is in wordList.

    Example:
    >>> isWord(wordList, 'bat') returns True
    >>> isWord(wordList, 'asdf') returns False
    """
    word = word.lower()
    word = word.strip(" !@#$%^&*()-_+={}[]|\\:;'<>?,./\"")
    return word in wordList
ffead901ccc10797928c966b14cf4c2ac3f7f558
630,678
def sec_to_time(s):
    """ Returns the hours, minutes and seconds of a given time in secs """
    return (int(s // 3600), int((s // 60) % 60), (s % 60))
b96111c558f03de8530f63280f14b0413c567734
516,689
import colorsys

def hsl_to_rgb(hue, saturation, lightness):
    """Takes a colour in HSL format and produces an RGB string in the form
    #RRGGBB.

    :param hue: The Hue value (between 0 and 360).
    :param saturation: The Saturation value (between 0 and 100).
    :param lightness: The Lightness value (between 0 and 100).
    :raises ValueError: if any of the three parameters are outside their bounds."""

    if not isinstance(hue, int) and not isinstance(hue, float):
        raise TypeError("hue must be numeric, not '%s'" % hue)
    if not isinstance(saturation, int) and not isinstance(saturation, float):
        raise TypeError("saturation must be numeric, not '%s'" % saturation)
    if not isinstance(lightness, int) and not isinstance(lightness, float):
        raise TypeError("lightness must be numeric, not '%s'" % lightness)
    if not (0 <= hue <= 360):
        raise ValueError("hue must be between 0 and 360, not '%s'" % str(hue))
    if not (0 <= saturation <= 100):
        raise ValueError(
            "saturation must be between 0 and 100, not '%s'" % str(saturation)
        )
    if not (0 <= lightness <= 100):
        raise ValueError(
            "lightness must be between 0 and 100, not '%s'" % str(lightness)
        )
    r, g, b = colorsys.hls_to_rgb(hue / 360, lightness / 100, saturation / 100)
    return ("#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))).upper()
a7d0ab91bc01c04f2ecf5afa8255f639e5758a6c
694,785
def validate_hmm_output(hmm_output_files, hmm_file_name):
    """
    Checks that the contents of an hmmscan output are not corrupted.
    Arg is a list of line-by-line hmmscan output file contents.
    Returns True or False.
    """
    if len(hmm_output_files) < 20:  # somewhat arbitrary
        return False
    # check that this is the output for the right file
    if not (hmm_file_name in hmm_output_files[7]):
        return False
    # first line always is a comment with name of program
    if hmm_output_files[0] != '# hmmscan :: search sequence(s) against a profile database':
        return False
    # last line always says // (not [ok] because I split on that)
    if hmm_output_files[-1] != '//':
        return False
    return True
318747599e77776cc1fbd1455a56eb3fd77b5fb0
304,337
def is_date_time(file_type, path):
    """
    Return True if path is an object that needs to be converted to date/time string.
    """
    date_time_objects = {}
    date_time_objects["Gzip"] = ("mod_time",)
    date_time_objects["PE"] = ("pe.coff_hdr.time_date_stamp",)
    date_time_objects["Windows shortcut"] = ("header.time_creation",
                                             "header.time_access",
                                             "header.time_write",
                                             "last_mod_time")
    date_time_objects["ZIP"] = ("body.last_mod_time",
                                "body.last_access_time",
                                "body.creation_time")
    if file_type in date_time_objects.keys():
        for p in date_time_objects[file_type]:
            if p in path:
                return True
        return False
    else:
        return False
e9f722fcbc6de94a2794a2639db68f21be538e41
662,316
import itertools

def format_args(func, *args, **kwargs):
    """Render a call to the given function.

    Example:
        format_args(print, 'Hello', end=' ') == "print('Hello', end=' ')"
    """
    str_args = ', '.join(itertools.chain(
        map(repr, args),
        (f'{k}={v!r}' for k, v in kwargs.items()),
    ))
    return f'{func.__qualname__}({str_args})'
899ef2bdafc418aefe40fd5c38a4b31656eec697
173,826
def getRelationsAndRNByCharacter(cname: str, rn: int) -> str:
    """Return a query to get the relation and Ryu Number of a character.

    The query retrieves the character's name, as well as the title and Ryu Number
    of all games that the character appears in with a Ryu Number greater than or
    equal to the passed value.

    The resulting tuple takes the following form for appears_in as AI and game as G:
        `(AI.cname: str, AI.gtitle: str, G.ryu_number: int)`
    """
    return (f"SELECT AI.cname, AI.gtitle, G.ryu_number "
            f"FROM appears_in AS AI "
            f"JOIN game AS G ON G.title=AI.gtitle "
            f"WHERE cname='{cname}' AND G.ryu_number>={rn};")
3d6f2de4acafd03d3577eb3198fcf7100955a99c
119,049
def is_local_file(path):
    """
    Check that specified path is local, not URL.
    """
    if path.lower().startswith("http://"):
        return False
    if path.lower().startswith("https://"):
        return False
    if path.lower().startswith("ftp://"):
        return False
    return True
df14f95774f37e73afdeaf273e7df0e4b1530681
510,366
def plural(quantity: int, singular: str, plural: str) -> str:
    """Return the singular or plural word."""
    return singular if quantity == 1 else plural
b5ce2b0b461cf5fcdc5cb5666d1c9715ec0505c6
275,613
import calendar

def datetime_to_timestamp(d):
    """Convert naive datetime to unix timestamp."""
    return calendar.timegm(d.timetuple())
1781eb661bae83fd960a9096fc967041f8673baf
563,245
import platform

def is_linux_x86_64() -> bool:
    """Returns true for Linux x86_64 machine"""
    return platform.system() == 'Linux' and platform.machine() == 'x86_64'
2bb04a57303c1ce936f80ff4309f6144dec33bca
450,069
def iscomplex(pscale):
    """
    Returns whether pscale is an instance of a complex number
    """
    return isinstance(pscale, complex)
23aa3f67ac7de7aa4629de3d232f83ba916c7e20
600,519
def check_and_remove_trailing_occurrence(txt_in, occurrence):
    """Check if a string ends with a given substring. Remove it if so.

    :param txt_in: Input string
    :param occurrence: Substring to search for
    :return: Tuple of modified string and bool indicating if occurrence was found
    """
    n_occurrence = len(occurrence)
    if (txt_in[-n_occurrence:] == occurrence):
        txt_out = txt_in[0:-n_occurrence]
        flag_found = True
    else:
        txt_out = txt_in
        flag_found = False
    return txt_out, flag_found
09c0214dca7dbdf9ec7d199bb9348411cebe5ac1
100,908
def replicaset_votes(config_document):
    """
    Return the number of votes in the replicaset
    """
    votes = 0
    for member in config_document["config"]['members']:
        votes += member['votes']
    return votes
223a1139198c81e6d9490d1bdb41ec18a0212f33
215,221
def statbar_string(stat_dict: dict) -> str:
    """
    Return a printable "statbar" string from a dictionary of named statistics
    """
    stat_items = []
    for key, value in stat_dict.items():
        stat_items.append(f"{key} {value}")
    return ' | '.join(stat_items)
7f576085b887c442a61a521abe44d4f63eb74aa7
321,030
from typing import List

def split_comment(comment: str) -> List[str]:
    """split_comment

    Splits a comment into an array of lines. Broadly, this just wraps the
    .splitlines() function, such that the leading and trailing newlines can
    be removed.
    """
    lines: List[str] = comment.splitlines()
    while lines and not lines[0].strip():
        lines.pop(0)
    while lines and not lines[-1].strip():
        lines.pop()
    return lines
cbb6bc7fad7018a22934cb1b500cbd0d451aab16
337,569
def fallbackSeries(requestContext, seriesList, fallback):
    """
    Takes a wildcard seriesList, and a second fallback metric.
    If the wildcard does not match any series, draws the fallback metric.

    Example:

    .. code-block:: none

      &target=fallbackSeries(server*.requests_per_second, constantLine(0))

    Draws a 0 line when server metric does not exist.
    """
    if len(seriesList) > 0:
        return seriesList
    else:
        return fallback
ff50bc21530e1ca7ce079132437dbdcd88f13886
633,601
import torch

def unpack_episode(episode):
    """
    Helper function that extracts samples and labels from an episode in numpy

    Args:
        episode: the episode to unpack
    """
    examples = torch.cat([episode["support_images"],
                          episode["query_images"]], 1).numpy()
    labels = torch.cat([episode["support_class_labels"],
                        episode["query_class_labels"]], 1).numpy()
    return examples, labels
1326228e3433dc4eb7cfa0c50256468d02a05b64
634,952
def denormalize(coordinate: float, length: int) -> int:
    """Convert a normalized float coordinate between 0 and 1 to a pixel coordinate"""
    if not (0 <= coordinate <= 1):
        raise ValueError('Coordinate exceeds bounds')
    return int(coordinate * length)
fd7ba7ba1dcd85ac358cd76fc4fcf539dfdc0503
438,380
def flip(f):
    """Swap Arguments."""
    return lambda left, right: f(right, left)
012f1a42d9f6690556e5622b04f8f6f39dc89007
573,873
def health() -> dict:
    """Health Check for the server.

    Returns:
        dict: Health status in a dictionary.
    """
    return {'Message': 'Healthy'}
90a8184b6d689d236dbfe7c50f33c8de10411222
649,066
def repeating_key_xor(plaintext, key):
    """Implements the repeating-key XOR encryption."""
    ciphertext = b''
    i = 0
    for byte in plaintext:
        ciphertext += bytes([byte ^ key[i]])
        # Cycle i to point to the next byte of the key
        i = i + 1 if i < len(key) - 1 else 0
    return ciphertext
e2d9787a3ef44f32f1cf80ffedecf41410edc47a
596,548
def class_hasattr(instance, attr):
    """Helper function for checking if `instance.__class__` has an attribute"""
    return hasattr(instance.__class__, attr)
0ae527423bba45dde1af02f19358e9a291e66bb5
506,878
import re

def is_hex(in_col):
    """
    Check whether an input string is a valid hex value. Return True if it is,
    otherwise False.
    """
    if type(in_col) is not str:
        return False

    regular_expression = re.compile(
        r"""^               # match beginning of string
        [#]?                # exactly one hash, but optional
        [0-9a-fA-F]{6}      # exactly six of the hex symbols 0 to 9, a to f
        $                   # match end of string
        """,
        re.VERBOSE | re.MULTILINE,
    )

    if regular_expression.match(in_col) is None:
        return False
    else:
        return True
223b42d43f548a6254d94c3737ef82965c8e4b47
544,141
def check_distance_CA(AA, H):
    """
    Parameters
    ----------
    AA : amino acid from a protein structure object made with PDBParser().
    H : hetero residue from a protein structure object made with PDBParser().

    Returns
    -------
    distance : the distance between CA of AA and H's closest atom.
    """
    distance = []
    for atom in H:
        distance.append(AA['CA'] - atom)
    return min(distance)
6a9f12d5fc43b0d7f249d0f2a8f7b289d4793993
460,553
import random

def random_bool(probability=0.5):
    """Returns True with given probability

    Args:
        probability: probability to return True
    """
    assert (0 <= probability <= 1), "probability needs to be >= 0 and <= 1"
    return random.random() < probability
45b3ec3f15218df3da7b962301dc5174dbfa11c7
684,604
def bits_to_int_le(bits, count):
    """
    Converts bits to an integer, little endian

    :param bits: the bits
    :param count: the number of bits
    :return: the integer
    """
    i = 0
    for k in range(count):
        i |= (bits[k] & 1) << k
    return i
76aa503a3e4e325c893578a4ffd0c20f6c6c4ac7
576,203
from typing import Callable
from typing import Any
from typing import Reversible
from functools import reduce

def foldr(func: Callable[[Any, Any], Any], it: Reversible[Any]):
    """
    Right-fold iterable object

    Args:
        func: Two-argument function
        it: Reversible (Iterable) object

    Examples:
        >>> fpsm.foldr(lambda x, y: x - y, range(11))
        5
    """
    return reduce(lambda x, y: func(y, x), reversed(it), 0)
ee6b88db9539313c8da35b53bd49d8ffb92820fd
546,668
def getPointsFromBoundingBox(bbox):
    """
    Returns vertices of a bounding box

    Arguments:
        bbox = [left, top, width, height]
    """
    # top left, top right, bottom right, bottom left
    left, top, width, height = bbox
    vertices = [(left, top), (left + width, top),
                (left + width, top + height), (left, top + height)]
    return vertices
c6f399ad9d041ade6bb741e3fc29cdbf33da9c19
392,697
def _evaluate_term_fwd(term, eta0, eps0, deps, eta_derivs,
                       eval_directional_derivative, validate=False):
    """Evaluate the DerivativeTerm in forward mode.

    Parameters
    ----------------------
    term: `DerivativeTerm`
        A `DerivativeTerm` object to evaluate.
    eta0, eps0 : `numpy.ndarray`
        Where to evaluate the derivative.
    deps : `numpy.ndarray`
        The direction in which to evaluate the derivative.
    eta_derivs : `list` of `numpy.ndarray`
        A list where ``eta_derivs[i]`` contains
        :math:`d\\eta^i / d\\epsilon^i \\Delta \\epsilon^i`.
    eval_directional_derivative:
        A function matching `eval_directional_derivative` in
        ForwardModeDerivativeArray.
    validate: optional `bool`
        If `True`, run checks for the appropriate sizes of arguments and
        produce helpful error messages if necessary. Default is `False`.
    """
    if validate:
        if len(eta_derivs) < term.order() - 1:
            raise ValueError('Not enough derivatives in ``eta_derivs``.')

    # First eta arguments, then epsilons.
    vec_args = []

    eta_directions = []
    for i in range(len(term.eta_orders)):
        eta_order = term.eta_orders[i]
        if eta_order > 0:
            for j in range(eta_order):
                eta_directions.append(eta_derivs[i])

    eps_directions = []
    for i in range(term.eps_order):
        eps_directions.append(deps)

    return term.prefactor * eval_directional_derivative(
        eta0, eps0, eta_directions, eps_directions, validate=validate)
5e04024c9489170f32c6ab911164a018e7fd2c7d
569,047
def skiptest(reason):
    """
    Decorate a test that should be skipped with a reason.

    NOTE: Don't import this as `skip`, because that will cause trial to
    skip the entire module that imports it.
    """
    def skipdeco(func):
        func.skip = reason
        return func
    return skipdeco
2204e5d61cae7aebeacc440b5d501bb35e378010
69,949
import six
import pwd

def check_uid(val):
    """Return an uid, given a user value.

    If the value is an integer, make sure it's an existing uid.
    If the user value is unknown, raises a ValueError.
    """
    if isinstance(val, six.integer_types):
        try:
            pwd.getpwuid(val)
            return val
        except (KeyError, OverflowError):
            raise ValueError("%r isn't a valid user id" % val)

    if not isinstance(val, str):
        raise TypeError(val)

    try:
        return pwd.getpwnam(val).pw_uid
    except KeyError:
        raise ValueError("%r isn't a valid user val" % val)
61f1919cfcf060b9d6f3a5bb96f6923727fc9d90
310,161
def detect_collisions(good_snake, bad_snake):
    """
    Detect collisions between the Good Snake and the Bad Snake, and between the
    Good Snake and itself. Returns True if there's been a collision and False
    otherwise.
    """
    for python_area in good_snake.get_rects():
        for bad_snake_area in bad_snake.get_rects():
            if python_area.colliderect(bad_snake_area):
                return True
    # detect collisions with ourself
    if good_snake.head_hit_body():
        return True
    return False
acfc136c26616895071ad56f869e281f198d55ed
459,777
def get_str_clean(cp_section, key, default_value):
    """take a ConfigParser section, get key and strip leading and trailing \' and \" chars"""
    value = cp_section.get(key, default_value)
    if not value:
        return ""
    return value.lstrip('"').lstrip("'").rstrip('"').rstrip("'")
75c77c5350593b2723eff919fd47984fd51acbbe
123,867
def br(n):  # type: (int) -> str
    """
    Concisely create many <br> tags.

    :param n: number of <br> to return
    :return: n <br> tags
    """
    return '<br>' * n
0741ab941f991635a477fc8960df1f0926471c86
300,264
def ExpandIncome(e00200, pencon_p, pencon_s, e00300, e00400, e00600,
                 e00700, e00800, e00900, e01100, e01200, e01400, e01500,
                 e02000, e02100, p22250, p23250, cmbtp, ptax_was,
                 benefit_value_total, expanded_income):
    """
    Calculates expanded_income from component income types.
    """
    expanded_income = (
        e00200 +  # wage and salary income net of DC pension contributions
        pencon_p +  # tax-advantaged DC pension contributions for taxpayer
        pencon_s +  # tax-advantaged DC pension contributions for spouse
        e00300 +  # taxable interest income
        e00400 +  # non-taxable interest income
        e00600 +  # dividends
        e00700 +  # state and local income tax refunds
        e00800 +  # alimony received
        e00900 +  # Sch C business net income/loss
        e01100 +  # capital gain distributions not reported on Sch D
        e01200 +  # Form 4797 other net gain/loss
        e01400 +  # taxable IRA distributions
        e01500 +  # total pension & annuity income (including DB-plan benefits)
        e02000 +  # Sch E total rental, ..., partnership, S-corp income/loss
        e02100 +  # Sch F farm net income/loss
        p22250 +  # Sch D: net short-term capital gain/loss
        p23250 +  # Sch D: net long-term capital gain/loss
        cmbtp +  # other AMT taxable income items from Form 6251
        0.5 * ptax_was +  # employer share of FICA taxes on wages/salaries
        benefit_value_total  # consumption value of all benefits received;
        # see the BenefitPrograms function in this file for details on
        # exactly how the benefit_value_total variable is computed
    )
    return expanded_income
5c025af08377bed24d6086473ff787d65f305591
266,834
def default_context(engine, context=None) -> dict:
    """Return a dict with an engine, using the existing values if provided"""
    if context is None:
        context = {}
    context.setdefault("engine", engine)
    return context
ead2031e3cd1d95cbf893188996a43d3ca0bcdc0
183,711
from typing import Dict
from typing import Any

def create_default_configuration() -> Dict[str, Any]:
    """Creates a dictionary with the default configuration."""
    # When changing this, make sure to update the docs as well
    return {
        'content_directory': 'content',
        'resource_directory': 'resources',
        'static_directory': 'static',
        'output_directory': 'output',
        'generator_directory': 'generators',
        'build': {
            'clean_output': True,
            'cache.fs.directory': 'cache',
            'cache.db.directory': 'cache',
            'cache.redis.host': 'localhost',
            'cache.redis.port': 6379,
            'cache.redis.db': 0,
            'cache.redis.expiration_time': 60,
            'cache.type': 'fs',
            'resource.sass.compiler': 'libsass'
        },
        'ignore_files': ['*~'],
        'content': {
            'filters': ['date', 'status']
        },
        'template': 'templates/default.yaml',
        'routes': {
            'static': 'static_routes.yaml'
        },
        'collections': 'collections.yaml',
        'relaxed_date_parsing': False,
        'allow_relative_links': True,
        'feeds': 'feeds.yaml',
        'metadata': 'metadata.yaml'
    }
f5b7c2ca428f32b576a4bb34562980954a19c571
351,814
def recalc_dhw_vol_to_energy(vol, delta_t=35, c_p_water=4182, rho_water=995):
    """
    Calculates hot water energy in kWh/a from input hot water volume in
    liters/apartment*day

    Parameters
    ----------
    vol : float
        Input hot water volume in liters/apartment*day
    delta_t : float, optional
        Temperature split of heated up water in Kelvin (default: 35)
    c_p_water : float, optional
        Specific heat capacity of water in J/kgK (default: 4182)
    rho_water : float, optional
        Density of water in kg/m3 (default: 995)

    Returns
    -------
    dhw_annual_kwh : float
        Annual hot water energy demand in kWh/a
    """
    en_per_day = vol / 1000 * rho_water * c_p_water * delta_t / (3600 * 1000)  # in kWh
    dhw_annual_kwh = en_per_day * 365
    return dhw_annual_kwh
75bd85e68949303d66ee658e8bd4698b7f86ec2f
60,791
def read_until(fd, b):
    """Read bytes from open file object fd until byte b is found"""
    s = b''
    while True:
        c = fd.read(1)
        if not c:
            break  # no more bytes to read
        if c == b:
            break  # found our byte
        s += c
    return s
76ed62fada8c25a39d60f3bd02969d50af93007f
283,893
def ParseUrl(url):
    """Parses a URL string according to RFC 1738.

    Note: We allow the '/' character in 'user', so we can specify the workgroup
    of an smb user.

    Args:
        url: A URL string.

    Returns:
        A dict with optional keys 'scheme', 'user', 'password', 'host', 'port',
        and 'path'.
    """
    result = {}
    scheme, delimiter, schemepart = url.partition('://')
    if not delimiter:
        return {}
    result['scheme'] = scheme
    userpass, unused_delimiter, hostpath = schemepart.rpartition('@')
    if userpass:
        user, delimiter, password = userpass.partition(':')
        result['user'] = user
        if delimiter:
            result['password'] = password
    hostport, delimiter, path = hostpath.partition('/')
    if delimiter:
        result['path'] = '/' + path
    host, unused_delimiter, port = hostport.partition(':')
    if host:
        result['host'] = host
    else:
        return {}
    if port:
        result['port'] = port
    return result
b1349f2091dad5daf404d672ee737dcfb904739a
615,501
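A usage sketch of ParseUrl above with an assumed SMB-style URL, showing which keys end up in the result dict.

# assumes ParseUrl from the entry above is in scope
print(ParseUrl('smb://guest:secret@fileserver:445/share/docs'))
# {'scheme': 'smb', 'user': 'guest', 'password': 'secret',
#  'path': '/share/docs', 'host': 'fileserver', 'port': '445'}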
def all_bases(obj):
    """
    Return all the classes to which ``obj`` belongs.
    """
    def _inner(thing, bases=None):
        bases = bases or set()
        if not hasattr(thing, "__bases__"):
            thing = thing.__class__
        for i in thing.__bases__ or []:
            bases.add(i)
            bases = bases | _inner(i, bases=bases)
        return bases

    return set(_inner(obj))
f199a56d03a88bae50dd5224661b9a23b4a50e12
129,657
def excess_pore_pressure_ratio(df):
    """
    Assign the excess pore pressure ratio, if the water pressure u2 is defined.
    Else, raise ERROR.

    :param df: (DataFrame)
    :return: (DataFrame)
    """
    try:
        u2 = df["u2"]
    except KeyError:
        raise SystemExit("ERROR: u2 not defined in .gef file, change classifier")

    df["excess_pore_pressure_ratio"] = (u2 - df["water_pressure"]) / (
        df["qt"] - df["soil_pressure"]
    )
    return df
371b43ddbbe80c282117436181c83d0b0bb19ff5
597,118
import torch

def pairwise_euclidean_similarity(x, y):
    """Compute the pairwise Euclidean similarity between x and y.

    This function computes the following similarity value between each pair of
    x_i and y_j: s(x_i, y_j) = -|x_i - y_j|^2.

    Args:
        x: NxD float tensor.
        y: MxD float tensor.

    Returns:
        s: NxM float tensor, the pairwise euclidean similarity.
    """
    s = 2 * torch.mm(x, torch.transpose(y, 1, 0))
    diag_x = torch.sum(x * x, dim=-1)
    diag_x = torch.unsqueeze(diag_x, 0)
    diag_y = torch.reshape(torch.sum(y * y, dim=-1), (1, -1))
    return s - diag_x - diag_y
8c52ebf391d5b71271d2e8e184bb494e2f481b6c
503,281
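A tiny check for pairwise_euclidean_similarity above with assumed toy tensors: the expansion 2 x·y - |x|^2 - |y|^2 reproduces -|x_i - y_j|^2 for each pair.

# assumes pairwise_euclidean_similarity from the entry above is in scope
import torch
x = torch.tensor([[1.0, 0.0]])              # 1x2
y = torch.tensor([[0.0, 1.0], [1.0, 0.0]])  # 2x2
print(pairwise_euclidean_similarity(x, y))  # tensor([[-2., 0.]])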
def exc_log_str(exception) -> str:
    """
    Format an exception as a "nice" one-liner string (does not include stack trace).
    """
    return "{}: {!s}".format(type(exception).__name__, exception)
17fe59fd6610d913a694b64a181143446d933a63
570,016
def get_property(line):
    """return key, value pair by splitting key=value with ="""
    # parser = re.compile(r'(.*)=(.*)')
    # match_object = parser.match(line)
    # if match_object:
    #     return match_object.group(1), match_object.group(2)
    assert line.find('=') != -1
    line_list = line.split('=', 1)
    return line_list[0], line_list[1]
9fbe4440021db03b85e7a12391736c2309f8a042
42,426
import re

def generate_name(analysis_name, tool_name, logfc=1, fdr=0.05):
    """Generate name for gene sets."""
    analysis_name = analysis_name.strip().replace(" ", "_")
    analysis_name = re.sub(r"[^-\w.]", "", analysis_name)
    tool_name = tool_name.strip().replace(" ", "_")
    tool_name = re.sub(r"[^-\w.]", "", tool_name)
    return f"{analysis_name}_{tool_name}_logFC{logfc}_FDR{fdr}"
69548f889383d5fdb0281a4d3d575f70b0229c1a
405,413