content: string (lengths 39–9.28k)
sha1: string (length 40)
id: int64 (8–710k)
import pickle
import pkg_resources
from pathlib import Path

def load_model(model):
    """Loads the model from the passed path or name"""
    if isinstance(model, Path):
        return pickle.loads(model.read_bytes())
    model = 'models/' + model + '.pkl'
    model = pkg_resources.resource_string('botrecon', model)
    return pickle.loads(model)
7b416e39a740a25f360fcbd4371d76213de3e2ef
614,225
import struct

def _ReadCoverageInfoEntry(data_file):
    """Reads a packet of data from the specified file."""
    UINT32_SIZE = 4
    pkt_size_buf = data_file.read(UINT32_SIZE)
    if len(pkt_size_buf) != UINT32_SIZE:
        raise ValueError("Invalid packet size read.")
    pkt_size = struct.unpack("I", pkt_size_buf)[0]
    pkt = data_file.read(pkt_size)
    if len(pkt) != pkt_size:
        raise ValueError("Incomplete packet.")
    return pkt
3d820f25074f7eb21ee46a5e6f0c6a79d290a5e7
262,728
def _calculate_working_record_checksum(working_record):
    """
    Calculates the checksum of the argument ascii-hex string
    @retval int - modulo integer checksum value of argument ascii-hex string
    """
    checksum = 0
    # strip off the leading * and ID characters of the log line (3 characters) and
    # strip off the trailing Checksum characters (2 characters)
    star_and_checksum_stripped_working_record = working_record[3:-2]
    working_record_length = len(star_and_checksum_stripped_working_record)
    for x in range(0, working_record_length, 2):
        value = star_and_checksum_stripped_working_record[x:x+2]
        checksum += int(value, 16)
    modulo_checksum = checksum % 256
    return modulo_checksum
759d661908ab7049adabd0496bdb6ecbc48c7544
370,465
def is_distributed_model(state_dict):
    """
    determines if the state dict is from a model trained on distributed GPUs

    Parameters:
    -----------
    state_dict: collections.OrderedDict

    Returns:
    --------
    Boolean
    """
    return all(k.startswith("module.") for k in state_dict.keys())
d5ca818ba4c005c487c6e5a6e8b09597aa61a499
544,819
def check_used_once(g):
    """Returns True if a graph has only one usage."""
    mng = g.manager
    return sum(mng.graph_users[g].values()) == 1
b13a98066709e7749c4f38da9453fef547bf02ee
502,434
def _date_to_int(d):
    """Return a date object as a yyyymmdd int."""
    return int(d.strftime("%Y%m%d"))
f41c150e303723ecc2d8a3e93fb06e8bc41c3676
302,988
def score_substitute(a_c1, a_c2):
    """Score substitution of two characters.

    Args:
      a_c1 (str): first word to compare
      a_c2 (str): second word to compare

    Returns:
      int: 2 if the last characters of both words are equal, -3 otherwise
    """
    return 2 if a_c1[-1] == a_c2[-1] else -3
6c329daf5cdc9cfda54523d7322e6e2b92e4d4f7
573,495
def getSlotFromCardName(cardName):
    """
    cardName is expected to be of the form 'gem-shelfXX-amcYY'
    where XX & YY are integers
    """
    slot = (cardName.split("-")[2])
    slot = int(slot.strip("amc"))
    return slot
e269321b3547142f4180ab906383c57aed91dc74
475,150
def genAliases(name):
    """
    Generates aliases for metabolite names, e.g.:
        val --> set(['Val-L', 'Val', 'val', 'val-L'])
    """
    name = name.replace('-L', '').replace('-l', '')
    output = []
    output.append(name)
    output.append(name.lower())
    output.append(name.lower()+'-L')
    output.append(name.lower()+'_L')
    output.append(name.capitalize())
    output.append(name.capitalize()+'-L')
    output.append(name.capitalize()+'_L')
    return output
28b88a35588197765e296528fd3b05f34baa1351
31,227
def dic_sum_up_lengths(in_dic):
    """
    Given a dictionary with strings or numbers, sum up the numbers /
    string lengths and return the total length.
    Currently works for integer numbers and strings.

    >>> in_dic = {'e1': 5, 'e2': 10}
    >>> dic_sum_up_lengths(in_dic)
    15
    >>> in_dic = {'e1': 'ACGT', 'e2': 'ACGTACGT'}
    >>> dic_sum_up_lengths(in_dic)
    12

    """
    assert in_dic, "given dictionary in_dic empty"
    sum = 0
    for e in in_dic:
        v = in_dic[e]
        if isinstance(v, str):
            sum += len(v)
        elif isinstance(v, int):
            sum += v
        else:
            assert False, "non-string or non-integer dictionary value given"
    return sum
c019d0e9b7425b02b9fb661a5ae85b27ac05ffe1
466,677
def xaxis3D(
    xaxis3d_type=None,
    xaxis3d_name="",
    xaxis3d_name_size=16,
    xaxis3d_name_gap=20,
    xaxis3d_min=None,
    xaxis3d_max=None,
    xaxis3d_interval="auto",
    xaxis3d_margin=8,
    **kwargs
):
    """
    3D x-axis configuration options.

    :param xaxis3d_type: type of the 3D x-axis
    :param xaxis3d_name: x-axis name, defaults to ""
    :param xaxis3d_name_size: font size of the x-axis name, defaults to 16
    :param xaxis3d_name_gap: gap between the x-axis name and the axis line, defaults to 20
    :param xaxis3d_min: minimum value of the x-axis scale, adaptive by default
    :param xaxis3d_max: maximum value of the x-axis scale, adaptive by default
    :param xaxis3d_interval: display interval of x-axis tick labels, effective for category axes.
        By default labels are spaced so that they do not overlap.
        Set to 0 to force all labels to be shown; 1 shows every other label,
        2 shows every third label, and so on.
    :param xaxis3d_margin: distance between the x-axis tick labels and the axis line, defaults to 8
    """
    _xaxis3D = {
        "name": xaxis3d_name,
        "nameGap": xaxis3d_name_gap,
        "nameTextStyle": {"fontSize": xaxis3d_name_size},
        "type": xaxis3d_type,
        "min": xaxis3d_min,
        "max": xaxis3d_max,
        "axisLabel": {"margin": xaxis3d_margin, "interval": xaxis3d_interval},
    }
    return _xaxis3D
be5a05ddbf5b0c4d0bb93eb6384c5a260905ca8e
344,248
def arraytize(v):
    """
    convenience function that "transforms" its arguments into a list.
    If the argument is already a list, returns it.
    If the argument is None, returns an empty list.
    Otherwise returns [argument].
    """
    if v is None:
        return []
    try:
        return list(v)
    except:
        return [v]
38810e67cc10a14fe27dc4991d736b8a77fe6843
662,843
def base64(value):
    """The intrinsic function Fn::Base64 returns the Base64 representation of \
    the input string. This function is typically used to pass encoded data to
    Amazon EC2 instances by way of the UserData property.

    Args:
        value: The string value you want to convert to Base64

    Returns:
        The original string, in Base64 representation
    """
    return {'Fn::Base64': value}
6d8a69e5160a57f8b50085434643e6f738cfbc17
303,031
def _get_all_policy_ids(zap_helper):
    """Get all policy IDs."""
    policies = zap_helper.zap.ascan.policies()
    return [p['id'] for p in policies]
bd48cb1e5021f09c68286fdd83b6367f518290a3
186,667
import typing

def no_parsing(f: typing.Callable):
    """Wrap a method under test so that it skips input parsing."""
    return lambda *args, **kwargs: f(*args, _parse=False, **kwargs)
ca2222ef87f25dda6beb2a9af12dbd5e2f76ea01
76,229
import torch

def calculate_accuracy(inputs: torch.Tensor, targets: torch.Tensor) -> float:
    """
    A function that calculates accuracy for batch processing.
    Returns accuracy as a Python float.

    Args:
        inputs (torch.Tensor): shape == [N, n_class]
        targets (torch.Tensor): shape == [N]

    Returns:
        accuracy (float)
    """
    with torch.no_grad():
        total = targets.shape[0]
        _, predicted = torch.max(inputs, 1)
        correct = (predicted == targets).cpu().sum().float().item()
    return correct / total
53eff3901d675370a9ed0f260d1230c2c5badb79
34,064
from typing import Sequence
from typing import Callable
from typing import Any
from functools import reduce

def compose(functions: Sequence[Callable]) -> Callable:
    """
    Compose a sequence of functions
    :param functions: sequence of functions
    :return: combined functions, e.g. [f(x), g(x)] -> g(f(x))
    """
    def func(f: Callable, g: Callable) -> Callable:
        def func2(*x) -> Any:
            res = g(*x)
            if type(res) == bool:
                return f(*x)
            else:
                return f(*res)
        return func2
    return reduce(func, reversed(functions), lambda *x: x)
9b3bcda936c55f53b9891fd1b522742c76a87097
583,795
def base36decode(base36_string):
    """Converts base36 string into integer."""
    return int(base36_string, 36)
66da9d391705cd0748e0e7c0ea5c69be2366ed4e
22,527
from typing import Optional

def int_input(prompt: str) -> Optional[int]:
    """Print ``prompt``; ensure the user enters an integer and return it."""
    text = input(prompt + " ")
    if not text:
        return None
    try:
        number = int(text)
    except ValueError:
        print("No, you must type an integer.")
        return int_input(prompt)
    return number
dff7cef9fae05d5e47d3d8aa6ec7b75d469abaa1
517,455
def _small_body(close, low, open, high):
    """
    do we have a small body in relation to the wicks
    :param close:
    :param low:
    :param open:
    :param high:
    :return: 0 if no, 1 if yes (wicks are longer than body)
    """
    size = abs(close - open)
    if close > open:
        top_wick = high - close
        bottom_wick = open - low
    else:
        top_wick = high - open
        bottom_wick = close - low
    wick_size = top_wick + bottom_wick
    if wick_size > size:
        return 1
    else:
        return 0
c466fd7ebba16f62cc957d3ec43143d8d4a13500
322,418
def separate_units_by_type(all_units):
    """Separate all_units to their respective unit type group."""
    immune = {}
    infection = {}
    unit_type_to_group = {
        'immune': immune,
        'infection': infection,
    }
    for unit_no, unit in all_units.items():
        group = unit_type_to_group[unit['type']]
        group[unit_no] = unit
    return immune, infection
197bce89abeb2f7472e2c2c2b28115d491af718f
166,752
def stringToList(inputString):
    """Convert a string into a list of integers."""
    return [ord(i) for i in inputString]
127e6b5d0e63d81dafa6c139d39517b1fd0c961e
312,939
from typing import Dict
from typing import Any

def _sort_contact_models(contact_models: Dict[str, Any]) -> Dict[str, Any]:
    """Sort the contact_models.

    First we have non recurrent, then recurrent contacts models.
    Within each group the models are sorted alphabetically.

    Args:
        contact_models (Dict[str, Any]): See :ref:`contact_models`

    Returns:
        Dict[str, Any]: Sorted copy of contact_models.

    """
    sorted_ = sorted(
        name for name, mod in contact_models.items() if not mod["is_recurrent"]
    )
    sorted_ += sorted(
        name for name, mod in contact_models.items() if mod["is_recurrent"]
    )
    return {name: contact_models[name] for name in sorted_}
cc1c350726c9dbcf8bbdb9cf36561c72ffc767d5
430,700
def _get_pgh_obj_pk_col(history_model):
    """
    Returns the column name of the PK field tracked by the history model
    """
    return history_model._meta.get_field(
        'pgh_obj'
    ).related_model._meta.pk.column
8ff914a7c0142973b58b48b22d9c7429682d69de
635,662
def zaid2za(zaid):
    """
    Convert ZZAAA to (Z,A) tuple.
    """
    # Ignores decimal and stuff after decimal.
    zaid = str(int(zaid))
    Z = int(zaid[:-3])
    A = int(zaid[-3:])
    return (Z, A)
58643b9d7adb3c0756e4de5d51c5a7d68a7a65e0
654,558
def merge_dicts(*args):
    """Merge multiple dictionaries into a new dictionary as a shallow copy."""
    merged_dict = {}
    for input_dict in args:
        merged_dict.update(input_dict)
    return merged_dict
4328c53d4b1c8b423465d3e69a4df6253869ec0e
586,806
import torch

def coin_flip(prob):
    """
    Return the outcome of a biased coin flip.

    Args:
        prob: the probability of True.

    Returns:
        bool
    """
    return prob > 0 and torch.rand(1).item() < prob
672929fb49a0e65101a4bdfdd13e981ae5eae31c
6,858
def success_email_subject_msid_author(identity, msid, author):
    """email subject for a success email with msid and author values"""
    return u"{identity}JATS posted for article {msid:0>5}, author {author}".format(
        identity=identity, msid=str(msid), author=author
    )
c65f946e87140c9c28166daa0e664a994910b559
685,874
import re

def get_r_filename(r_file):
    """Remove the file extension from an r_file using regex.

    Probably unnecessary but improves readability

    Parameters
    ----------
    r_file : string
        name of R file including file extension

    Returns
    -------
    string
        name of R file without file extension
    """
    return re.split(r'\.[rR]$', r_file)[0]
bde15f79afcc585d0f86b45debf932c22d22f271
631,392
def value_in_many_any(a, b):
    """return true if item 'a' is found inside 'b':
    a list/tuple of many iterators
    else return false
    """
    for c in b:
        if a in c:
            return True
    return False
0da7a168a023d6af72369a19180f6bdbdc237d42
550,078
def totient(lim):
    """Computes Euler's totient for values up to lim included."""
    # http://en.wikipedia.org/wiki/Euler%27s_totient_function
    tot = list(range(lim + 1))
    tot[0] = -1
    for i in range(2, lim + 1):
        if tot[i] == i:
            for j in range(i, lim + 1, i):
                tot[j] = (tot[j] * (i - 1)) // i
    return tot
ad42fb6bce3233becd56b22d30b966a5e2487a00
174,615
def get_format(s):
    """
    Returns the Open Babel format of the given string.
    Note: It is primitive, distinguishes only xyz, smiles, inchi formats.

    >>> print(get_format('C'))
    smi
    >>> print(get_format('InChI=1S/H2O/h1H2'))
    inchi
    >>> print(get_format(get_xyz('C')))
    xyz
    """
    frm = 'unknown'
    lines = s.splitlines()
    n = len(lines)  # number of lines
    if n == 1:
        if s.startswith('InChI'):
            frm = 'inchi'
        else:
            frm = 'smi'
    else:
        try:
            natom = int(lines[0].strip())
            if n >= natom + 2:
                frm = 'xyz'
        except:
            pass
    return frm
e3ec8c253b4cf14dd2401d4be710a6a596f6116b
627,820
def tableName(table):
    """Return a string with the name of the table in the current db."""
    return table.sqlmeta.table
23ea66b80c8516d191d5f66e9d647d296480e0ab
566,859
def crc32_tab_rev(prev, crctab, byte):
    """
    return next = crc32(prev, byte)

    crc32(p0,b0) ^ crc32(p1,b1) = crc32(p0^p1, b0^b1)
    """
    return crctab[(prev ^ byte) & 0xff] ^ (prev >> 8)
d3adb5585d640cf3452441ccee721f2be9c1f89b
239,077
def itemKey(item):
    """
    Build the form item's key from the item's name and the name of
    all ancestors.
    """
    parts = [item.name]
    parent = item.itemParent
    while parent is not None:
        parts.append(parent.name)
        parent = parent.itemParent
    parts.reverse()
    return '.'.join(parts)
29d317d62960594562577adc06b0db5279a7fbb6
52,636
import time

def make_timestamp(precision=0):
    """
    Returns timestamp string that represents the current time
    Precision is number of decimal places to add after the seconds
    """
    # Outputs time stamp in format YYYYMMDD_hhmmss
    t = time.localtime()
    d = time.time() % 1  # decimal places. May be needed later
    YYYY = str(t.tm_year)
    MM = str(100 + t.tm_mon)[-2:]  # pre-pends zeroes where needed
    DD = str(100 + t.tm_mday)[-2:]
    hh = str(100 + t.tm_hour)[-2:]
    mm = str(100 + t.tm_min)[-2:]
    ss = str(100 + t.tm_sec)[-2:]
    l = [YYYY, MM, DD, "_", hh, mm, ss]
    if precision < 0:
        print("For backwards compatibility, the timestamp will include seconds anyway")
    elif precision > 0:
        d = time.time() % 1
        d_str = str(d)[2:2 + int(round(precision))]
        l.append("_")
        l.append(d_str)
    # timestamp format YYYYMMDD_hhmmss_dddd with "precision" digits in place of "dddd"
    return "".join(l)
701350d7ff57eb1203738cabce9da4ffb5e051fd
426,994
import collections
import math

def one_unit_tdelta(tdelta):
    """Return the timedelta as a string with 1 unit of time.

    Args:
        tdelta (timedelta): A non-negative timedelta object.

    Returns:
        A string in the form #.#x or ##x where x is a time unit.
        The units are w (weeks), d (days), h (hours), and m (minutes).
        The largest unit that is not below 1 is chosen.
        The number is rounded to 2 significant digits if below 100, and
        rounded without restriction if 100 or greater.
        However if it is minutes, the ceiling is taken instead.
    """
    total_s = tdelta.total_seconds()
    units = collections.OrderedDict()
    units['w'] = total_s / (60*60*24*7)
    units['d'] = total_s / (60*60*24)
    units['h'] = total_s / (60*60)
    units['m'] = total_s / (60)
    for unit, value in units.items():
        if value < 1:
            continue
        # was `units == 'm'`, which compared the dict to a string and never matched
        if unit == 'm' and value >= 10:
            return '{}{}'.format(math.ceil(value), unit)
        if value < 100:
            return '{:.2g}{}'.format(value, unit)
        return '{}{}'.format(round(value), unit)
    return '{}{}'.format(math.ceil(units['m']), 'm')
3d7eae6b46a4c717ccc7cadc46f7147301a23bf3
446,249
def recvall(sock, n):
    """
    returns the data from a received bytestream, helper function
    to receive n bytes or return None if EOF is hit
    :param sock: socket
    :param n: length in bytes (number of bytes)
    :return: message
    """
    data = b''
    while len(data) < n:
        print("Start function sock.recv")
        packet = sock.recv(n - len(data))
        if not packet:
            return None
        data += packet
    return data
5641e7f13c4e61f9cdf672dc524c397f415e8a62
613,763
def compute_derivs_matrices(vecs, adv_vecs, dt):
    """Computes 1st-order time derivatives of vectors from data in matrices.

    Args:
        ``vecs``: Matrix with vectors as columns.

        ``adv_vecs``: Matrix with time-advanced vectors as columns.

        ``dt``: Time step between ``vecs`` and ``adv_vecs``.

    Returns:
        ``deriv_vecs``: Matrix with time-derivs of vectors as cols.
    """
    return (adv_vecs - vecs) / (1. * dt)
c953b1a04e3faab5f4af75fdc5029fe62f0864f8
227,999
def is_wgs_accession_format(contig_accession):
    """
    Check if a Genbank contig is part of WGS (Whole Genome Shotgun) sequence

    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :return: True if the provided contig is in the WGS format
    """
    wgs_prefix = contig_accession[:4]
    wgs_numeric_suffix = contig_accession[4:].replace(".", "")
    return str.isalpha(wgs_prefix) and str.isnumeric(wgs_numeric_suffix)
1e4ece9c428264ed5e74e8f83ad9b0521bc57988
43,879
import six

def GetDictItems(namedtuple_object):
    """A compatibility function to access the OrderedDict object from the given namedtuple object.

    Args:
        namedtuple_object: namedtuple object.

    Returns:
        collections.namedtuple.__dict__.items() when using python2.
        collections.namedtuple._asdict().items() when using python3.
    """
    return (namedtuple_object.__dict__.items() if six.PY2
            else namedtuple_object._asdict().items())
92fc7098065846f0393a82b06d11afca7038075f
564,786
from typing import List
from typing import Any

def shard_list(alist: List[Any], shard_count: int) -> List[List[Any]]:
    """Breaks the list up into roughly-equally sized shards.

    Args:
        alist: A list of things.
        shard_count (int): The total number of shards.

    Returns:
        List[List[Any]]: The shards.
    """
    shard_size = len(alist) / shard_count
    shard_start = 0.0
    shards = []
    for i in range(shard_count - 1):
        shard_end = shard_start + shard_size
        shards.append(alist[int(shard_start) : int(shard_end)])  # noqa: E203
        shard_start = shard_end
    shards.append(alist[int(shard_start) :])  # noqa: E203
    return shards
d5f5188514db968882c154c0118dff64dbc174fa
600,629
def createRefVal(pool_n=1):
    """Return a 2D list of reference distance and power"""
    dist = [100, 200, 400]
    power = [2000, 5000]
    liste = []
    for i in range(pool_n):
        for p in power:
            for d in dist:
                liste.append([p, d])
    return liste
3e3c7588912118c10bb606cba63e7f27d4e2d3f6
239,762
import re

def valid_sexp_predicate(sexp):
    """
    Escaping of '"' inside strings is also handled. String literals are removed so
    that every ( and ) remaining in the sexp belongs to a function call, and then
    the pairing of ( and ) is checked.

    Args:
        sexp (str): the S-expression to be sent to euslisp

    Returns:
        bool: whether the S-expression is valid

    >>> valid_sexp_predicate("(+ 1 2)")
    True
    >>> valid_sexp_predicate('(princ "hoge")')
    True
    >>> valid_sexp_predicate('(TEST::consult-hash-with-key (let ((hsh (make-hash-table))) (progn (setf (gethash "moo" hsh) 100) (setf (gethash "bow" hsh) 200)) hsh) "moo")')
    True
    >>> valid_sexp_predicate("(+ 1 2 3))")  # excess ')'
    False
    >>> valid_sexp_predicate("(list 1 2 (3)")  # insufficient ')
    False
    >>> sexp = r'(concatenate string "really\\" weird(" "string\\"!")'  # valid s-expression (without escape handling, the leftover string '(concatenate string weird( string))' would be judged invalid)
    >>> valid_sexp_predicate(sexp)
    True
    """
    # negative lookbehind (?<!): find pairs of '"' not preceded by '\' with non-greedy matching (.*?) and remove them
    sub = re.sub(r'(?<!\\)\".*?(?<!\\)\"', '', sexp)
    counter = 0
    for char in sub:
        if char == '(':
            counter += 1
        elif char == ')':
            if counter > 0:
                counter -= 1
            else:
                return False
    if counter > 0:
        return False
    return True
a66e013ee2f0a8a6ec10fc7f4acd46c9046925ee
205,344
from typing import Callable
from typing import Any
from typing import Iterable

def takewhile(predicate: Callable[[Any], bool], seq: Iterable) -> Iterable:
    """Lazily evaluated takewhile

    :param predicate: First failure of predicate stops the iteration. Should return bool
    :param seq: Sequence from which to take
    :returns: filtered sequence
    :rtype: Same as `seq`
    """
    it = iter(seq)
    try:
        _next = it.__next__()
    except StopIteration:
        # empty input; return instead of letting StopIteration escape the generator
        return None
    while predicate(_next):
        try:
            yield _next
            _next = it.__next__()
        except StopIteration:
            return None
b068d79386e8a6694de969992c265dd1a39dc9a3
534,236
def EnumsConflict(a, b):
    """Returns true if the enums have different names (ignoring suffixes)
    and one of them is a Chromium enum."""
    if a == b:
        return False
    if b.endswith('_CHROMIUM'):
        a, b = b, a
    if not a.endswith('_CHROMIUM'):
        return False

    def removesuffix(string, suffix):
        if not string.endswith(suffix):
            return string
        return string[:-len(suffix)]

    b = removesuffix(b, "_NV")
    b = removesuffix(b, "_EXT")
    b = removesuffix(b, "_OES")
    return removesuffix(a, "_CHROMIUM") != b
33b096c4ffb1a620e83bbc19654d57218ab3e3be
459,972
def jar_file_filter(file_name):
    """
    A function that will filter .jar files for copy operation

    :type file_name: str
    :param file_name:
        Name of the file that will be checked against if it ends with .jar or not
    """
    return bool(file_name) and isinstance(file_name, str) and file_name.endswith(".jar")
b330cea355116acde9269006f9574cf194424c1a
403,491
def _normalize_typos(typos, replacement_rules):
    """
    Applies all character replacement rules to the typos and returns a new
    dictionary of typos of all non-empty elements from normalized 'typos'.
    """
    if len(replacement_rules) > 0:
        typos_new = dict()
        for key, values in typos.items():
            typos_new[key] = list()
            for item in values:
                for orig, replacement in replacement_rules:
                    item = item.replace(orig, replacement)
                item = item.strip()
                if item:
                    typos_new[key].append(item)
        return typos_new
    else:
        return typos
fc47995303b00bc4d612a6a161dfad4c0bcd8e02
682,371
def unpack_ipv4_bytes(byte_pattern):
    """
    Given a list of raw bytes, parse out and return a list of IPs

    :param byte_pattern: The raw bytes from the DHCP option containing
        a list of IP addresses. The RFC specifies that an IP list will
        be a list of octets, with each group of 4 octets representing
        one IP address. There are no separators or terminators.
    :returns: a list of IP addresses as strings
    """
    ip_list = []
    # reverse the bytes so we can pop them off one at a time
    byte_pattern.reverse()
    while len(byte_pattern) > 3:
        # if there are at least 4 octets, add them as an IP to the list
        ip_string = ''
        for i in range(0, 3):
            ip_string += str(byte_pattern.pop()) + "."
        ip_string += str(byte_pattern.pop())
        ip_list.append(ip_string)
    return ip_list
f2ba6dd77acef4871a8511663fc494f03320f1ba
117,065
def exclude_pattern(f):
    """
    Return whether f is in the exclude pattern.
    Exclude the files that starts with . or ends with ~.
    """
    return f.startswith(".") or f.endswith("~")
b81208d37fe53cbbeb973d4cd56cd11022458a3d
249,781
def temp_coeff_cold(lower, upper):
    """
    Calculates and returns the m and b coefficients for y = m*x + b
    for a line intersecting (lower, 0) and (upper, 255).
    """
    m = 255 / (upper - lower)
    b = 255 - m * upper
    return m, b
0391f2d918627f43577555056d09e9c9efe1d15d
650,663
def get_scope(field):
    """For a single field get the scope variable

    Return a tuple with name:scope pairs"""
    name = field['name']
    if 'scope' in field['field']:
        scope = field['field']['scope']
    else:
        scope = ''
    return (name, scope)
1b931ec1a7c5a629fe6b39034c23fd02568ed5a7
30,724
def convert_pixels(image, **kwargs):
    """Converts an image containing pixel values in [0, 255] to [0, 1] floats."""
    image = image.astype('float32')
    image /= 255.0
    return image
426875f4dfe8abdfde234418b63373a6fe5e5ec3
339,817
def exists(list_elements, check_function):
    """exists
    Check whether at least an element x of l is True for f(x)

    :param list_elements: A list of elements to test
    :param check_function: The checking function (takes one parameter and \
    return a boolean)
    """
    for element in list_elements:
        if check_function(element):
            return True
    return False
76553c72b733ce46d6b4db563ba79a5009559d2a
322,075
def map_across_table(fn, rows):
    """
    Returns:
        (list of lists): A table expressed as a list of lists, having
        applied a function to each cell.

    Args:
        fn (function): a single argument function to apply to each cell
        rows (list of lists): A table expressed as a list of lists, each cell
            of which will be used once as the argument to fn.
    """
    return [
        [fn(cell) for cell in row]
        for row in rows
    ]
32f421c8702da2378aaafbb50bbfeda3484a5473
381,033
from typing import Tuple

def seat_to_seat_id(seat: Tuple[int, int]) -> int:
    """Convert a row/column pair into a seat ID"""
    return seat[0] * 8 + seat[1]
b78c45e73ff4c148927748e4aafd9e2c5489a2a5
179,472
def add_producer_function(new_producer_list, xml_producer_function_list, output_xml):
    """
    Check if input list is not empty, write in xml for each element and return
    update list if some updates have been made

    Parameters:
        new_producer_list ([Data_name_str, Function]) : Data's name and producer's function list
        xml_producer_function_list ([Data_name_str, Function]) : Data's name and producer's function list from xml
        output_xml (GenerateXML object) : XML's file object

    Returns:
        update_list ([0/1]) : Add 1 to list if any update, otherwise 0 is added
    """
    if not new_producer_list:
        return 0

    output_xml.write_producer(new_producer_list)
    # Warn the user once added within xml
    for producer in new_producer_list:
        xml_producer_function_list.append(producer)
        print(f"{producer[1].name} produces {producer[0]}")
    return 1
7c30289d18d79fd6c2a7ee006f528bf5dd5d56a1
59,064
def display_timedelta(minutes):
    """Converts timedelta in minutes to human friendly format.

    Parameters
    ----------
    minutes: int

    Returns
    -------
    string
        The timedelta in 'x days y hours z minutes' format.

    Raises
    ------
    ValueError
        If the timedelta is negative.
    """
    def plural(num):
        if num != 1:
            return 's'
        else:
            return ''

    if minutes < 0:
        raise ValueError
    days = minutes // 1440
    hours = minutes // 60 % 24
    minutes = minutes % 60
    time_elements = []
    if days > 0:
        time_elements.append(f'{days} day{plural(days)}')
    if hours > 0:
        time_elements.append(f'{hours} hour{plural(hours)}')
    if minutes > 0 or (days == 0 and hours == 0):
        time_elements.append(f'{minutes} minute{plural(minutes)}')
    time_string = ' '.join(time_elements)
    return time_string
c8606cf6defcc38e5a12dc88cfc65d7c21aefd69
674,099
def find_item(list_containing_list, item):
    """
    Find the index of the list that contains the item

    :param list_containing_list: List of lists; one of them must contain the item
    :param item: The item we are looking for
    :return: Index of the item in the outer list

    >>> find_item([[1,2,3],[4,5,6]],5)
    1
    """
    for _list in list_containing_list:
        if item in _list:
            return list_containing_list.index(_list)
    return None
a7d9c3c745e8da8c83edb8941494188980250bf5
449,343
def get_cover_image(beatmapset_id: int):
    """Return url of cover image from beatmapset_id."""
    return f"https://assets.ppy.sh/beatmaps/{beatmapset_id}/covers/cover.jpg"
54f5bf96c8e2e5dd266d5c3d2a7ae64c48e85c99
124,752
def percentile(sorted_values, p):
    """Calculate the percentile using the nearest rank method.

    >>> percentile([15, 20, 35, 40, 50], 50)
    35
    >>> percentile([15, 20, 35, 40, 50], 40)
    20
    >>> percentile([], 90)
    Traceback (most recent call last):
        ...
    ValueError: Too few data points (0) for 90th percentile
    """
    size = len(sorted_values)
    idx = (p / 100.0) * size - 0.5
    if idx < 0 or idx > size:
        raise ValueError('Too few data points ({}) for {}th percentile'.format(size, p))
    return sorted_values[int(idx)]
a2704a59629b4634fb1f651fdbbfd9b19209816e
591,095
def remove_degenerate_bboxes(boxes, dim0: int, dim1: int, min_boxside=0):
    """Remove bboxes beyond image or smaller than a minimum size.

    This assumes a format where columns 0, 1 are min dim0 and dim1,
    while columns 2, 3 are max dim0 and dim1, respectively.
    """
    # adjust boxes
    boxes[boxes < 0] = 0
    boxes[boxes[:, 0] > dim0, 0] = dim0
    boxes[boxes[:, 2] > dim0, 2] = dim0
    boxes[boxes[:, 1] > dim1, 1] = dim1
    boxes[boxes[:, 3] > dim1, 3] = dim1

    # remove boxes outside cropped region
    keep1 = boxes[:, 3] - boxes[:, 1] > min_boxside
    keep2 = boxes[:, 2] - boxes[:, 0] > min_boxside
    keep = keep1 & keep2
    boxes = boxes[keep]

    return boxes, keep
9f15955fc52fd679af9ec55215111177876553ed
457,260
def is_unit_str(ustr):
    """Check if a string defines a unit"""
    ustr = ustr.strip()
    if len(ustr) >= 2 and ustr[0] == "[" and ustr[-1] == "]":
        return True
    else:
        return False
60a28058d1fb35d5a8f69e76dc416099b4e9718a
252,888
import math

def ols(dist, scale, kappa):
    """Calculate OLS based on distance, scale and kappa."""
    e = dist ** 2 / 2 / (scale ** 2 * kappa)
    return math.exp(-e)
666eb6ba1b4ca4b93953930ebf049d8a76771f51
599,464
def add_score(score, pts, level):
    """Add points to score, determine and return new score and level."""
    lvl_multiplier = 10
    score += pts
    if (score % (pts * lvl_multiplier)) == 0:
        level += 1
    return score, level
e65f9cf989068513131610e80b12821976a6546e
362,840
def get_parameter_list_from_request(req, parameter):
    """Extracts a parameter from the request.

    Parameters
    ----------
    req : HttpRequest
        The HTTP request.
    parameter : str
        The parameter being extracted.

    Returns
    -------
    List
        List of comma separated parameters.
    """
    try:
        id_string = req.GET.get(parameter)
        param_list = list(map(int, id_string.split(',')))
    except (AttributeError, ValueError) as e:
        param_list = []
    return param_list
4b9f66e64455229a998eb199a3fc890d2ba8ba02
648,745
import inspect

def func_source_data(func):
    """
    Return data about a function source, including file name,
    line number, and source code.

    Parameters
    ----------
    func : object
        May be anything supported by the inspect module, such as a function,
        method, or class.

    Returns
    -------
    filename : str
    lineno : int
        The line number on which the function starts.
    source : str
    """
    filename = inspect.getsourcefile(func)
    lineno = inspect.getsourcelines(func)[1]
    source = inspect.getsource(func)
    return filename, lineno, source
0f8c16365979505bac0304bd05435c8f700ce800
382,864
import io

def as_csv(df):
    """Returns an in-memory csv of Pandas.DataFrame"""
    f = io.StringIO()
    df.to_csv(f, index=False)
    f.seek(0)
    return f
4401bb096c6c0f6c578ac45d8210c7b21ff86d2e
336,880
from typing import Union

def next_page(page: str) -> Union[None, str]:
    """
    Given the contents of a Letterboxd page, returns the relative path to the
    next page to parse. It handles the pagination of any type of page, from
    followers, to following, to movies watched, etc.
    Returns None if this is the last page already and there isn't another one
    to parse.
    """
    key_page_next = "\"next\" href=\""
    start = page.rfind("paginate-nextprev")
    start = page.find(key_page_next, start)
    if start == -1:  # was `is -1`, which relies on int interning
        return None
    start += len(key_page_next)
    end_idx = page.find("\"", start)
    return page[start + 1:end_idx]
f0d041548a91553a512907f951813912a50f6182
73,915
def calc_conformance(results):
    """Returns a tuple with the number of total and failed testcase
    variations and the conformance as percentage."""
    total = len(results)
    failed = sum(1 for status, _ in results.values() if status != 'PASS')
    conformance = (total - failed) * 100 / total if total > 0 else 100
    return total, failed, conformance
91d7406a9d8dc34505e16237b2f66cb2b2e6c65e
519,951
def tf(term, document):
    """
    computes term frequency. TF is defined as how often the term in question
    appears in a document over the sum of all terms in the document:
    (term/all_terms_in_doc).

    Parameters:
        term: a string containing the search term
        document: a list representing the document text, split into tokens
            (make sure that punctuation is split, too!)

    Return Value: a float representing the tf value
    """
    terms_found = 0
    for token in document:
        if token == term:
            terms_found += 1
    return terms_found / len(document)
033c65fb1165431a63ce5d758f65dc6a1d40346d
514,927
def valid_op_json(op_json):
    """Asserts object is in the form of `[command, {payload}]`."""
    assert isinstance(op_json, list), 'json must be a list'
    assert len(op_json) == 2, 'json must be a list with 2 elements'
    assert isinstance(op_json[0], str), 'json[0] must be a str (command)'
    assert isinstance(op_json[1], dict), 'json[1] must be dict (payload)'
    return op_json
820e4554bd559aff114fa90aa32da7b362416523
494,382
import base64

def release_asset_node_id_to_asset_id(node_id: str) -> str:
    """
    Extracts and returns the asset id from the given Release Asset |node_id|.

    The "id" returned from the GraphQL v4 API is called the "node_id" in the REST API v3.
    We can get back to the REST "id" by decoding the "node_id" (it is base64 encoded)
    and extracting the id number at the end, but this is undocumented and may change.

    :param node_id: The Release Asset node_id.
    :return: The extracted REST API v3 asset id.
    """
    # There is a new format and an old format.
    if node_id.startswith("RA_"):
        # New format: "RA_[base64 encoded bytes]".
        # The last four bytes (big-endian, unsigned) of the base64 encoded bytes are the node id.
        # Strip off the "RA_".
        base64_string = node_id[3:]
        asset_id = str(int.from_bytes(base64.b64decode(base64_string)[-4:], "big"))
    else:
        # Old format: just a base64 encoded string.
        # Once decoded, the format is similar to "012:ReleaseAsset18381577".  # noqa: SC100
        # The asset id part is 18381577.
        node_id_decoded: str = base64.b64decode(node_id).decode(
            encoding="utf-8", errors="ignore"
        )
        if "ReleaseAsset" not in node_id_decoded:
            raise AssertionError(
                f"Unrecognized node_id format: {node_id}. Decoded (base64) string: {node_id_decoded}."
            )
        asset_id = node_id_decoded.split("ReleaseAsset")[1]
    return asset_id
5046f9aa6740e2de8254db83de31f308b4becc31
237,078
def nthword(n, sep=None):
    """
    Construct a function to return the nth word in a string. E.g.::

        >>> import petl as etl
        >>> s = 'foo bar'
        >>> f = etl.nthword(0)
        >>> f(s)
        'foo'
        >>> g = etl.nthword(1)
        >>> g(s)
        'bar'

    Intended for use with :func:`petl.transform.conversions.convert`.
    """
    return lambda s: s.split(sep)[n]
025957c032db349dadc0eed876fc2b44e966a5fc
289,804
import random

def do_random_test(negexp=3):
    """
    Return :obj:`True` if a random probability is not greater than a
    threshold, otherwise return :obj:`False`.

    :type negexp: :obj:`int` (non-negative)
    :arg negexp:
        This is the negative exponent that is used to compute a probability
        threshold. Higher values of ``negexp`` make the threshold
        exponentially smaller. A value of 0 naturally makes the threshold
        equal 1, in which case the returned value will be :obj:`True`.

    :rtype: :obj:`bool`
    """
    # Compare random probability with threshold 1e-negexp
    return random.random() <= 10**(-1*negexp)
8f7549a3a781abb728b1ac9b1289bee8bacaf352
222,581
import torch

def compute_var_and_mean_sq(lst):
    """Compute variance and mean square of a list of samples."""
    num_samples = len(lst)
    tensor = torch.stack(lst)
    mean = torch.mean(tensor, 0, keepdim=True)
    # estimate variance
    var = (tensor - mean).pow(2).sum(0) / (num_samples - 1)
    # estimate E[x^2]. cannot estimate E[x]^2 without bias
    square = tensor.pow(2).mean(0)
    return var.mean(0).mean(0), square.mean(0).mean(0)
69869040485a9162ec68b62ed4e3a1e0ab2e1170
435,576
from typing import Dict
from typing import List
import csv

def read_temp_csv_data(filepath_temp: str) -> Dict[str, Dict[int, List[float]]]:
    """Return a mapping from the state to a mapping of the year to yearly data
    in the format [average temperature, precipitation, wildfire counts].

    Currently, the values for precipitation and wildfire counts are dummy values.

    Preconditions:
        - filepath refers to a csv file in the format of data/Annual_Temperature.csv
          (i.e., could be that file or a different file in the same format)
    """
    # ACCUMULATOR: The mapping from the state to its yearly data so far
    data_so_far = {}

    with open(filepath_temp) as file_temp:
        reader_temp = csv.reader(file_temp)

        # Skip the first 3 lines of the data
        for _ in range(0, 3):
            next(reader_temp)

        # Store the header containing the names of the states
        states = next(reader_temp)

        # Iterate through the header, skipping the 1st index, to map states to an empty dictionary,
        # which will map the year to the data about the average temp, precipitation, wildfire counts
        for i in range(1, len(states)):
            data_so_far[states[i]] = {}

        # Iterate through the remaining rows of the dataset
        for row in reader_temp:
            # The current year being processed
            year = int(row[0])

            # Iterate through each index of the row
            for i in range(1, len(row)):
                # The average temperature value of a state in the current year
                value_temp = float(row[i])
                # Create a mapping from the year to that year's data
                # in the format [average temp, precipitation, wildfires]
                data_so_far[states[i]][year] = [value_temp, 0, 0]  # The 0s are dummy values

    return data_so_far
4342eb9b1ed392cd8ee94dfa400a194948fef3a4
663,909
import numbers

def discrete_signal(signal, step_size):
    """Discretize signal

    Parameters
    ----------
    signal: pd.Series
        Signals for betting size ranged [-1, 1]
    step_size: float
        Discrete size

    Returns
    -------
    pd.Series
    """
    if isinstance(signal, numbers.Number):
        signal = round(signal / step_size) * step_size
        signal = min(1, signal)
        signal = max(-1, signal)
    else:
        signal = (signal / step_size).round() * step_size
        signal[signal > 1] = 1
        signal[signal < -1] = -1
    return signal
183af7cf6ca30daaebb44b0d41c860b001109136
70,887
def org_default_payload(login: str, uid: int):
    """Provide the basic structure for an organization payload."""
    return {
        'action': 'member_added',
        'membership': {
            'url': '',
            'state': 'pending',
            'role': 'member',
            'organization_url': '',
            'user': {
                'login': login,
                'id': uid,
                'node_id': 'MDQ6VXNlcjM5NjUyMzUx',
                'avatar_url': '',
                'gravatar_id': '',
                'url': '',
                'html_url': '',
                'followers_url': '',
                'following_url': '',
                'gists_url': '',
                'starred_url': '',
                'subscriptions_url': '',
                'organizations_url': '',
                'repos_url': '',
                'events_url': '',
                'received_events_url': '',
                'type': 'User',
                'site_admin': False
            }
        },
        'organization': {
            'login': 'Octocoders',
            'id': '38302899',
            'node_id': 'MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5',
            'url': '',
            'repos_url': '',
            'events_url': '',
            'hooks_url': '',
            'issues_url': '',
            'members_url': '',
            'public_members_url': '',
            'avatar_url': '',
            'description': ''
        },
        'sender': {
            'login': 'Codertocat',
            'id': '21031067',
            'node_id': 'MDQ6VXNlcjIxMDMxMDY3',
            'avatar_url': '',
            'gravatar_id': '',
            'url': '',
            'html_url': '',
            'followers_url': '',
            'following_url': '',
            'gists_url': '',
            'starred_url': '',
            'subscriptions_url': '',
            'organizations_url': '',
            'repos_url': '',
            'events_url': '',
            'received_events_url': '',
            'type': 'User',
            'site_admin': False
        }
    }
af542f5a9da78ff3b08e80cd49e84f828e9a8cbb
628,702
def grab_job_links(soup):
    """
    Grab all non-sponsored job posting links from an Indeed search result page
    using the given soup object

    Parameters:
        soup: the soup object corresponding to a search result page
            e.g. https://ca.indeed.com/jobs?q=data+scientist&l=Toronto&start=20

    Returns:
        urls: a python list of job posting urls
    """
    urls = []

    # Loop thru all the posting links
    for link in soup.find_all('h2', {'class': 'jobtitle'}):
        # Since sponsored job postings are represented by "a target" instead of "a href", no need to worry here
        partial_url = link.a.get('href')
        # This is a partial url, we need to attach the prefix
        url = 'https://ca.indeed.com' + partial_url
        # Make sure this is not a sponsored posting
        urls.append(url)

    return urls
159b3d768266900446bc17326aeb41bfd048b267
666,852
def _shorten_decimals(self, fValue: float, iDecimals: int) -> float:
    """
    Description
    -----------
    Method for reducing the amount of decimals of a float value

    Parameters
    ----------
    `fValue` : float
        Value whose decimals should be cut
    `iDecimals` : int
        Amount of decimals

    Return
    ------
    `fValue` : float
        Value whose decimals were cut
    """
    # Initialize variables
    iTemp = 1
    for iIndex in range(iDecimals):
        iTemp *= 10
    return float(int(fValue * iTemp) / iTemp)
74746e2ab4eca01149feeea6603fabf9cd42b795
512,673
import inspect

def arg_names(receiver):
    """
    Get the expected keyword arguments for a function or class constructor.
    """
    return list(inspect.signature(receiver).parameters.keys())
3267c2cb36ee54ff99f972a7a0e1e8ba90393bef
654,771
def generate_graph(edges):
    """
    Generate a graph as dict using edges generator.

    Args:
        edges (generator): generator of edges.

    Returns:
        dict. the graph as { "node": ["dependencies"], ... }
    """
    graph = {}
    for edge in list(edges):
        if not edge[0] in graph:
            graph[edge[0]] = []
        if edge[1] and not edge[0] == edge[1]:
            graph[edge[0]].append(edge[1])
    return graph
782c89041005207b8187eb1a9dfac836c50ae484
514,758
def load_glove_vocab(filename):
    """Loads GloVe's vocab from a file.

    Args:
        filename (str): path to the glove vectors.

    Returns:
        set: a set of all words in GloVe.
    """
    print('Building vocab...')
    with open(filename) as f:
        vocab = {line.strip().split()[0] for line in f}
    print('- done. {} tokens'.format(len(vocab)))
    return vocab
ad36dffb75dec1bb44108de8de2b4ecbd9d066dd
31,147
import logging

def Handle(
    logger,
    handler=logging.NullHandler(),
    formatter="%(asctime)s %(name)s - %(levelname)s: %(message)s",
    level="WARNING",
):
    """
    Handle a logger with a standardised formatting.

    Parameters
    -----------
    logger : :class:`logging.Logger` | :class:`str`
        Logger or module name to source a logger from.
    handler : :class:`logging.Handler`
        Handler for the logging messages.
    formatter : :class:`str` | :class:`logging.Formatter`
        Formatter for the logging handler. Strings will be passed to
        the :class:`logging.Formatter` constructor.
    level : :class:`str`
        Logging level for the handler.

    Returns
    ----------
    :class:`logging.Logger`
        Configured logger.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    elif isinstance(logger, logging.Logger):
        pass
    else:
        raise NotImplementedError
    if isinstance(formatter, str):
        formatter = logging.Formatter(formatter)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(getattr(logging, level))
    return logger
0b2f2c7e29f3702c4154ca29f457bbba2f0677e4
693,631
def get_forwards_dict(forwards_file):
    """Returns a dictionary with new ott_ids for forwarded ott_ids"""
    fwd_dict = {}
    fi = open(forwards_file)
    header = fi.readline()
    for lin in fi:
        lii = lin.split()
        fwd_dict[int(lii[0])] = lii[1]
    return fwd_dict
1f0e50aac045e8c9ce2fdee500abc46e1d04d2dc
314,777
def find_episode(episode_id, seasons):
    """
    Return metadata for a specific episode from within a nested
    metadata dict.

    Returns an empty dict if the episode could not be found.
    """
    for season in seasons:
        for episode in season['episodes']:
            if str(episode['id']) == episode_id:
                return episode
    return {}
64255ca8e330c3b45768704644ac8bfddbfc1416
30,862
def makeOfficialGlyphOrder(font, glyphOrder=None):
    """Make the final glyph order for 'font'.

    If glyphOrder is None, try getting the font.glyphOrder list.
    If no explicit glyphOrder is defined, sort glyphs alphabetically.

    If ".notdef" glyph is present in the font, force this to always be
    the first glyph (at index 0).
    """
    if glyphOrder is None:
        glyphOrder = getattr(font, "glyphOrder", ())
    names = set(font.keys())
    order = []
    if ".notdef" in names:
        names.remove(".notdef")
        order.append(".notdef")
    for name in glyphOrder:
        if name not in names:
            continue
        names.remove(name)
        order.append(name)
    order.extend(sorted(names))
    return order
95c3cf0096a8390a10c90092aa6b96636c061c5c
368,365
from typing import Tuple
from typing import Union

def central_pixel_coordinates_2d_from(
    shape_native: Tuple[int, int]
) -> Union[Tuple[float], Tuple[float, float]]:
    """
    Returns the central pixel coordinates of a data structure of any dimension
    (e.g. in 1D a `Line`, 2D an `Array2D`, 2d a `Frame2D`, etc.) from the shape
    of that data structure.

    Examples of the central pixels are as follows:

    - For a 3x3 image, the central pixel is pixel [1, 1].
    - For a 4x4 image, the central pixel is [1.5, 1.5].

    Parameters
    ----------
    shape_native : tuple(int)
        The dimensions of the data structure, which can be in 1D, 2D or higher dimensions.

    Returns
    -------
    central_pixel_coordinates : tuple(float)
        The central pixel coordinates of the data structure.
    """
    return (float(shape_native[0] - 1) / 2, float(shape_native[1] - 1) / 2)
1b9acfcf7751c6f6224200f68fd06154940a10d5
477,814
from typing import Mapping
from typing import Tuple
from typing import Optional
from typing import Sequence

def _get_sorted_experiment_kinds(
    experiment_kinds: Mapping[str, Tuple[str]],
    distance_kinds_order: Optional[Sequence[str]],
) -> Mapping[str, Tuple[str]]:
    """Sorts `experiment_kinds` in order of `distance_kinds_order`, if specified.

    Args:
        experiment_kinds: Different subsets of data to tabulate,
            e.g. visitation distributions.
        distance_kinds_order: The order in which to run and present the distance
            algorithms, which are keys of `experiment_kinds`.

    Returns:
        `experiment_kinds` sorted according to `distance_kinds_order`.
    """
    if not experiment_kinds:
        raise ValueError("Empty `experiment_kinds`.")

    if distance_kinds_order:
        if len(distance_kinds_order) != len(experiment_kinds):
            raise ValueError(
                f"Order '{distance_kinds_order}' is different length"
                f" to keys '{experiment_kinds.keys()}'."
            )
        if set(distance_kinds_order) != set(experiment_kinds.keys()):
            raise ValueError(
                f"Order '{distance_kinds_order}' is different set"
                f" to keys '{experiment_kinds.keys()}'."
            )
        experiment_kinds = {k: experiment_kinds[k] for k in distance_kinds_order}
    return experiment_kinds
910c401c229f4716132a66984a87cee976788534
328,778
def __row_helper(title, value, unit=None, seperator=None):
    """Helps format package information in a standardized way.

    Args:
        title (str): The title of the value. Left aligned.
        value (any): The value to be displayed.
        unit (str, optional): The value's unit.
        seperator (str): The seperator between the value and unit.

    Returns:
        String: Title (left aligned, 50 char) and value with unit (right aligned, 28 char).
    """
    title = str(title)
    if seperator is None:
        seperator = " "
    if unit:
        value = "{}{}{}".format(value, seperator, unit)
    else:
        value = str(value)
    length_left = 78 - len(title)
    if length_left - len(value) < 1:
        return "{:30} {:>48}\n".format(title, value)
    else:
        return "{:{}} {:>{}s}\n".format(title, len(title), value, length_left)
b5ff037b913c7a0f26c135d4f9fcdeb015b05ba4
394,875
import json

def _parse_request_body(request):
    """Parse the JSON from the request body, or return its bytes"""
    body = request.body
    if not body:
        return b''
    if request.headers.get("Content-Type") == "application/json":
        if isinstance(body, bytes):
            return json.loads(body.decode())
        return json.loads(body)
    if isinstance(body, str):
        return body.encode()
    return body
d6faaa7b18474e66679260bca9fc9e00db22fcec
192,656
def is_title(result):
    """Returns true if the result is a title match."""
    for metadatum in result.metadata:
        if metadatum.name == "is_title":
            return metadatum.value == "true"
    return False
2760a421755947126c89b7b0d193c6d4d3a56579
334,816
def get_input_path_contents(path):
    """Get the contents of a file."""
    if not path.is_file():
        error = "Unable to find file: '{p}'"
        error = error.format(p=path)
        raise Exception(error)
    contents = path.read_text()
    return contents
f94aca95afd56a454399becab1c61ab9dd646a1e
154,311
def clean_cluster_seq_id(id):
    """Returns a cleaned cd-hit sequence id

    The cluster file has sequence ids in the form of:
        >some_id...
    """
    return id[1:-3]
b690d7472b1fb90743be27fed9ce9ef7c3b06694
687,241
def _get_r2(y, y_hat):
    """Calculate the goodness of fit.

    Arguments:
        y {ndarray} -- 1d array object with int.
        y_hat {ndarray} -- 1d array object with int.

    Returns:
        float
    """
    m = y.shape[0]
    n = y_hat.shape[0]
    assert m == n, "Lengths of two arrays do not match!"
    assert m != 0, "Empty array!"
    sse = ((y - y_hat) ** 2).mean()
    sst = y.var()
    r2 = 1 - sse / sst
    return r2
e40dc26e016ec319b8658685fd00dbc3bdd63ffe
142,530
def replace_variant(ref, variant, start, stop=None):
    """Take a string, ref. Insert a string variant that replaces the bases
    in ref from start to stop, inclusive.

    start and stop are 0-based Pythonic coordinates.
    if stop is None, the variant will simply be inserted before the start base
    """
    if stop is None:
        stop = start
    assert stop >= start
    assert start > 0 and stop > 0
    assert start <= len(ref)
    assert stop <= len(ref)
    return ref[:start] + variant + ref[stop:]
4a449b2cf51cf4e996519cc441345cd98ced97ef
298,269
def del_none_col(df, threshold=0.5):
    """Delete columns whose fraction of null values exceeds the threshold."""
    l = []
    for each in df:
        if df[each].isnull().sum() / len(df[each]) > threshold:
            l.append(each)
    df.drop(l, axis=1, inplace=True)
    if len(l) > 0:
        print(f"Deleted {len(l)} features below:")
        return l
    else:
        print('No qualified feature to delete')
e7c7301366bafded460ef39b669f1b01e743b59f
461,215
def _make_params_string(params):
    """Convert list of parameters to string"""
    p_str = '&'.join(['{}={}'.format(f, params[f]) for f in params.keys()])
    return p_str
db94c55492d506a364b6064bda3638995caf546f
72,185
def make_default_headers(n):
    """
    Make a set of simple, default headers for files that are missing them.
    """
    return [u'column%i' % (i + 1) for i in range(n)]
c82045035f989cd4d462f901af36529816387bd9
441,584