Columns: content (string, length 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k)
def del_constant_start_stop(x):
    """
    >>> l = [1,2,3,4]
    >>> del_constant_start_stop(l)
    [1, 2]
    >>> l = [1,2,3,4,5,6,7]
    >>> del_constant_start_stop(l)
    [1, 2, 7]
    """
    del x[2:6]
    return x
f6b98c3d1e082588db962f6887a4035307756a0d
109,584
import re


def gather_fn_handles(mc_lines, warn):
    """Gets all footnote references (handles) occurring in main content
    excluding any occurring in XML comments."""
    fn_handles = set()
    in_comment = False
    for mcl in mc_lines:
        if re.match(r"^\s*<!---?\s*$", mcl):
            in_comment = True
        elif in_comment and re.match(r"^\s*---?>\s*$", mcl):
            in_comment = False
        elif not in_comment:
            # handle up to 3 footnotes in a single <sup></sup>
            fns1 = re.findall(r"<sup>\[([a-zA-Z0-9_-]*)\]</sup>", mcl)
            fn_handles = fn_handles.union(set(fns1))
            fns2 = re.findall(r"<sup>\[([a-zA-Z0-9_-]*)\],\[([a-zA-Z0-9_-]*)\]</sup>", mcl)
            for fn in fns2:
                fn_handles = fn_handles.union(set(fn))
            fns3 = re.findall(r"<sup>\[([a-zA-Z0-9_-]*)\],\[([a-zA-Z0-9_-]*)\],\[([a-zA-Z0-9_-]*)\]</sup>", mcl)
            for fn in fns3:
                fn_handles = fn_handles.union(set(fn))
            fns4p = re.findall(r"<sup>\[([a-zA-Z0-9_-]*)\],\[([a-zA-Z0-9_-]*)\],\[([a-zA-Z0-9_-]*)\],(.*)</sup>", mcl)
            if len(fns4p) > 0:  # any match here means a group of 4 or more footnotes
                print("Error: Max number of grouped footnotes between <sup>...</sup> is 3")
                print("This occurred at or near this line...")
                print(mcl)
                if not warn:
                    print("Correct above issues and re-try...")
                    exit(1)
    return fn_handles
da5cd8bef220e01968005700d336e54a3dde0ee2
265,968
def get_wv1(conds):
    """ [ [wc, wo, wv], [wc, wo, wv], ... ] """
    wv1 = []
    for cond in conds:
        wv1.append(cond[2])
    return wv1
e88de170aaf091d5fa3bbde9b26690e0942b4b49
259,391
def image_zoom(img, new_size, **kwargs):
    """
    Resizes an *image* (from :epkg:`PIL`).

    @param      img         :epkg:`PIL.Image`
    @param      new_size    size after zoom
    @param      kwargs      additional arguments
    @return                 new image
    """
    return img.resize(new_size, **kwargs)
24799135c293a40a13a1462d8d5c978ba5170c67
232,645
def _is_hierachy_searchable(child_id: str) -> bool:
    """
    If the suffix of a child_id is numeric, the whole hierarchy is searchable
    to the leaf nodes. If the suffix of a child_id is alphabetic, the whole
    hierarchy is not searchable.
    """
    pieces_of_child_id_list = child_id.split('.')
    suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]
    return suffix.isnumeric()
13146128fc8ab050323a23f07133676caeb83aaf
28,275
def _parse_limits(joint):
    """Parse joint limits."""
    limit = joint.find("limit")
    lower, upper = float("-inf"), float("inf")
    if limit is not None:
        if limit.has_attr("lower"):
            lower = float(limit["lower"])
        if limit.has_attr("upper"):
            upper = float(limit["upper"])
    return lower, upper
6aa1a31183f52b4699081e1a34860f8f7588c67f
277,768
def align_embedding(sequences, padding_word="<PAD/>", max_length=-1):
    """
    Pads all sentences to the same length. The length is defined by
    the longest sentence. Returns padded sentences.
    """
    print("aligning sequences...")
    if max_length < 0:
        max_length = max(len(x) for x in sequences)
    padded_sequences = []
    for sequence in sequences:
        if len(sequence) > max_length:
            new_sequence = sequence[:max_length]
        else:
            new_sequence = sequence + [padding_word] * (max_length - len(sequence))
        padded_sequences.append(new_sequence)
    return padded_sequences
c87670ad068f811c44621772e0ace4dbdee9de6b
461,338
def _get_node(root, name):
    """ Find xml child node with given name """
    for child in root:
        if child.get('name') == name:
            return child
    raise ValueError(f'{name} not found')
4cb0d450fd1ea6fffda3366ec77ed500e6a47a9a
406,620
from typing import List
from typing import Optional
from typing import Tuple


def _get_file_name(path: List[str]) -> Optional[Tuple[str, str]]:
    """Fetches the file name from the path.

    Args:
        path: The list of elements in the path

    Returns:
        None is returned on failure. If the path contains a valid file path
        the tuple will contain the path in the first element. The second
        element will be the extension of the file.
    """
    # The path may not be empty
    if len(path) < 1:
        return None

    # Check that the path is sanitized
    # Path elements may only contain alphabetic characters, numbers and
    # dashes. Only the last path element may have exactly one dot.
    for i in range(len(path)):
        is_last = i == len(path) - 1
        has_dot = False
        has_chars = False
        for c in path[i]:
            if c == ".":
                if not is_last:
                    return None  # dots not in last element
                if has_dot:
                    return None  # multiple dots
                if not has_chars:
                    return None  # element starts with a dot
                has_dot = True
            elif c not in ("abcdefghijklmnopqrstuvwxyz"
                           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                           "0123456789-"):
                return None  # illegal character
            has_chars = True

    # Construct the file name
    fn = "/".join(path)
    if "." not in fn:
        return None  # no file extension
    dot_idx = fn.index(".")
    f = "./res/%s" % fn
    ext = fn[(dot_idx + 1):]
    return f, ext
101069b6403b3da0d6b705d714943f32cd0a4810
331,551
import pickle
import zlib


def zunpickle(zdata):
    """Given a zlib compressed pickled serialization, returns the
    deserialized data."""
    return pickle.loads(zlib.decompress(zdata), encoding='latin1')
201ca5f014c7f3b23ddbd418541b233b3a6f1d54
152,194
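A quick round-trip sketch, assuming zunpickle from the record above is in scope; the compress step mirrors a hypothetical zpickle counterpart that is not part of the record:

import pickle
import zlib

data = {"a": 1, "b": [2, 3]}
zdata = zlib.compress(pickle.dumps(data))  # hypothetical inverse of zunpickle
assert zunpickle(zdata) == data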
def addLists(list1, list2):
    """Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3]."""
    # Find big list and small list
    blist, slist = list(list2), list(list1)
    if len(list1) > len(list2):
        blist, slist = slist, blist
    # Overlay small list onto big list
    for i, b in enumerate(slist):
        blist[i] += b
    return blist
f5469dab8fd2c62d2d3ffed253803c1a3d343281
702,997
def addYears(date, value):
    """Add or subtract an amount of years to a given date and time.

    Args:
        date (Date): The starting date.
        value (int): The number of units to add, or subtract if the value is
            negative.

    Returns:
        Date: A new date object offset by the integer passed to the function.
    """
    return date.replace(year=date.year + value)
0d218095921e38b92042b80e1fb8b20c62de3b28
537,075
def is_palindrome(word):
    """
    Determines if the specified word is a palindrome.

    Args:
        word: The word to check.

    Returns:
        True if the word is a palindrome, otherwise False.
    """
    return word.lower() == word[::-1].lower()
606934276488a1e8e88fa59fb21b41c8da84f1d2
433,991
import torch


def random_choice(a, size):
    """Generates a random sample of a given size from a 1-D tensor.
    The sample is drawn without replacement.

    Parameters
    ----------
    a: torch.tensor
        The input tensor from which the sample will be drawn.
    size: int
        The size of the generated sample.

    Returns
    -------
    sample: torch.tensor
        The generated sample.
    """
    permutation = torch.randperm(a.size(0))
    indices = permutation[:size]
    return a[indices]
d47202dd08a1a5f0845c6057fbee8a0a41a0a3f9
83,636
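A minimal usage sketch, assuming random_choice from the record above is in scope; the sampled values shown are illustrative:

import torch

a = torch.tensor([10, 20, 30, 40, 50])
sample = random_choice(a, 3)  # e.g. tensor([40, 10, 30]); order is random
assert sample.shape == (3,)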
from datetime import datetime


def as_date(x, format='%m/%d/%Y'):
    """ Convert date string to datetime object """
    return datetime.strptime(x, format)
a4a6e11310894db0c8781a2ae70874b51a41bc5b
396,240
import six


def convert_id36_to_numeric_id(id36):
    """Convert strings representing base36 numbers into an integer."""
    if not isinstance(id36, six.string_types) or id36.count("_") > 0:
        raise ValueError("must supply base36 string, not fullname (e.g. use "
                         "xxxxx, not t3_xxxxx)")
    return int(id36, 36)
5ccd3661f6985c73e38ebcb6bbf9d0e242cddbb6
657,389
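For reference, int() does the base-36 conversion itself; the helper mainly adds the fullname guard:

assert convert_id36_to_numeric_id("zz") == 35 * 36 + 35  # == 1295
# convert_id36_to_numeric_id("t3_15bfi0") raises ValueError (fullname, not id36)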
def get_pbs_node_requirements(sys_settings, node_count):
    """Get the cpu and memory requirements for a given number of nodes

    Args:
        sys_settings (dict): System settings dict, as supplied from config_manager
        node_count (int): Number of whole nodes on target system

    Returns:
        dict: ncpus and mem for target number of nodes
    """
    ncpus = node_count * sys_settings['REMOTE_SETTINGS']['PBS_SETTINGS']['CORES_PER_NODE']
    mem_per_node = sys_settings['REMOTE_SETTINGS']['PBS_SETTINGS']['MEM_PER_NODE']
    mem = '%s%s' % (int(node_count * mem_per_node[0]), mem_per_node[1])
    return dict(ncpus=ncpus, mem=mem)
f4fd12dee6608a87e6c8b0f2f56e245e6be7c0fc
8,257
def max_value_constraint(value, limit):
    """Test maximum value constraint."""
    return value <= limit
53f9849ae2bd3ee2dd953525c6a0a0bd223c5757
449,304
def get_objname_from_tuple(obj_name_tuple):
    """Given a O, C, I tuple, return its string full name
    (e.g 0&0&DEFINING_ORIGIN).
    """
    O, C, I = obj_name_tuple
    return str(O) + '&' + str(C) + '&' + I
23f6bee2f1af9dfbca90eecca16a00fe5641b722
370,375
from typing import List
from typing import Any


def flatten_list(nested: List[List[Any]]) -> List[Any]:
    """Flatten a nested list."""
    flat = []
    for x in nested:
        flat.extend(x)
    return flat
ef71bf3072b74af31ff218ea0e57ba3c08e0436c
293,176
def check_size(indices: list, queries: list) -> int:
    """
    Check whether size of all indices and queries are the same

    :param list indices: list of all indices
    :param list queries: list of all queries
    :returns: the size when size of all indices and queries are the same,
        or -1 if the lists do not have the same size
    """
    width = 0
    # get the first length if any
    if indices:
        width = len(indices[0])
    elif queries:
        width = len(queries[0])
    # at this point, width will be the length of 1 of the indices or queries,
    # or will still be 0 if there is no index or query
    for index in indices:
        if len(index) != width:
            print('Indices are not the same width')
            return -1
    for query in queries:
        if len(query) != width:
            print('Queries and indices not the same width')
            return -1
    return width
7075c42042824439b6d25bffd1a5b8c2291b4012
549,550
def interpolate(r1, r2, x=None, y=None):
    """Perform simple linear interpolation between two points in 2D -
    one of x or y must be defined

    Parameters
    ----------
    r1, r2 : float
        x,y coordinates from which to interpolate
    x : float
        x-value from which to interpolate y (default: None)
    y : float
        y-value from which to interpolate x (default: None)

    Returns
    -------
    val : float
        interpolated value

    History
    -------
    2019 - Written - Webb (UofT)
    """
    x1, y1 = r1
    x2, y2 = r2
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1

    if x is not None:
        val = m * x + b
    elif y is not None:
        val = (y - b) / m
    else:
        print("NO INTERMEDIATE COORDINATE GIVEN")
        val = 0

    return val
a222293c451b5a0826ddcd5c5223f9b85c2a6510
315,999
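A worked example, assuming interpolate from the record above is in scope: the line through (0, 0) and (2, 4) has slope 2, so x = 1.5 maps to y = 3 and back:

assert interpolate((0, 0), (2, 4), x=1.5) == 3.0
assert interpolate((0, 0), (2, 4), y=3.0) == 1.5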
from datetime import datetime


def is_rule_expired(rule_timestamp):
    """Returns True if the rule timestamp is expired."""
    return rule_timestamp < datetime.now()
fd9c0c5f693efa53ea721a030544db5b620d2b54
402,323
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values

    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): list of average red, green, blue values across pixels
        respectively

    Assumes you are returning in the order: [red, green, blue]
    """
    red_total = 0    # running total of red values
    green_total = 0  # running total of green values
    blue_total = 0   # running total of blue values
    rgb = []         # list to store the average values
    for ele in pixels:
        red_total += ele.red
        green_total += ele.green
        blue_total += ele.blue
    red = red_total / len(pixels)      # average red value
    green = green_total / len(pixels)  # average green value
    blue = blue_total / len(pixels)    # average blue value
    rgb.append(int(red))
    rgb.append(int(green))
    rgb.append(int(blue))
    return rgb
5e26a0aee4f719ff499f0c5ce73e8794ce40388f
296,111
def doc_urls_to_string(doc_urls, queryset=False):
    """
    Args:
        doc_urls: [{"name": "wiki", "url": "http://www.wiki.com"}, ...]
        OR
        doc_urls: [models.DocUrl] (if queryset=True)

    Returns:
        '(wiki, http://www.wiki.com), ...'
    """
    if queryset:
        new_doc_urls = [(i.name, i.url) for i in doc_urls]
    else:
        new_doc_urls = [(i['name'], i['url']) for i in doc_urls]
    return str(sorted(new_doc_urls))
c89de44daec117c5e09870e4381993e08ef1b933
147,857
def check(grad_honests, f_real, defense, factor=-16, negative=False, **kwargs):
    """ Check parameter validity for this attack template.

    Args:
        grad_honests Non-empty list of honest gradients
        f_real       Number of Byzantine gradients to generate
        defense      Aggregation rule in use to defeat
        ...          Ignored keyword-arguments

    Returns:
        Whether the given parameters are valid for this attack
    """
    if not isinstance(grad_honests, list) or len(grad_honests) == 0:
        return "Expected a non-empty list of honest gradients, got %r" % (grad_honests,)
    if not isinstance(f_real, int) or f_real < 0:
        return "Expected a non-negative number of Byzantine gradients to generate, got %r" % (f_real,)
    if not callable(defense):
        return "Expected a callable for the aggregation rule, got %r" % (defense,)
    if not ((isinstance(factor, float) and factor > 0) or (isinstance(factor, int) and factor != 0)):
        return "Expected a positive number or a negative integer for the attack factor, got %r" % (factor,)
    if not isinstance(negative, bool):
        return "Expected a boolean for optional parameter 'negative', got %r" % (negative,)
59b11df0809cf2de7e35e23c6b9416adad3202da
425,255
import torch


def smooth_weights(class_freqs: torch.Tensor,
                   smoothing: float = 0.15,
                   clip: float = 10.0,
                   normalize: bool = True) -> torch.Tensor:
    """Compute smoothed weights starting from class frequencies (pixel counts).

    Args:
        class_freqs (torch.Tensor): tensor with shape (num_classes,)
        smoothing (float, optional): smoothing factor. Defaults to 0.15.
        clip (float, optional): maximum value before clipping. Defaults to 10.0.
        normalize (bool, optional): whether to map them to range [0, 1]. Defaults to True.

    Returns:
        torch.Tensor: weights inversely proportional to frequencies, normalized if required
    """
    # the larger the smooth factor, the bigger the quantities to sum to the
    # remaining counts (additive smoothing)
    freqs = class_freqs.float() + class_freqs.max() * smoothing
    # retrieve the (new) max value, divide by counts, clip to max. value
    weights = torch.clamp(freqs.max() / class_freqs, min=1.0, max=clip)
    if normalize:
        weights /= weights.max()
    return weights
75bdb480c158632dfd418582d77f3c54c3b7af8a
288,691
import torch


def all_comb(X, Y):
    """
    Returns all possible combinations of elements in X and Y.

    X: (n_x, d_x)
    Y: (n_y, d_y)
    Output: Z: (n_x*n_y, d_x+d_y)

    Example:
        X = tensor([[8, 8, 8],
                    [7, 5, 9]])
        Y = tensor([[3, 8, 7, 7],
                    [3, 7, 9, 9],
                    [6, 4, 3, 7]])
        Z = tensor([[8, 8, 8, 3, 8, 7, 7],
                    [8, 8, 8, 3, 7, 9, 9],
                    [8, 8, 8, 6, 4, 3, 7],
                    [7, 5, 9, 3, 8, 7, 7],
                    [7, 5, 9, 3, 7, 9, 9],
                    [7, 5, 9, 6, 4, 3, 7]])
    """
    assert len(X.size()) == 2
    assert len(Y.size()) == 2
    X1 = X.unsqueeze(1)
    Y1 = Y.unsqueeze(0)
    X2 = X1.repeat(1, Y.shape[0], 1)
    Y2 = Y1.repeat(X.shape[0], 1, 1)
    Z = torch.cat([X2, Y2], -1)
    Z = Z.view(-1, Z.shape[-1])
    return Z
525370a163c2c40593e6277f1f27befd1fc3be09
558,543
def surface_absorption(wave_len, wave_len_min, wave_len_max, abs_surface):
    """Determines probability of absorption as a function of wavelength at
    a particular boundary.

    Parameters
    ----------
    wave_len : float
        Bundle wavelength
    wave_len_min : float
        Minimum wavelength absorbed by matrix
    wave_len_max : float
        Maximum wavelength absorbed by matrix
    abs_surface : DataFrame
        Probability of absorption of a surface as a function of wavelength

    Returns
    -------
    probability : float
        Probability that a bundle is absorbed

    Notes
    -----
    surface_absorption should be renamed to boundary_absorption
    """
    probability = 0
    if wave_len_min <= wave_len <= wave_len_max:
        probability = abs_surface(wave_len)
    return probability
90cc70051f6977bdffa42e026d1a46f6d2575ba3
478,333
def calc_clf(rs, rso):
    """
    Crawford and Duchon (1999) cloud fraction.

    Parameters
    ----------
    rs : numpy ndarray
        Total incoming shortwave solar radiation, in MegaJoules per square
        meter per day
    rso : numpy ndarray
        Clear-sky solar radiation.
    """
    clf = 1.0 - (rs / rso)
    # From Crawford and Duchon (1999):
    # Calculated values of clf less than zero were adjusted back to
    # zero so as to be physically realistic.
    clf[clf < 0] = 0
    return clf
47b73001f05e06bb7b01a25338c54b8398e6cf3c
376,013
def get_sources(dataframe):
    """
    extract sources

    :param pandas.core.frame.DataFrame dataframe:
    :rtype: set
    :return: set of archive.org links
    """
    sources = set()
    for index, row in dataframe.iterrows():
        sources.update(row['incident_sources'].keys())
    return sources
468c0cf6428833c9b05c06415917a516471189a5
700,385
def load_RIRE_ground_truth(file_name):
    """
    Load the point sets defining the ground truth transformations for the
    RIRE training dataset.

    Args:
        file_name (str): RIRE ground truth file name. File format is specific
            to the RIRE training data, with the actual data expected to be
            in lines 15-23.

    Returns:
        Two lists of tuples representing the points in the "left" and "right"
        coordinate systems.
    """
    with open(file_name, "r") as fp:
        lines = fp.readlines()
        l = []
        r = []
        # Fiducial information is in lines 15-22, starting with the second entry.
        for line in lines[15:23]:
            coordinates = line.split()
            l.append(
                (float(coordinates[1]), float(coordinates[2]), float(coordinates[3]))
            )
            r.append(
                (float(coordinates[4]), float(coordinates[5]), float(coordinates[6]))
            )
    return (l, r)
9c7747b6fad1a10fb8cbb32162a3423e31fa40f3
27,368
def print_last_word(words):
    """Prints the last word after popping it off."""
    word = words.pop(-1)
    print(word)  # Pop out
    result = None
    return result
35b24f63921c46e8f075a0f85fe0bd6d540b2a37
398,375
def _calc_padding_for_alignment(align, base):
    """
    Returns byte padding required to move the base pointer into proper
    alignment
    """
    rmdr = int(base) % align
    if rmdr == 0:
        return 0
    else:
        return align - rmdr
473876dc24eefadfc174c5c3cb05820085d2e1d5
455,332
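A quick sanity check of the arithmetic: with 8-byte alignment, a base address of 13 needs 3 bytes of padding to reach 16, while an aligned address needs none:

assert _calc_padding_for_alignment(8, 13) == 3   # 13 + 3 == 16
assert _calc_padding_for_alignment(8, 16) == 0   # already aligned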
def expand_onPremisesExtensionAttributes(entry):
    """
    entry - a dictionary that must have the "onPremisesExtensionAttributes"
    key defined

    This transformer takes a dictionary, and returns the same dictionary, but
    with the "onPremisesExtensionAttributes" value expanded into its own
    key: value pair. This value must initially be a JSON.
    """
    for key, value in entry.get("onPremisesExtensionAttributes", {}).items():
        entry[key] = value
    entry.pop("onPremisesExtensionAttributes", None)
    return entry
3bcf88ff68fe92588f42ad27018a31d4935161d7
159,224
def splitclass(classofdevice):
    """
    Splits the given class of device to return a 3-item tuple with the
    major service class, major device class and minor device class values.

    These values indicate the device's major services and the type of the
    device (e.g. mobile phone, laptop, etc.). If you google for "assigned
    numbers bluetooth baseband" you might find some documents that discuss
    how to extract this information from the class of device.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)
        >>>
    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" %
                            str(classofdevice))
    data = classofdevice >> 2  # skip over the 2 "format" bits
    service = data >> 11
    major = (data >> 6) & 0x1F
    minor = data & 0x3F
    return (service, major, minor)
37c19ab17293b4fd0c46cff24c30e349459f7bd0
707,830
def as_lines(content):
    """This function splits given ``content`` into lines if it's a string or
    returns it as is if it's a list.

    Args:
        content: String or list of strings.

    Returns:
        List of strings.
    """
    if isinstance(content, list):
        return content
    return content.split('\n')
129d95a35e3e74837d9c168df5bcd4e6c7271d5e
598,013
import random


def _get_user_agent() -> str:
    """Get a random User-Agent string from a list of some recent real browsers

    Parameters
    ----------
    None

    Returns
    -------
    str
        random User-Agent string
    """
    user_agent_strings = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:86.1) Gecko/20100101 Firefox/86.1",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:86.1) Gecko/20100101 Firefox/86.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:82.1) Gecko/20100101 Firefox/82.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:86.0) Gecko/20100101 Firefox/86.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:86.0) Gecko/20100101 Firefox/86.0",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:83.0) Gecko/20100101 Firefox/83.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:84.0) Gecko/20100101 Firefox/84.0",
    ]
    return random.choice(user_agent_strings)
bad887c3c3ee66bdb70f7f4fda4a925df0afb12d
122,239
def is_numeric(lit):
    """
    value of numeric: literal, string, int, float, hex, binary
    From http://rosettacode.org/wiki/Determine_if_a_string_is_numeric#Python
    """
    # Empty String
    if len(lit) <= 0:
        return lit
    # Handle '0'
    if lit == '0':
        return 0
    # Hex/Binary
    if len(lit) > 1:  # sometimes just '-' means no data...
        litneg = lit[1:] if lit[0] == '-' else lit
        # guard the litneg[1] lookup so inputs like '-0' don't raise IndexError
        if len(litneg) > 1 and litneg[0] == '0':
            if litneg[1] in 'xX':
                return int(lit, 16)
            elif litneg[1] in 'bB':
                return int(lit, 2)
            else:
                try:
                    return int(lit, 8)
                except ValueError:
                    pass
    # Int/Float/Complex
    try:
        return int(lit)
    except ValueError:
        pass
    try:
        return float(lit)
    except ValueError:
        pass
    try:
        return complex(lit)
    except ValueError:
        pass
    return lit
75f49677274e800af93fd9f518f05d72e8f85c6c
313,890
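A few illustrative calls, following the branches above:

assert is_numeric("0x1A") == 26      # hex
assert is_numeric("0b101") == 5      # binary
assert is_numeric("3.5") == 3.5      # float
assert is_numeric("abc") == "abc"    # non-numeric input is returned unchanged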
def sql_file(tmp_path):
    """ Construct a file containing a SQL statement """
    directory = tmp_path / "sql"
    directory.mkdir()
    file = directory / "sql.txt"
    file.write_text("SELECT\n 1 as foo;")
    return file.absolute()
e5adb73bc24519ab7547aa2ca897bcca70e4aaca
653,561
def gen_file_name(output_path, title):
    """Generates the name of the PDF-file from the "doc title" in the json
    file.

    Args:
        output_path (string): relative output path
        title (string): title of the file according to content.json

    Returns:
        string: file_name of the pdf-file
    """
    file_name = output_path + title + ".pdf"
    return file_name
031e5fd78e0958c3de711c33e0357f1034a5e02d
64,598
def separate_hourly_vals(hourstring):
    """Separate individual hourly field means from the string containing all
    24 values in the WDC file. Called by wdc_parsefile.

    Args:
        hourstring (str): string containing the hourly magnetic field means
            parsed from a WDC file for a single day.

    Returns:
        hourly_vals_list (list): list containing the hourly field values.
    """
    n = 4
    hourly_vals_list = [hourstring[i:i + n]
                        for i in range(0, len(hourstring), n)]
    return hourly_vals_list
0f9cca7102f583ca747ba4928fcb3cec4d996a99
407,572
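A short sketch of the fixed-width slicing; the input string here is made up for illustration:

assert separate_hourly_vals("001200340056") == ["0012", "0034", "0056"]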
def get_child_object_data(raw_data):
    """
    Returns key/value pairs that describe child objects in raw data

    Args:
        raw_data (dict):

    Returns:
        dict:
    """
    return {k: v for k, v in raw_data.items() if k.startswith("[")}
1ea57f62592de0ed17d9f45628ecd3d87313d654
453,077
def _GetMuteConfigIdFromFullResourceName(mute_config):
    """Gets muteConfig id from the full resource name."""
    mute_config_components = mute_config.split("/")
    return mute_config_components[len(mute_config_components) - 1]
33b5c5598a156768dc4c87ddf5cc77d08cb6766e
87,112
def list_contains(list1, list2):
    """Return True if any item in `list1` is also in `list2`."""
    for m in list1:
        for n in list2:
            if n == m:
                return True
    return False
37668fe6641f3c7c575c4b82dd6c3d1d4dfda974
265,023
import collections


def count_tokens(samples):
    """Count tokens in the data set."""
    token_counter = collections.Counter()
    for sample in samples:
        for token in sample:
            if token not in token_counter:
                token_counter[token] = 1
            else:
                token_counter[token] += 1
    return token_counter
416d3450b25c3e2d1b9ab10914b45f5f848a6608
193,162
import csv


def get_consensus_sequence(seq_info):
    """
    Determine the consensus sequence of an alignment.
    Definition of consensus: most common base represented at that position.
    """
    def output_position_matrix(position_matrix):
        output_file = open('position_matrix.csv', 'w', newline='')
        keys = position_matrix[0].keys()
        csvwriter = csv.DictWriter(output_file, keys)
        csvwriter.writeheader()
        csvwriter.writerows(position_matrix)
        output_file.close()

    consensus_sequence = []
    position_matrix = []
    for position in seq_info:
        # Ignore any ambiguous basecalls - accept A, T, C, G, and 'gap'
        base_counts = {
            'A': position['bases'].count('a'),
            'T': position['bases'].count('t'),
            'C': position['bases'].count('c'),
            'G': position['bases'].count('g'),
            '-': position['bases'].count('-'),
        }
        max_basecalls = [key for key, count in base_counts.items()
                         if count == max(base_counts.values())]
        if len(max_basecalls) == 1:
            consensus_sequence.append(max_basecalls[0])
        else:
            consensus_sequence.append('N')
        # Assembling position_matrix
        position_matrix.append(base_counts)
    # Output position matrix
    output_position_matrix(position_matrix)
    return ''.join(consensus_sequence)
d0c0d8ab0ebe5686514d8769bf7e00df5dd92a36
551,998
def get_repository_from_image(image):
    """ Returns the first RepoTag repository found in image. """
    repotags = image.attrs['RepoTags']
    for repotag in repotags:
        repository, tag = repotag.split(':')
        return repository
1fd10666caea62d2412639b64ac573cefbd77a12
316,680
def poll_for_valid_message(consumer):
    """
    Polls the subscribed topics by the consumer and checks the buffer is not
    empty or malformed.

    :param consumer: The consumer object.
    :return: The message object received from polling.
    """
    msg = consumer.poll()
    assert not msg.error()
    return msg
50e7e48cf24427101e1db52854b540ebb2d60f9f
570,472
def evaluate(pred_joins, gt_joins):
    """
    Evaluate the performance of fuzzy joins

    Parameters
    ----------
    pred_joins: list
        A list of tuple pairs (id_l, id_r) that are predicted to be matches
    gt_joins:
        The ground truth matches

    Returns
    -------
    precision: float
        Precision score
    recall: float
        Recall score
    f1: float
        F1 score
    """
    pred = {(l, r) for l, r in pred_joins}
    gt = {(l, r) for l, r in gt_joins}
    tp = pred.intersection(gt)
    precision = len(tp) / len(pred)
    recall = len(tp) / len(gt)
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1
c8c43a7d1d0905f10f21395d162b2202186989f4
225,030
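A worked example, assuming evaluate from the record above is in scope: with 2 of 3 predictions correct against 4 ground-truth pairs, precision is 2/3, recall is 1/2, and F1 is their harmonic mean, 4/7:

pred = [(1, 1), (2, 2), (3, 9)]  # illustrative id pairs
gt = [(1, 1), (2, 2), (4, 4), (5, 5)]
p, r, f1 = evaluate(pred, gt)
assert (p, r) == (2 / 3, 1 / 2)
assert abs(f1 - 4 / 7) < 1e-12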
def get_grouped_by_powers(bases, powers):
    """
    Groups the powers and bases in the given
    `~astropy.units.CompositeUnit` into positive powers and
    negative powers for easy display on either side of a solidus.

    Parameters
    ----------
    bases : list of `astropy.units.UnitBase` instances
    powers : list of ints

    Returns
    -------
    positives, negatives : tuple of lists
        Each element in each list is tuple of the form (*base*, *power*).
        The negatives have the sign of their power reversed (i.e. the powers
        are all positive).
    """
    positive = []
    negative = []
    for base, power in zip(bases, powers):
        if power < 0:
            negative.append((base, -power))
        elif power > 0:
            positive.append((base, power))
        else:
            raise ValueError("Unit with 0 power")
    return positive, negative
add38c3024ef25c04bccdab6093a32370725f654
453,522
def ad_group_ids_are_unique(df):
    """
    This function returns True iff ad_group_ids are unique
    (only show up once in the rows of df)
    """
    return len(df[['ad_group_id']].drop_duplicates()) == len(df)
468834e05f7bb15282078fa0bb73ed82227fc18d
599,750
import json


def loadSettings(save):
    """load ON, OFF and VOLUME_THRESHOLD settings from a file"""
    try:
        with open("./settings/" + save + ".json", "r") as f:
            data = json.load(f)
            print("Loaded settings")
            return data["off"], data["on"], data["vol"]
    except Exception as e:
        raise OSError("Invalid setting name: " + save)
6010dc5b1e7e51d5a2da905c0906236090c8a2d1
295,198
def _function_iterator(graph):
    """Iterate over the functions in a graph.

    :rtype: iter[str]
    """
    return (
        node.function
        for node in graph
    )
aaff945176f3d5754a4381cb74ad5a660a298556
691,013
from typing import Iterable
from typing import Any


def join(items: Iterable[Any], s: str) -> str:
    """Join items (probably of an array).

    Example usage: myArray|join:','
    """
    return s.join(items)
30480c3582cd7e602a7d5108b820c573908b4c22
561,833
def win_ts_to_unix_epoch(high, low):
    """Convert Windows timestamp to POSIX timestamp.

    See https://goo.gl/VVX0nk

    Args:
        high (int): high 32 bits of windows timestamp.
        low (int): low 32 bits of windows timestamp.

    Returns:
        float
    """
    return high * ((2 ** 32) / 1e9) + low / 1e9 - 11644473600
6f309ec4255dd8063814ad329984f0d80afd6b36
79,499
import torch


def get_minibatch(t, y, nsub=None, tsub=None, dtype=torch.float64):
    """ Extract nsub sequences each of length tsub from the original dataset y.

    Args:
        t (np array [T]): T integration time points from the original dataset.
        y (np array [N,T,d]): N observed sequences from the original dataset,
            each with T datapoints where d is the dimension of each datapoint.
        nsub (int): Number of sequences to be selected from.
            If nsub is None, then all N sequences are considered.
        tsub (int): Length of sequences to be returned.
            If tsub is None, then sequences of length T are returned.

    Returns:
        tsub (torch tensor [tsub]): Integration time points (in this minibatch)
        ysub (torch tensor [nsub, tsub, d]): Observed (sub)sequences (in this minibatch)
    """
    # Find the number of sequences N and the length of each sequence T
    [N, T] = y.shape[:2]

    # If nsub is None, then consider all sequences
    # Else select nsub sequences randomly
    y_ = y if nsub is None else y[torch.randperm(N)[:nsub]]

    # Choose the starting point of the sequences
    # If tsub is None, then start from the beginning
    # Else find a random starting point based on tsub
    t0 = 0 if tsub is None else torch.randint(0, 1 + len(t) - tsub, [1]).item()  # pick the initial value
    tsub = T if tsub is None else tsub

    # Set the data to be returned
    tsub = torch.from_numpy(t[t0:t0 + tsub]).type(dtype)
    ysub = torch.from_numpy(y_[:, t0:t0 + tsub]).type(dtype)
    return tsub, ysub
7773b86be5966306c3d33827ef49353b9525f8bf
459,783
def build_dependency_list(deps, version_prefix=''):
    """Build a list of dependency specifiers from a dependency map.

    This can be used along with :py:data:`package_dependencies`,
    :py:data:`npm_dependencies`, or other dependency dictionaries to build a
    list of dependency specifiers for use on the command line or in
    :file:`setup.py`.

    Args:
        deps (dict):
            A dictionary of dependencies.

    Returns:
        list of unicode:
        A list of dependency specifiers.
    """
    new_deps = []
    for dep_name, dep_details in deps.items():
        if isinstance(dep_details, list):
            new_deps += [
                '%s%s%s; python_version%s'
                % (dep_name, version_prefix, entry['version'], entry['python'])
                for entry in dep_details
            ]
        else:
            new_deps.append('%s%s%s' % (dep_name, version_prefix, dep_details))
    return sorted(new_deps, key=lambda s: s.lower())
910a250a45a21025a47ff2695f11b56d1cee910f
170,646
def get_snapping_pos_line_chart_vertical(data_objects, line_pos):
    """
    Calculates the nearest pos of the data objects from a line chart with a
    vertical line.

    :param data_objects: The nodes of the data objects in the chart.
    :type data_objects: dict[str, PolyLineNode]
    :param line_pos: The necessary position of the node of the line according
        to the orientation.
    :type line_pos: float
    :return: The nearest pos (x) of a data object in the given chart.
    :rtype: float
    """
    nearest_pos = []
    for node in data_objects.values():  # itervalues() in the original Python 2 code
        nearest_pos.append(sorted(node.pos, key=lambda pos: abs(line_pos - pos[0]))[0])
    return sorted(nearest_pos, key=lambda pos: abs(line_pos - pos[0]))[0][0]
bb0db8aa9280d21119fd07d85b3046453fa4ffda
641,467
import collections


def read_csv(path):
    """Read data from a CSV file.

    Parameters
    ----------
    path : str
        Path to the CSV file produced by the `ChessAnalysis` program.

    Returns
    -------
    data : dict
        Dictionary mapping player names to a sequence of rating values.
    """
    data = collections.defaultdict(list)
    with open(path) as f:
        next(f)  # First line is header.
        for line in f:
            name, year, mu, sigma = line.strip().split(",")
            data[name].append((int(year), float(mu), float(sigma)))
    return data
2fc9dee72997eebbbfdd8e1958b8a7d6ce60b265
287,851
def transposed(matrix):
    """Returns the transpose of the given matrix."""
    return [list(r) for r in zip(*matrix)]
30835a1583f365b558c39e8fd1b459e79e85448e
638,027
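The zip(*matrix) idiom pairs up the i-th element of every row, which is exactly the transpose:

assert transposed([[1, 2, 3], [4, 5, 6]]) == [[1, 4], [2, 5], [3, 6]]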
import torch


def get_backward_segment(backward_rnn_output, s, e, device):
    """Gets span representation in backward rnn

    Arguments:
        backward_rnn_output {tensor} -- backward rnn output
        s {int} -- span start
        e {int} -- span end
        device {int} -- device

    Returns:
        tensor -- span representation vector
    """
    seq_len, hidden_size = backward_rnn_output.size()
    if s >= e:
        vec = torch.zeros(hidden_size, dtype=backward_rnn_output.dtype)
        if device > -1:
            vec = vec.cuda(device=device, non_blocking=True)
        return vec
    if e == seq_len:
        return backward_rnn_output[s]
    return backward_rnn_output[s] - backward_rnn_output[e]
11189a690b1fb9b0420657882a47093f1099589b
598,995
import math


def degToRadian(angle):
    """Convert angle from degrees to radians"""
    return angle * (math.pi / 180)
fcac824c69ea56e58702ead320293e760a10fe2c
215,039
def get_responders(players, suggester):
    """ get the responders (in the correct order) for the given suggester """
    si = players.index(suggester)
    return players[si + 1:] + players[:si]
b64fb803c3c95ddf9080ca5bbaaf902cd61e45c2
430,998
from typing import Optional
import logging


def get_existing_handlers(handlertype) -> Optional[logging.Handler]:
    """
    Returns Existing handler or None (if the handler has not yet been added
    to the root handlers).
    """
    return next((h for h in logging.root.handlers if isinstance(h, handlertype)), None)
b5cdfbf20133fcc7629c3291f1111fe353b067af
694,434
def constraints_violated(constraints):
    """
    :param constraints: constraints to be evaluated
    :return [0]: True if there are any constraints that are violated, false otherwise
    :return [1]: Maximum violation if one or more constraints exist, else None
    :return [2]: Name of the maximum constraint violated if one or more
        constraints exist, else None
    """
    n = []
    v = []
    violated = False
    for constraint_name, constraint in constraints.items():
        n.append(constraint_name)
        v.append(constraint.violation)
        if constraint.violated:
            violated = True
    if not v:
        return False, None, None
    return violated, max(v), n[v.index(max(v))]
a9274c2f5de0eada038b8c4068acf3a510dc0146
107,653
def user_known(env, user):
    """Returns whether the user has ever been authenticated before."""
    for row in env.db_query("""
            SELECT 1 FROM session WHERE authenticated=1 AND sid=%s
            """, (user,)):
        return True
    return False
97238304979f670fba5e3b7f2c09512e40db1f8e
374,202
def xor(bytes1, bytes2):
    """
    Xor two input bytes and return the result.

    Input:
        `bytes1`, `bytes2`: bytes need to xor.

    Output:
        xor result
    """
    return bytes([a ^ b for a, b in zip(bytes1, bytes2)])
ac898cf412bb48482d2919a213934262e9d7ecda
649,416
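A quick demonstration, assuming xor from the record above is in scope; note that zip() truncates to the shorter input:

assert xor(b"\x0f\xf0", b"\xff\xff") == b"\xf0\x0f"
assert xor(b"\x01\x02\x03", b"\x01") == b"\x00"  # extra bytes are dropped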
def parse_filename(filename):
    """Parse python and platform according to PEP 427 for wheels."""
    # str.strip(".whl") would strip any of the characters ".", "w", "h", "l"
    # from both ends (mangling names like "wheel-..."), so slice the suffix
    # off instead.
    if filename.endswith(".whl"):
        stripped_filename = filename[:-len(".whl")]
    else:
        stripped_filename = filename
    try:
        proj, vers, build, pyvers, abi, platform = stripped_filename.split("-")
    except ValueError:
        # probably no build version available
        proj, vers, pyvers, abi, platform = stripped_filename.split("-")
    return (pyvers, platform)
3f301b0939ffbb5886e827d6b7fd15a96e68d0fc
353,551
def get_friendly_name(name: str) -> str:
    """Get a friendly version of a name."""
    return name.replace("_", " ").title()
f66c5c8205e973d3d0c0cf0ed3acf3ea0a99e840
146,509
def crowd_comparison(p, q):
    """Crowded Comparison Operator.

    Return 1 means p is better than q.
    Return -1 means q is better than p.
    """
    if p.rank < q.rank or (p.rank == q.rank and p.distance > q.distance):
        return 1
    elif p.rank > q.rank or (p.rank == q.rank and p.distance < q.distance):
        return -1
    else:
        return 0
ae7f484feb319477b5d90476f9e4440c551041fe
484,200
def standard_rb(x, baseline, amplitude, decay):
    """
    Fitting function for randomized benchmarking.

    :param numpy.ndarray x: Independent variable
    :param float baseline: Offset value
    :param float amplitude: Amplitude of exponential decay
    :param float decay: Decay parameter
    :return: Fit function
    """
    return baseline + amplitude * decay ** x
60d67143e16307ebd2fa8581d269570590ae6774
651,705
from typing import Callable


def abstract(method: Callable) -> Callable:
    """Marks a method as abstract"""
    def wrapper(*args, **kwargs):
        raise NotImplementedError(f"Missing required method {repr(method)}")
    return wrapper
70462cb5232b6b3ad2ac56b19d6da65f9eb64e9d
286,143
def split_obs(obs):
    """Split a dict obs into state and images."""
    return obs['state'], obs['img']
ebb043f2b75c2a9e12883ef8fe49595c3a751483
38,450
import re


def parse_event_fields(lines, idx, event_dict):
    """
    Parses lines from a proto file that contain an event definition and
    stores it in event_dict
    """
    fields = []
    end_of_event = False

    # record all fields in event definition.
    # note: we don't check if there's a leading brace.
    while not end_of_event and idx < len(lines):
        line = lines[idx].rstrip()
        idx += 1

        # ex 1: uint32_t numSampleCLZExecuted; // number of sample_cl_z instructions executed
        # ex 2: char reason[256]; // size of reason
        match = re.match(r'^(\s*)([\w\*]+)(\s+)([\w]+)(\[\d+\])*;\s*(\/\/.*)*$', line)
        # group 1 -
        # group 2 type
        # group 3 -
        # group 4 name
        # group 5 [array size]
        # group 6 //comment
        if match:
            field = {
                "type": match.group(2),
                "name": match.group(4),
                "size": int(match.group(5)[1:-1]) if match.group(5) else 1,
                "desc": match.group(6)[2:].strip() if match.group(6) else "",
            }
            fields.append(field)

        end_of_event = re.match(r'(\s*)};', line)

    event_dict['fields'] = fields
    event_dict['num_fields'] = len(fields)
    return idx
f7c229d4a0315e3e8a7d0adbf5b91a4105f36592
614,839
def filename_dist(dist):
    """ Return the filename of a distribution. """
    if hasattr(dist, 'to_filename'):
        return dist.to_filename()
    else:
        return dist
3791f8b03b254108a2a97bc725fd2802187367bd
655,482
import logging


def create_ssh_key(nova_client, keypair_name, replace=False):
    """Create ssh key.

    :param nova_client: Authenticated nova client
    :type nova_client: novaclient.v2.client.Client
    :param keypair_name: Label to apply to keypair in OpenStack.
    :type keypair_name: str
    :param replace: Whether to replace the existing keypair if it already
        exists.
    :type replace: str
    :returns: The keypair
    :rtype: nova.objects.keypair
    """
    existing_keys = nova_client.keypairs.findall(name=keypair_name)
    if existing_keys:
        if replace:
            logging.info('Deleting key(s) {}'.format(keypair_name))
            for key in existing_keys:
                nova_client.keypairs.delete(key)
        else:
            return existing_keys[0]
    logging.info('Creating key %s' % (keypair_name))
    return nova_client.keypairs.create(name=keypair_name)
8b62e2378145cd9473530a390e97dd45e40a78ab
255,236
def full_email(service_account):
    """Generate the full email from service account"""
    return "{0}@{1}.{2}".format(service_account.name,
                                service_account.project,
                                service_account.suffix)
8b5305f794fd59b24adfefca1338db594fb799bc
33,897
def str2list(string):
    """Convert a string with comma separated elements to a python list.

    Parameters
    ----------
    string: str
        A string with comma separated elements

    Returns
    -------
    list
        A list.
    """
    string_list = [str_.rstrip(" ").lstrip(" ") for str_ in string.split(",")]
    return string_list
5563ef02e27f30a3316164c48148db62735a7469
372,242
def get_thirdparty_plugin(main_window, plugin_title):
    """Get a reference to the thirdparty plugin with the title given."""
    for plugin in main_window.thirdparty_plugins:
        if plugin.get_plugin_title() == plugin_title:
            return plugin
cfa5bf6e0933fa253ed97e0efb5f3c61e466b430
131,350
def lopen_loc(x):
    """Extracts the line and column number for a node that may have an
    opening parenthesis, brace, or bracket.
    """
    lineno = x._lopen_lineno if hasattr(x, "_lopen_lineno") else x.lineno
    col = x._lopen_col if hasattr(x, "_lopen_col") else x.col_offset
    return lineno, col
75545bb527dac4ab0ceffffbbd2c3f028ac4f898
666,509
import json


def open_vocab(vocab_path):
    """
    Opens the json containing the vocabulary used for the word2vec

    Args:
        vocab_path : str
            the path where the vocab json file is stored

    Returns:
        vocab_dic : dic
            the vocab dictionary
    """
    with open(vocab_path, "r", encoding="utf-8") as vo:
        # json.load() no longer accepts an encoding argument (removed in
        # Python 3.9); the file handle is already opened as UTF-8.
        vocab_dic = json.load(vo)
    return vocab_dic
4ff7d66bc53df99581c2e06f7f64aa13978c676d
488,345
def CombineMetrics(loss_metric_weight_pairs):
    """Combines metrics from `loss_metric_weight_pairs` according to weights.

    Keys must either exist in all metrics, in which case they will be
    processed as a weighted sum, or exist in only one metrics, in which case
    they will be copied.

    Args:
        loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
            weight is a float and each metrics is a dict with str keys and
            (metric_value, target_weight) values.

    Returns:
        A dict with the same set of keys as input metrics and values of
        (weighted_sum(metric_value), weighted_sum(target_weight)).

    Raises:
        ValueError: if there exists a metric that exists in more than one
            element of `loss_metric_weight_pairs` but not in all of them.
    """
    all_keys = set(
        [k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics])  # pylint: disable=g-complex-comprehension
    result = {}
    for k in all_keys:
        count = 0
        for loss_metrics, weight in loss_metric_weight_pairs:
            if k in loss_metrics:
                count += 1
        if count > 1 and count != len(loss_metric_weight_pairs):
            raise ValueError('Found metric %s which exists in more than one '
                             'but not all loss metrics.' % k)
        total_val = 0
        total_target_weight = 0
        for loss_metrics, weight in loss_metric_weight_pairs:
            if k in loss_metrics:
                val, target_weight = loss_metrics[k]
                if count == 1:
                    # Single metric, don't multiply by weight.
                    total_val = val * target_weight
                    total_target_weight = target_weight
                else:
                    # Total weighted sum of all predictions.
                    total_val += weight * val * target_weight
                    total_target_weight += weight * target_weight
        result[k] = (total_val / total_target_weight, total_target_weight)
    return result
f1859a6a250dcc911cd654cac9ea8aeaa0e73deb
409,574
def transform_point(point, matrix):
    """Transform point by matrix.

    :param list point: 2-item list
    :param list matrix: 6-item list representing transformation matrix
    :returns list: 2-item transformed point
    """
    x, y = point
    a, b, c, d, e, f = matrix
    # This leaves out some unnecessary stuff from the fact that the matrix is
    # homogenous coordinates.
    new_x = x * a + y * c + e
    new_y = x * b + y * d + f
    return [new_x, new_y]
92c16f1698db3e9b2d754aefb0b44a8bef2cf783
59,670
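A worked example: the identity matrix [1, 0, 0, 1, 0, 0] leaves a point unchanged, and [1, 0, 0, 1, tx, ty] translates it:

assert transform_point([3, 4], [1, 0, 0, 1, 0, 0]) == [3, 4]      # identity
assert transform_point([3, 4], [1, 0, 0, 1, 10, 20]) == [13, 24]  # translation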
from typing import List


def match_shape(tensor, *expected_shape):
    """Compare the given tensor's shape with what you expect it to be.

    This function serves two goals: it can be used both to assert that the
    size of a tensor (or part of it) is what it should be, and to query for
    the size of the unknown dimensions. The former result can be achieved
    with:

        >>> match_shape(t, 2, 3, 4)

    which is similar to

        >>> assert t.size() == (2, 3, 4)

    except that it doesn't use an assert (and is thus not stripped when the
    code is optimized) and that it raises a TypeError (instead of an
    AssertionError) with an informative error message. It works with any
    number of positional arguments, including zero. If a dimension's size is
    not known beforehand pass a -1: no check will be performed and the size
    will be returned.

        >>> t = torch.empty(2, 3, 4)
        >>> match_shape(t, 2, -1, 4)
        3
        >>> match_shape(t, -1, 3, -1)
        (2, 4)

    If the number of dimensions isn't known beforehand, an ellipsis can be
    used as a placeholder for any number of dimensions (including zero).
    Their sizes won't be returned.

        >>> t = torch.empty(2, 3, 4)
        >>> match_shape(t, ..., 3, -1)
        4
    """
    if not all(isinstance(d, int) or d is Ellipsis for d in expected_shape):
        raise RuntimeError(
            "Some arguments aren't ints or ellipses: %s" % (expected_shape,))
    actual_shape = tensor.size()
    error = TypeError("Shape doesn't match: (%s) != (%s)" % (
        ", ".join("%d" % d for d in actual_shape),
        ", ".join("..." if d is Ellipsis else "*" if d < 0 else "%d" % d
                  for d in expected_shape),
    ))
    if Ellipsis not in expected_shape:
        if len(actual_shape) != len(expected_shape):
            raise error
    else:
        if expected_shape.count(Ellipsis) > 1:
            raise RuntimeError("Two or more ellipses in %s"
                               % (tuple(expected_shape),))
        if len(actual_shape) < len(expected_shape) - 1:
            raise error
        pos = expected_shape.index(Ellipsis)
        expected_shape = (expected_shape[:pos]
                          + actual_shape[pos:pos + 1 - len(expected_shape)]
                          + expected_shape[pos + 1:])
    unknown_dims: List[int] = []
    for actual_dim, expected_dim in zip(actual_shape, expected_shape):
        if expected_dim < 0:
            unknown_dims.append(actual_dim)
            continue
        if actual_dim != expected_dim:
            raise error
    if not unknown_dims:
        return None
    if len(unknown_dims) == 1:
        return unknown_dims[0]
    return tuple(unknown_dims)
2206343129ce9b1078bcab567f3a49a66b64ecde
600,243
import time


def do_work(task):
    """ Sample task evaluation. Returns a result as string and an optional
    log message."""
    time.sleep(2)
    return "result", "Another information"
3e354a7e3547428f8ab73832278838a120a9cd58
442,513
def flag(state, name, value: bool = True):
    """Set the state variable as a flag (boolean value)"""
    state.vars[name] = bool(value)
    return state
7d7f42b51a900f2de647ce36ccd13bc8ae67c0b3
80,251
import json


def output_generic_from_traj(traj, out_file, name, f):
    """Creates and returns outputter function that dumps some data about
    atoms to json.

    The resulting file will be in the JSON Lines format, i.e. one
    JSON-document on each line.

    :param traj: traj object containing atoms objects.
    :param out_file: open file handle that the data should be appended to.
    :param str name: name of data, this will be the name of the json-field.
    :param function f: lambda used to extract some data from an atoms object.
    """
    def output():
        values = [f(atoms) for atoms in traj]
        data = {
            name: values
        }
        json.dump(data, fp=out_file)
        # Newline to generate JSON Lines data, one doc per line
        out_file.write('\n')
    return output
17a07cfcf13e6ef05a7a8895b30bb4432798b6f3
301,268
def is_in_period(month, period):
    """Return which months fall within a specified group of calendar months.

    Parameters
    ----------
    month : int or iterable of ints
        One or a series of calendar month numbers [1..12].
    period : tuple of ints
        Group of calendar month numbers to match against. These are usually
        consecutive, e.g., [3, 4, 5] for MAM, but they don't have to be.

    Returns
    -------
    return : bool or iterable of bools
        True for month numbers that are in the group.
    """
    try:
        return [m in period for m in month]
    except TypeError:
        return month in period
e973f1ec11ea4dc6b87834c75d6374bbbb152635
10,105
def _append_spc_date_to_storm_ids(primary_id_strings, spc_date_string):
    """Appends SPC date to each storm ID.

    N = number of storm objects

    :param primary_id_strings: length-N list of primary IDs.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :return: primary_id_strings: Same as input but with new IDs.
    """
    return [
        '{0:s}-{1:s}'.format(p, spc_date_string) for p in primary_id_strings
    ]
de5d54dfb322bdbf4ab7e261526b6e295ea1900c
103,427
def get_session(monitored_sess):
    """ Get Session object from MonitoredTrainingSession. """
    session = monitored_sess
    while type(session).__name__ != 'Session':
        session = session._sess
    return session
a377a2aeeed1cc7f625f49693adc6b1b6ee203cf
96,268
def _sanitize_ipv4_mapping(ip_str):
    """
    Sanitize IPv4 mapping in an expanded IPv6 address.

    This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
    If there is nothing to sanitize, returns an unchanged string.

    Args:
        ip_str: A string, the expanded IPv6 address.

    Returns:
        The sanitized output string, if applicable.
    """
    if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
        # not an ipv4 mapping
        return ip_str

    hextets = ip_str.split(':')
    if '.' in hextets[-1]:
        # already sanitized
        return ip_str

    ipv4_address = "%d.%d.%d.%d" % (
        int(hextets[6][0:2], 16),
        int(hextets[6][2:4], 16),
        int(hextets[7][0:2], 16),
        int(hextets[7][2:4], 16),
    )

    result = ':'.join(hextets[0:6])
    result += ':' + ipv4_address
    return result
ece3a84d69eb5eefed1ca4908ea03b095644e89d
552,015
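A quick check of the hextet-to-octet arithmetic (0x0a == 10):

expanded = '0000:0000:0000:0000:0000:ffff:0a0a:0a0a'
assert _sanitize_ipv4_mapping(expanded) == '0000:0000:0000:0000:0000:ffff:10.10.10.10'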
def get_legacy_msg_type(identifier):
    """ Convert an SBP spec identifier to the message type #define identifier
    according to the legacy API

    Only works for real messages, will assert when called for any other type
    """
    assert identifier[:4] == "MSG_"
    return "SBP_" + identifier
213f469458298c8e8ec4b9f12156cb60db9561f2
322,515
def epsi_vapor_top(Fr_top):
    """
    Calculates the vapor content of bubble layer at the top of column

    Parameters
    ----------
    Fr_top : float
        The Froude criterion at the top of column, [dimensionless]

    Returns
    -------
    epsi_vapor_top : float
        The vapor content of bubble layer at the top of column, [dimensionless]

    References
    ----------
    Dytnersky, page 207, formula 5.47
    """
    return Fr_top**0.5 / (1 + Fr_top**0.5)
30157399f659514ef2a041fa19e7902773581ed4
503,544
def _create_sitelink_campaign_extension_setting_mutate_operation(
    client, customer_id, campaign_id
):
    """Creates a MutateOperation for the sitelink campaign extension setting
    that will be removed.

    Args:
        client: an initialized GoogleAdsClient instance
        customer_id: the client customer ID.
        campaign_id: the campaign ID.

    Returns:
        The created MutateOperation for the sitelink campaign extension
        setting.
    """
    extension_type_enum = client.enums.ExtensionTypeEnum
    # Construct the campaign extension setting resource name, in format:
    # customers/{customer_id}/campaignExtensionSettings/{campaign_id}~{extension_type}
    resource_name = client.get_service(
        "CampaignExtensionSettingService"
    ).campaign_extension_setting_path(
        customer_id, campaign_id, extension_type_enum.SITELINK.name
    )
    # Create a MutateOperation for the campaign extension setting.
    mutate_operation = client.get_type("MutateOperation")
    mutate_operation.campaign_extension_setting_operation.remove = resource_name
    return mutate_operation
28dbaf410935b3e59c655c370aecd30ab0b465a3
288,776
def mult(value, arg):
    """Multiplies the value by the arg"""
    return int(value) * int(arg)
74afbb99fa473ffbe822ea9e44a54cc0ccb27677
453,038
def make_weights_for_balanced_classes(images, nclasses):
    """
    Generates weights to get balanced classes during training.
    To be used with weighted random samplers.

    :param images: list of training images in training set.
    :param nclasses: number of classes on training set.
    :return: list of weights for each training image.
    """
    count = [0] * nclasses
    for item in images:
        count[item[1]] += 1
    weight_per_class = [0.] * nclasses
    N = float(sum(count))
    for i in range(nclasses):
        weight_per_class[i] = N / float(count[i])
    weight = [0] * len(images)
    for idx, val in enumerate(images):
        weight[idx] = weight_per_class[val[1]]
    return weight
9d48b07161c403543174548b90ebda7b04ce760f
603,965
def no_op(arg):
    """Dummy do nothing function"""
    return arg
185a29bbfbcf6598d88b6a934a5bbc47017710c2
319,459
from typing import List


def read_docs(fnames: List[str]) -> List[str]:
    """
    Reads in the documents.

    param fnames: List of paths to .txt files to read.
    returns: List of input documents.
    """
    all_docs = []
    for docfile in fnames:
        with open(docfile, 'r') as f:  # close the handle instead of leaking it
            doc = f.read()
        all_docs.append(doc)
    return all_docs
26c8bd7c31ecfd84144ee5d180ae5d41353945f3
69,599
def update_trackers(frame, trackers, penalties=0, mark_new=True):
    """Update all the trackers using the new frame

    Args:
        frame ([type]): new frame
        trackers (List[TrackerObj]): List of trackers to update
        penalties (int, optional): Amount of penalties. Defaults to 0.
        mark_new (bool, optional): Mark tracker as new or old; if it's old,
            later the bounding box will be reset to be more accurate using
            the detection box. Defaults to True.

    Returns:
        List[TrackerObj]: The updated list without destroyed trackers
    """
    for trk in trackers:
        trk.update(frame)
        trk.new = mark_new
        trk.fails += penalties
    return [tr for tr in trackers if not tr.destroy()]
275905dbffc57639caf8a99d55a686d2f53ddf5f
480,724