content: string (39 to 9.28k chars) | sha1: string (40 chars) | id: int64 (8 to 710k)
def modelBaseline_forward_pass(img_t, ImageEncModel, PathDecModel, training_state=False, **kwargs):
    """
    Forward pass of the Lyft baseline.

    Args:
        img_t (tf.Tensor / np.array): Tensor with input map batch, multi-channel image,
            shape: [batch, x, y, ch].
        ImageEncModel (keras.Model): Keras model for image encoding.
        PathDecModel (keras.Model): Keras model for path decoding/generation.
        training_state (bool): True -> Training, False -> Inference.

    Returns:
        out_list (tf.Tensor): Predicted path points.
    """
    # Process input image
    img_feats = ImageEncModel(img_t, training=training_state)
    # Get path
    out_list = PathDecModel(img_feats, training=training_state)
    return out_list
836d460eaab7a3bfc22ac45772f171171d5494b6
355,295
def parse_args(argument_parser, system_arguments):
    """Call `parse_args` on the argument parser.

    Parameters
    ----------
    - argument_parser - the main argument parser for awsume
    - system_arguments - the arguments from the system

    Returns
    -------
    The parsed arguments.
    """
    return argument_parser.parse_args(system_arguments)
c4330256c9cd30fb0d68dc78cea73260d0069467
295,597
def read_csv_from_hdfs(spark, filename):
    """Reads a csv file from HDFS.

    Parameters
    ----------
    filename
        filepath and name of csv file.

    Returns
    -------
    PySpark DataFrame
    """
    sdf = spark.read.csv(filename, header=True, inferSchema=True)
    return sdf
fdb6ec0fdaf48ccacfb831849f0c10646507ca63
440,226
import csv


def import_trials_duration(path):
    """
    Import an already existing local csv of times.

    Parameters
    ----------
    path : string
        the path that the file is located in

    Returns
    -------
    list
        a list of times to be plotted or viewed
    """
    with open(path, "r") as times_file:
        csv_object = csv.reader(times_file)
        times = [[float(time) for time in row] for row in csv_object]
    return times
567ea262acbf8b758bf8a6dcc4620fdc659f8e94
78,121
def jaccard_similarity(first, second):
    """
    Given two sets, returns the jaccard similarity between them
    :param first: a set
    :param second: a set
    :return: the similarity as a double
    """
    return len(first & second) / len(first | second)
0907882e315b39adf5e07a6bfe704cf93209c2a6
611,197
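A quick usage check for jaccard_similarity above; note that two empty sets would divide by zero, so callers may want to guard that case:

    a = {"red", "green", "blue"}
    b = {"green", "blue", "yellow"}
    print(jaccard_similarity(a, b))  # 2 shared of 4 total -> 0.5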
def root_to_leaf(input_weights, taxa_tree):
    """Return a dictionary mapping taxa to weights.

    Each output weight should be the sum of input weights for a taxon and
    all of its ancestor taxa up to the 'root' of the tree.

    Use `taxa_tree.ancestors(<taxon>)` to get a list of ancestors for a taxon.
    """
    output_weights = {}
    for taxon, weight in input_weights.items():
        for ancestor in taxa_tree.ancestors(taxon):
            out_weight = output_weights.get(taxon, 0)
            output_weights[taxon] = out_weight + input_weights.get(ancestor, 0)
    return output_weights
ec115dd8efda1aac2410dac325ab0c2be993da0d
345,825
def format_currency(flt):
    """Return a formatted UK currency string from a float"""
    return '£{:,.2f}'.format(flt)
534f30069c3faf5f6fa5b53e3ea4b6c7fa2d6af9
58,506
import torch


def evaluate_accuracy(data_iter, net, device=torch.device('cpu')):
    """Evaluate accuracy of a model on the given data set."""
    net.eval()  # Switch to evaluation mode for Dropout, BatchNorm etc. layers.
    acc_sum, n = torch.tensor([0], dtype=torch.float32, device=device), 0
    for X, y in data_iter:
        # Copy the data to device.
        X, y = X.to(device), y.to(device)
        with torch.no_grad():
            y = y.long()
            acc_sum += torch.sum(torch.argmax(net(X), dim=1) == y)
            n += y.shape[0]
    return acc_sum.item() / n
7566ca16176e9e44530790b791dbfcb80389af08
359,995
def CreateAvailableNumbers(size):
    """Fill a list with available numbers given the size"""
    # available_numbers = the list being constructed
    available_numbers = []
    for x in range(1, size*size + 1):
        available_numbers.append(x)
    return available_numbers
a16c0ba4a8523a78f6df1acc0cf1a1f4c830a6e1
562,520
def find_subclasses(cls):
    """
    Recursively find all subclasses of given class cls.
    """
    subclasses = cls.__subclasses__()
    for subclass in list(subclasses):
        subclasses.extend(find_subclasses(subclass))
    return list(set(subclasses))
de56e483f113df09ea2732185f01c30f0f05e833
244,507
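A small demo of find_subclasses above, using a throwaway class hierarchy:

    class Base: pass
    class A(Base): pass
    class B(Base): pass
    class C(A): pass

    print(sorted(c.__name__ for c in find_subclasses(Base)))  # ['A', 'B', 'C']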
def inner(x, y):
    """
    Returns the inner product of two equal-length vectors.
    """
    n = len(x)
    assert len(y) == n
    total = 0  # avoid shadowing the builtin `sum`
    for i in range(0, n):
        total += x[i]*y[i]
    return total
36567d82c7c482311a40d9dab29b3ec80c772037
53,896
def pybulletenv_get_state(env):
    """
    Function used to get state information from the PyBullet physics engine.
    :param env: wrapped PyBullet gym environment
    :return: dict with body count, base pose/velocity and joint states
    """
    body_num = env.env._p.getNumBodies()
    # body_info = [env.env._p.getBodyInfo(body_i) for body_i in range(body_num)]
    floor_id, robot_id = 0, 1
    robot_base_pos_ori = env.env._p.getBasePositionAndOrientation(robot_id)
    robot_base_vel = env.env._p.getBaseVelocity(robot_id)
    joint_num = env.env._p.getNumJoints(robot_id)
    joint_state = []
    for joint_i in range(joint_num):
        joint_state.append(env.env._p.getJointState(robot_id, joint_i))
    state = {'body_num': body_num,
             'robot_base_pos_ori': robot_base_pos_ori,
             'robot_base_vel': robot_base_vel,
             'joint_num': joint_num,
             'joint_state': joint_state}
    return state
daf75efb08a90f9896ddd7b277b087a052386321
324,779
def filename_from_url(url):
    """
    Similar to Django slugify
    http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python
    :param url: url to convert
    :return: a valid filename
    """
    filename = "".join(i for i in url if i not in "\\/:*?<>|")
    return filename
094b5053f52cb7c6d60ec21d7625d722493f2014
625,157
def AnalyzeScanResults(input_api, whitelisted_files, offending_files):
    """Compares whitelist contents with the results of file scanning.

    input_api: InputAPI of presubmit scripts.
    whitelisted_files: Whitelisted files list.
    offending_files: Files that contain 3rd party code.

    Returns:
      A triplet of "unknown", "missing", and "stale" file lists.
      "Unknown" are files that contain 3rd party code but are not whitelisted.
      "Missing" are files that are whitelisted but don't really exist.
      "Stale" are files that are whitelisted unnecessarily.
    """
    unknown = set(offending_files) - set(whitelisted_files)
    missing = [f for f in whitelisted_files if not input_api.os_path.isfile(f)]
    stale = set(whitelisted_files) - set(offending_files) - set(missing)
    return (list(unknown), missing, list(stale))
6722c25bbe49c0e4718a17a001961ea0b1afad50
219,995
def get_attribute(attrs, name, default=None):
    """
    Get div attribute
    :param attrs: attribute dict
    :param name: name field
    :param default: default value
    :return: value
    """
    if 'data-' + name in attrs:
        return attrs['data-' + name]
    else:
        return default
9f642ca6061037d2e7b922c26bce50180ee94e97
188,825
import math


def sizeof_fmt(num, suffix='B'):
    """Get human-readable file size

    Args:
        num (int): Number of unit size.
        suffix (str): Unit of size (default: B).
    """
    mag = int(math.floor(math.log(num, 1024)))
    val = num / math.pow(1024, mag)
    if mag > 7:
        return f"{val:.1f}Y{suffix}"
    else:
        return f"{val:3.1f}{['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'][mag]}{suffix}"
f1ad8405c485a58dd3099f6fe4a6ca945b8c265f
190,176
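A couple of spot checks for sizeof_fmt above; note it assumes num is positive, since math.log fails on zero:

    print(sizeof_fmt(1536))   # 1.5KB
    print(sizeof_fmt(10**9))  # 953.7MB (binary, 1024-based)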
def bytelen(a):
    """Determine the length of a in bytes."""
    if hasattr(a, "nbytes"):
        return a.nbytes
    elif isinstance(a, (bytearray, bytes)):
        return len(a)
    else:
        raise ValueError(a, "cannot determine nbytes")
131a7a43f74814a7fa1523cc086b21d9a6036884
536,925
def get_current_account_id(context):
    """
    Get current account id.

    @param context: the context of the event
    @return: current account id
    """
    return context.invoked_function_arn.split(":")[4]
d3f8fce71edf575af7aebd7c75a9cc71f2900f86
658,346
def nobind(func):
    """This function decorator will prevent the function from being bound."""
    func.__qi_signature__ = "DONOTBIND"
    return func
8db94178e6e51dd324607c57dbeb62380a9fc4ba
444,222
def calc_num_beats(peaks):
    """Calculate the number of beats in an ECG recording

    Args:
        peaks (list[int]): list with indexes of peaks of QRS complexes

    Returns:
        int: the number of peaks
    """
    num_beats = len(peaks)
    return num_beats
46ca9bf1332bc5ec15505ddfb4577bd12aba27ae
251,975
def smallest_sums(partition: list, num_of_sums: int = 1) -> float:
    """
    Given a partition, return the sum of the smallest k parts (k = num_of_sums)

    >>> smallest_sums([[1,2],[3,4],[5,6]])
    3
    >>> smallest_sums([[1,2],[3,4],[5,6]], num_of_sums=2)
    10
    """
    sorted_sums = sorted([sum(part) for part in partition])
    return sum(sorted_sums[:num_of_sums])
dda9b846dd772a08936b205bc22d6b274d978166
415,912
def is_par(comm):
    """Check whether we are in a parallel environment."""
    return comm and comm.size > 1
07459a11b5fa3850364f724d5acf9c7c66e3a003
503,739
def euler_richardson_method_2system_ode2(f, g, dt, x, y, xp, yp, range):
    """
    The Euler-Richardson method working on a two-coupled system. Required for
    chapter 6, problem 5 in order to express two coupled parameterized
    functions as a single output y(x).

    :param f: The first second-order diffeq expressed as a lambda.
    :type f: lambda
    :param g: The second second-order diffeq expressed as a lambda.
    :type g: lambda
    :param dt: Step size. Smaller is better.
    :type dt: float
    :param x: The initial condition for x.
    :type x: float,int
    :param y: The initial condition for y.
    :type y: float,int
    :param xp: The initial condition for xp.
    :type xp: float,int
    :param yp: The initial condition for yp.
    :type yp: float,int
    :param range: A list which specifies the beginning and the ending of our domain.
    :type range: list
    :return: Returns a tuple for the t, x, y, xp, yp values as lists.
    :rtype: 5-tuple(list)
    """
    # f = x'' and g = y''; both require (t, x, y, x', y')

    # get initial conditions and set up arrays
    t = min(range)
    t_space = [t]
    x_space = [x]
    y_space = [y]
    xp_space = [xp]
    yp_space = [yp]

    while t <= max(range):
        # find midpoints
        t_mid = t + (1/2)*dt
        xp_mid = xp + 1/2*f(t, x, y, xp, yp)*dt
        yp_mid = yp + 1/2*g(t, x, y, xp, yp)*dt
        x_mid = x + (1/2)*xp*dt
        y_mid = y + (1/2)*yp*dt

        # get slopes
        xp_s = f(t_mid, x_mid, y_mid, xp_mid, yp_mid)
        yp_s = g(t_mid, x_mid, y_mid, xp_mid, yp_mid)
        x_s = xp_mid
        y_s = yp_mid

        # update values
        t += dt
        x += x_s*dt
        y += y_s*dt
        xp += xp_s*dt
        yp += yp_s*dt

        # append values
        t_space.append(t)
        x_space.append(x)
        xp_space.append(xp)
        y_space.append(y)
        yp_space.append(yp)

    return (t_space, x_space, y_space, xp_space, yp_space)
93a49bd76a83694f918c6df3312a5a88b7f5f45f
514,829
def times_numeric(text):
    """
    Convert a timepoint encoded within a filename into a float giving the
    value in nanoseconds.

    Parameters:
        text (str): e.g. "100us"

    Returns:
        float: e.g. 100000.0
    """
    number = float(text[:-2])
    if text.endswith("ns"):
        return number
    elif text.endswith("us"):
        return 1e3*number
    elif text.endswith("ms"):
        return 1e6*number
    else:
        print("scale could not be calculated")
c911ad104a72770318accce169657635db25c388
412,069
def make_tree(depth: int) -> tuple:
    """
    Trees of tuples; final leaves have None as values.
    """
    return (None, None) if depth == 0 else (
        make_tree(depth - 1), make_tree(depth - 1))
440100ade6695ce70e09ef5eabb3e45ee306986f
547,859
def filter_slaves(slaves, filters):
    """Filter slaves by attributes

    :param slaves: list of slaves to filter
    :param filters: list of functions that take a slave and return whether
        the slave should be included
    :returns: list of slaves that return true for all the filters
    """
    if filters is None:
        return slaves
    return [s for s in slaves if all([f(s) for f in filters])]
a4e35d7dd56df233b009445493b9fe66fdb87160
280,024
def modNeg90To90(angle_deg):
    """Return the angle wrapped into the range [-90, 90) degrees."""
    return (angle_deg + 90) % 180 - 90
f79d6bb1cba93b0d4bbbfcf6fd3c581b14da518c
670,200
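A few spot checks for modNeg90To90 above:

    print(modNeg90To90(100))  # -80
    print(modNeg90To90(-95))  # 85
    print(modNeg90To90(45))   # 45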
def get_generic_specific_genes(summary_data, generic_threshold):
    """
    This function returns a dictionary of generic genes and other
    (non-generic) genes, based on the statistics contained within the
    summary dataframes.

    Here genes are determined to be generic based on their ranking across
    multiple simulated experiments (i.e. generic genes are those that are
    highly ranked, meaning they were found to be consistently changed across
    multiple simulated experiments). All other genes are 'other'.

    Arguments
    ---------
    summary_data: df
        Dataframe containing gene summary statistics

    generic_threshold: int
        Threshold used to define generic genes
    """
    print(summary_data.shape)

    # Generic genes
    ls_generic_genes = list(
        summary_data[summary_data["Rank (simulated)"] >= generic_threshold]
        .set_index("Gene ID")
        .index
    )
    print(f"No. of generic genes: {len(ls_generic_genes)}")

    # Other (non-generic) genes
    ls_other_genes = list(
        summary_data[summary_data["Rank (simulated)"] < generic_threshold]
        .set_index("Gene ID")
        .index
    )
    print(f"No. of other genes: {len(ls_other_genes)}")

    # Create dictionary
    dict_genes = {
        "generic": ls_generic_genes,
        "other": ls_other_genes,
    }
    return dict_genes
590d67ebc2acb0fe84301989d938f859c0b26f7b
515,005
from typing import Optional


def _serialize_comment(prefix: str, comment: Optional[str]) -> str:  # pylint: disable=unsubscriptable-object
    """Serialize a comment, with an optional prefix."""
    return "%s%s" % (prefix, comment) if comment else ""
99acb1b16f0f8628a516687cf88af8587c6d317c
640,259
def clean(var):
    """Removes tabs, newlines and trailing whitespace"""
    if var is None:
        return ''
    return var.replace("\t", "").replace("\n", "").strip()
7fe7d3006cc632a71b8839106a5ea6da0a00ad2d
118,751
def bond_constraints(system):
    """For an OpenMM system, map constrained atom pairs to distances."""
    # this is ok
    bonds = {}
    nc = system.getNumConstraints()
    for c in range(nc):
        prm = system.getConstraintParameters(c)
        if len(prm) == 3:
            ia, ib, dd = prm
            ids = (ia, ib) if ib > ia else (ib, ia)
            bonds[ids] = dd
    return bonds
361bae83013217eb8bb1bbded8729c3f258615fb
527,135
def quoteattr(data):
    """Quote an attribute value.

    Escape less than the obvious xml.saxutils.quoteattr, e.g. do not
    convert `\n` to `&#10;`.
    """
    return '"%s"' % data.replace('"', "&quot;")
566e8abf3cb2a94f9ab48afa30b6b458d89f0f21
456,769
def num_to_note(note_int: int, custom_map=None) -> str:
    """
    Convert a musical pitch from integer representation to a string.

    "Merges" enharmonic equivalents, e.g. the integers for 'F#' and 'Gb'
    both become the string 'F#'.

    Args:
        note_int: The integer representation of a musical pitch.
        custom_map: Optional dict used to override entries of the default
            pitch-class table.

    Returns:
        The corresponding string for the pitch.
    """
    octave = str(note_int // 12)
    rev_note_map = {
        0: 'C', 1: 'C#', 2: 'D', 3: 'D#', 4: 'E', 5: 'F',
        6: 'F#', 7: 'G', 8: 'G#', 9: 'A', 10: 'A#', 11: 'B'
    }
    if custom_map is not None:
        rev_note_map.update(custom_map)
    note_str = rev_note_map[note_int % 12] + octave
    return note_str
3ff3b7bb7578ae28ab6f27abc7e7a29001444bbe
463,462
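A brief usage sketch for num_to_note above; 60 is middle C in the common MIDI convention, which lands in octave 5 under this function's integer-division octave rule:

    print(num_to_note(60))                        # C5
    print(num_to_note(61, custom_map={1: 'Db'}))  # Db5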
def with_first_last(xs):
    """Return a generator which indicates whether the returned element is
    the first or last.

    Args:
        xs: Generator to wrap.

    Yields:
        bool: Element is first.
        bool: Element is last.
        object: Element.
    """
    state = {"first": True}

    def first():
        if state["first"]:
            state["first"] = False
            return True
        else:
            return False

    prev = None
    have_prev = False
    cur = None
    have_cur = False

    for x in xs:
        cur = x
        have_cur = True

        if not have_prev:
            # We will need a `prev`, but there is no `prev` yet. Take the
            # current one as `prev` and skip to the next iteration.
            prev = cur
            have_prev = True
            continue

        # We currently have available `prev` and `cur`. We will return `prev`
        # and, after the loop has finished, return `cur` as the last one.
        yield first(), False, prev
        prev = cur

    if have_cur:
        yield first(), True, cur
2558b643e54388a35be01dbe2d1c3340f0667d35
294,577
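A quick demonstration of with_first_last above:

    for is_first, is_last, item in with_first_last(iter("abc")):
        print(is_first, is_last, item)
    # True False a
    # False False b
    # False True c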
def add_ner_prompts(df, prompt_config, sep):
    """
    Combine sentences and entities to create prompts in a DataFrame with
    'sents' and 'entities' columns.

    Adds 'prompts' and 'empty_prompts' (prompt without answer) columns to
    the DataFrame.
    """
    prompts = []
    empty_prompts = []
    prompt_sample_structure = prompt_config['sent_intro'] + ' {}\n' + prompt_config['retrieval_message'] + ' {}'
    empty_prompt_sample_structure = prompt_config['sent_intro'] + ' {}\n' + prompt_config['retrieval_message']
    for i, row in df.iterrows():
        sent = row['sents']
        entities = sep.join(row['entities'])
        prompt = prompt_sample_structure.format(sent, entities)
        empty_prompt = empty_prompt_sample_structure.format(sent)
        prompts.append(prompt)
        empty_prompts.append(empty_prompt)
    df['prompts'] = prompts
    df['empty_prompts'] = empty_prompts
    return df
a43f4f255872a139b3273ec3322699105d91806c
111,133
def get_db_syllable_side(node, matcher):
    """
    Get which side is suitable according to syllable matching. This function
    expects the node to have both syllable neighbours.

    Return a negative int if the char node should be added to the left
    neighbour, a positive int if to the right, 0 if not determined.
    """
    if node.prev.syllable is None or node.next.syllable is None:
        raise Exception('Char node does not have both syllable neighbours')
    left_opt_sylls = node.prev.syllable.left_neighbours(node) + node.next.syllable.right_neighbours()
    right_opt_sylls = node.prev.syllable.left_neighbours() + node.next.syllable.right_neighbours(node)
    left_score = matcher.syllable_concord(left_opt_sylls)
    right_score = matcher.syllable_concord(right_opt_sylls)
    return right_score - left_score
5f08984b1805f3067b8e9483e0b33b3b19465ee7
595,589
def get_resources(data):
    """Gets resources from the input CloudFormation file content."""
    return data['Resources']
c5dc1e57fc88b2b0f95e055b2077422bee7935b1
380,957
def version_tuple_to_str(version, sep='.'):
    """Join the version components using `sep` and return the string."""
    return sep.join([str(x) for x in version])
2170b33a66762666e3e9e2ad2af3c5a5bec255e9
431,870
import sqlite3


def commit_data(conn):
    """Commit data to db"""
    try:
        conn.commit()
        conn.close()
    except sqlite3.Error as e:
        print(e)
    return None
5107563c659c0acdd9d2d59c526284ffe38a4740
679,763
import colorsys


def scale_lightness(rgb, scale_l):
    """
    Scales the lightness of a color. Takes in a color defined in RGB,
    converts to HLS, lightens by a factor, and then converts back to RGB.
    """
    # convert rgb to hls
    h, l, s = colorsys.rgb_to_hls(*rgb)
    # manipulate h, l, s values and return as rgb
    return colorsys.hls_to_rgb(h, min(1, l * scale_l), s=s)
2fee635f26419cfe8abc21edb0092a8c916df6ef
13,661
from typing import Any, Dict


def convert_dict_to_yaml(input_dict: Dict[str, Any], indent_spaces: int = 4, indent_level: int = 0) -> str:
    """
    The original yaml.dump needed improvements; this is a recursive
    re-implementation of yaml.dump(config_dict).

    Args:
        input_dict: Dict to be converted to yaml.
        indent_spaces: How many spaces per indent level.
        indent_level: Current indent level for the recursion.

    Returns:
        YAML string.
    """
    # setup key-value collector and indent level
    ret_list = []
    indent = " " * (indent_level * indent_spaces)
    # loop input dict
    for key, value in input_dict.items():
        # setup collector for single key-value pair
        single_ret_list = [f"{indent}{key}:"]
        # check type
        if isinstance(value, bool):
            # bools as lower-case
            value_str = str(value).lower()
        elif isinstance(value, (int, float)):
            # leave float conversion to python
            value_str = str(value)
        elif isinstance(value, str):
            # put quotes around strings
            value_str = f"\"{value}\""
        elif value is None:
            # None is null in yaml
            value_str = "null"
        elif isinstance(value, dict):
            # iterate dictionaries recursively
            value_str = "\n" + convert_dict_to_yaml(value, indent_spaces=indent_spaces, indent_level=indent_level + 1)
        else:
            raise ValueError(f"dict to yaml, value type not understood: {value}")
        # concatenate the single key-value pair and add it to the key-value collector
        single_ret_list += [f" {value_str}"]
        ret_list += ["".join(single_ret_list)]
    # join the collected key-value pairs with newline
    return "\n".join(ret_list)
7dff030116522084089c497cab82967bbaed8b5e
83,674
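A small round-trip check of convert_dict_to_yaml above:

    cfg = {"name": "run1", "debug": False, "opt": {"lr": 0.01, "decay": None}}
    print(convert_dict_to_yaml(cfg))
    # name: "run1"
    # debug: false
    # opt:
    #     lr: 0.01
    #     decay: null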
from urllib.parse import urljoin, urlsplit


def make_absolutizing_url_mutator(baseurl):
    """Return a function that makes relative URLs absolute.

    Parameters
    ----------
    baseurl : string, absolute URL
        The absolute URL with which to combine relative URLs

    Returns
    -------
    A mutator function suitable for use with
    :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`.

    Notes
    -----
    This function is designed for usage with
    :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`. It returns a
    mutator function that can be passed to this method. The mutator will take
    relative URLs and make them absolute by combining them with the *baseurl*
    argument. Input URLs that are already absolute will be unchanged.
    """
    def mutator(url):
        if not url:
            return url
        if urlsplit(url).netloc:
            return url  # this URL is absolute
        return urljoin(baseurl, url)

    return mutator
b0068a6fdc9784a3322b24f1835edb480f81a19d
572,542
def decode_input(input_: str) -> list[int]:
    """Decode the puzzle input

    :param input_: puzzle input as an integer string
    :return: list of digits
    """
    cups = list(map(int, input_))
    assert len(cups) == len(set(cups))
    return cups
970291b63ae56e057ce16a2a79e4a18cd91fc0a9
289,262
def _is_mfa_admin_creds(section):
    """
    Filter for ~/.aws/credentials profile section names.
    """
    if not section.startswith('admin-'):
        return False
    if section.endswith('-long-term') or section.endswith('-root'):
        return False
    return True
0204bb51e39b7b38adcb576b00c429ab6cbea400
348,761
def decode_bytes(data: bytes) -> str:
    """
    Takes a bytes object and returns a decoded version of the data.
    Must be reversible with `encode_string()`.
    """
    return data.decode("utf-8")
01bde4df84f0020845910c406c383c79adabf633
200,985
def lzip(*args, **kwargs):
    """Take a zip generator and make a list, for Python 3 compatibility."""
    return list(zip(*args, **kwargs))
c1ee69397464e053bcc606d86beae4dfd9620691
219,362
def mapping_fields(mapping, parent=[]):
    """
    Reads an Elasticsearch mapping dictionary and returns a list of fields
    joined with dot notation.

    args:
        mapping: the dictionary to parse
        parent: name parts for a parent key, used with a recursive call
    """
    rtn_obj = {}
    for key, value in mapping.items():
        new_key = parent + [key]
        new_key = ".".join(new_key)
        rtn_obj.update({new_key: value.get('type')})
        if value.get('properties'):
            rtn_obj.update(mapping_fields(value['properties'], [new_key]))
        elif value.get('fields'):
            rtn_obj.update(mapping_fields(value['fields'], [new_key]))
            rtn_obj[new_key] = [rtn_obj[new_key]] + \
                list(value['fields'].keys())
    return rtn_obj
e196207b4f08dbb25adc92a0a4214804bb9827ce
317,051
def min_max(field, validator_class):
    """
    Returns the maximum minimum and minimum maximum value for the given
    validator class of the given field.

    :param field: WTForms Field object
    :param validator_class: WTForms Validator class

    Example::

        class MyForm(Form):
            some_integer_field = IntegerField(
                validators=[Length(min=3, max=6), Length(min=4, max=7)]
            )

        form = MyForm()

        min_max(form.some_integer_field, Length)
        # {'min': 4, 'max': 6}
    """
    min_values = []
    max_values = []
    for validator in field.validators:
        if isinstance(validator, validator_class):
            if validator.min is not None:
                min_values.append(validator.min)
            if validator.max is not None:
                max_values.append(validator.max)

    data = {}
    if min_values:
        data['min'] = max(min_values)
    if max_values:
        data['max'] = min(max_values)
    return data
a0f70160346f929c6f4e0275570d6d65f2210c9b
421,434
def _s2_st_to_uv(component: float) -> float:
    """
    Convert S2 ST to UV.

    This is done using the quadratic projection that is used by default for
    S2. The C++ and Java S2 libraries use a different definition of the ST
    cell-space, but the end result in IJ is the same. The below uses the C++
    ST definition.

    See s2geometry/blob/c59d0ca01ae3976db7f8abdc83fcc871a3a95186/src/s2/s2coords.h#L312-L315
    """
    if component >= 0.5:
        return (1.0 / 3.0) * (4.0 * component ** 2 - 1.0)
    return (1.0 / 3.0) * (1.0 - 4.0 * (1.0 - component) ** 2)
a78fa1ac89f74b3aa9bfc0585d10f440e6392391
655,486
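A few sanity checks for _s2_st_to_uv above; the quadratic projection maps ST [0, 1] onto UV [-1, 1] with the midpoint fixed at 0:

    print(_s2_st_to_uv(0.0))  # -1.0
    print(_s2_st_to_uv(0.5))  # 0.0
    print(_s2_st_to_uv(1.0))  # 1.0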
import calendar


def dt2unix(dto):
    """
    dt2unix : Convert a datetime object to a UNIX time stamp

    Usage: unixTS = dt2unix(dto)

    Input: dto, a datetime object
    Output: a unix time stamp (int)
    """
    return calendar.timegm(dto.utctimetuple())
26a0352b69d989824f906ff149f5ed50bbd91a0d
479,005
def relabel(score):
    """Extract centrality measure type"""
    return score.split("_")[0]
3534c4485e08e94c81d6226b29727cf5a4a09b31
559,657
def maybe_num(x):
    """Converts string x to an int if possible, otherwise a float if
    possible, otherwise returns it unchanged."""
    x = x.strip('[').strip(']')
    try:
        return int(x)
    except ValueError:
        try:
            return float(x)
        except ValueError:
            return x
44f4e2cc765016521ff122462363aeef35433182
237,241
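Spot checks for maybe_num above; note that the bracket-stripping happens before any conversion is attempted:

    print(maybe_num("[42]"))   # 42
    print(maybe_num("3.14"))   # 3.14
    print(maybe_num("hello"))  # hello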
def greatest_subarray_sum(nums: list[int], inverse_mode: bool = False) -> int:
    """
    Find the greatest subarray sum in nums, or find the smallest subarray
    sum using the inverse_mode.
    """
    def min_max_helper(x: list[int]) -> int:
        return min(x) if inverse_mode else max(x)

    result = nums[0]
    last_total = nums[0]
    for num in nums[1:]:
        curr_total = min_max_helper([last_total + num, num])
        result = min_max_helper([result, curr_total])
        last_total = curr_total
    return result
7be5e7bd81f52cb01b158ca05fa5b6fffbe238f5
99,217
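greatest_subarray_sum above is Kadane's algorithm; a quick check in both modes:

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(greatest_subarray_sum(nums))                     # 6, from [4, -1, 2, 1]
    print(greatest_subarray_sum(nums, inverse_mode=True))  # -5, from [-5]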
def _get_proto_filename(src):
    """Assemble the filename for a proto

    Args:
        src: the .proto <File>

    Returns:
        <string> of the filename.
    """
    parts = src.short_path.split("/")
    if len(parts) > 1 and parts[0] == "..":
        return "/".join(parts[2:])
    return src.short_path
b8b775e8b3a2cad4fe908cb4e614213a4e78b336
137,044
import re
from typing import List, Tuple


def get_issues_in_text(repo_name: str, text: str) -> List[Tuple[str, int]]:
    """
    See https://docs.github.com/en/free-pro-team@latest/github/writing-on-github/autolinked-references-and-urls#issues-and-pull-requests

    :param repo_name: full repo name like "username/repository_name"
    :param text: the text to extract linked issues from (e.g. commits)
    :return: Tuples of (repo_name, issue_number)
    """
    pattern = r"([\w\.\-_]+/[\w\.\-_]+)#(\d+)|#(\d+)|GH-(\d+)"
    result = []
    for match in re.finditer(pattern, text):
        # print(match.string)
        # print(text[match.start():match.end()])
        # print(match.groups())
        groups = match.groups()
        if groups[0] is not None and groups[1] is not None:
            result.append((groups[0], int(groups[1])))
        elif groups[2] is not None:
            result.append((repo_name, int(groups[2])))
        elif groups[3] is not None:
            result.append((repo_name, int(groups[3])))
    url_pattern = r"https://github\.com/([\w\.\-_]+/[\w\.\-_]+)/(issues|pull)/(\d+)"
    for match in re.finditer(url_pattern, text):
        result.append((match.groups()[0], int(match.groups()[2])))
    return result
23b3072b18b12db89682908b341b744817b5ddaf
415,218
import ast
from typing import Dict


def _get_keyword_args_by_names(
    call: ast.Call,
    *names: str,
) -> Dict[str, ast.expr]:
    """Returns keywords of ``call`` by specified ``names``."""
    keyword_args = {}
    for keyword in call.keywords:
        if keyword.arg in names:
            keyword_args[keyword.arg] = keyword.value
    return keyword_args
98c2954bc0aefb933356760f8b0a36629bb07e71
122,409
def get_sample_count(profileDict):
    """
    Gets the number of samples taken from a dictionary representing data
    from an Arm MAP file

    Args:
        profileDict (dict): Dictionary from which to obtain the count of
            samples

    Returns:
        The number of samples taken (non-negative integer)
    """
    assert isinstance(profileDict, dict)
    return profileDict["samples"]["count"]
26d31dd6fae3e0c4adf567706387712397e7fd28
111,838
from typing import List


def _partition(nums: List[int], left: int, right: int) -> int:
    """
    Helper function to partition the given sub-list.

    :param nums: list[int]
    :param left: int
    :param right: int
    :return: int
    """
    # The pivot has already been moved to the left.
    pivot = nums[left]

    # Iterate over the sub-list, use a pointer to keep track of the smaller
    # part, and swap the current number with the pointer as necessary
    smaller_ptr = left + 1
    i = left + 1
    while True:
        while i <= right and nums[i] > pivot:
            i += 1
        if i > right:
            break
        if i != smaller_ptr:
            nums[smaller_ptr], nums[i] = nums[i], nums[smaller_ptr]
        smaller_ptr += 1
        i += 1

    if left != smaller_ptr - 1:
        nums[left], nums[smaller_ptr - 1] = nums[smaller_ptr - 1], nums[left]
    return smaller_ptr - 1
5190a697c1a7412dc0dd13277a0ff48db348e82a
61,756
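A hedged quicksort driver built on _partition above; the recursive wrapper is my own sketch, not part of the original snippet:

    def _quicksort(nums, left, right):
        # Sort nums[left..right] in place using _partition.
        if left >= right:
            return
        p = _partition(nums, left, right)
        _quicksort(nums, left, p - 1)
        _quicksort(nums, p + 1, right)

    data = [9, 4, 7, 1, 3]
    _quicksort(data, 0, len(data) - 1)
    print(data)  # [1, 3, 4, 7, 9]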
import toml


def data_from_toml_lines(lines):
    """
    Return a mapping of data from an iterable of TOML text ``lines``.

    For example::

        >>> lines = ['[advisory]', 'id = "RUST1"', '', '[versions]', 'patch = [">= 1"]']
        >>> data_from_toml_lines(lines)
        {'advisory': {'id': 'RUST1'}, 'versions': {'patch': ['>= 1']}}
    """
    return toml.loads("\n".join(lines))
a82020a5defea4356ece2f7d01ed754feafafeae
684,431
def name_to_stackdriver_type(name):
    """Stackdriver custom metric type name for the given metric name."""
    return 'custom.googleapis.com/buildtool/{name}'.format(name=name)
a194ac748808391872d96242ca9c99bdd18b5cff
302,840
def parse_command_line_watch_list(watch_str):
    """Parse string that represents devices to watch.

    Valid examples:
        * aa:bb:cc:dd:ee:ff
          - Threshold of 1 for the given MAC address
        * aa:bb:cc:dd:ee:ff,11:22:33:44:55:66
          - This means look for any traffic from either address
        * aa:bb:cc:dd:ee:ff=1337, 11:22:33:44:55:66=1000
          - This means look for 1337 bytes for the first address,
            and 1000 for the second
        * my_ssid, 11:22:33:44:55:66=1000
          - This means look for 1 byte from my_ssid or 1000 for the second
        * 11:22:33:44:55:66=-30
          - This means trigger if 11:22:33:44:55:66 is seen at a power level
            >= -30dBm (negative value implies power)

    Returns dict in this format:
        {'aa:bb:cc:dd:ee:ff': {'threshold': 100, 'power': None},
         '11:22:33:44:55:66': {'threshold': None, 'power': -30}}
    """
    watch_list = [i.strip() for i in watch_str.split(',')]
    watch_dict = {}
    for watch_part in watch_list:
        power = None
        threshold = None
        if '=' in watch_part:
            # dev_id is a MAC, BSSID, or SSID
            dev_id, val = [i.strip() for i in watch_part.split('=')]
            try:
                val = int(val)
            except ValueError:
                # Can't parse with "dev_id=threshold" formula, so assume the
                # '=' sign was part of the ssid
                dev_id = watch_part
            else:
                # Only interpret val once it parsed cleanly as an int
                if val > 0:
                    threshold = val
                else:
                    power = val
        else:
            dev_id = watch_part
        watch_dict[dev_id] = {'threshold': threshold, 'power': power}
    return watch_dict
283c8348e734aea896f58c26f688fc5b138e8534
291,619
import base64


def decode_base64(b64: str):
    """Decodes a base64 value with or without padding."""
    # Accept unpadded base64 by appending padding; b64decode won't accept it otherwise
    if 2 <= len(b64) % 4 <= 3 and not b64.endswith('='):
        b64 += '=' * (4 - len(b64) % 4)
    return base64.b64decode(b64, validate=True)
2e05bd8724108e0f7e1def89d05538275896d2df
596,509
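A quick check of decode_base64 above with padded and unpadded input:

    print(decode_base64("aGVsbG8="))  # b'hello'
    print(decode_base64("aGVsbG8"))   # b'hello' (padding restored)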
def _is_for_synthesis(study_annotations):
    """Checks if the study whose annotations are in study_annotations is
    intended for synthesis (aka nominated)."""
    for ann in study_annotations:
        if '@property' in ann:
            if ann['@property'] == 'ot:notIntendedForSynthesis':
                val = ann['$']
                if val is True:
                    return False
    return True
9d2e2c9e85e127d3bff586e4d5c411b0f69607bc
378,611
def _convert_soap_method_args(*args):
    """Convert arguments to be consumed by a SoapClient method

    Soap client requires a list of named arguments:

    >>> _convert_soap_method_args('a', 1)
    [('arg0', 'a'), ('arg1', 1)]
    """
    soap_args = []
    for arg_n, arg in enumerate(args):
        soap_args.append(('arg' + str(arg_n), arg))
    return soap_args
a57c993c5bff622d52a5b8bb06259644e725ec6b
56,621
import random
import time


def _custom_libname(filetype, salt_len=2):
    """
    Choose a custom name for a library file.

    Format: <cur_time>_<salt>.<filetype>
        filetype: file extension
        cur_time: current time
        salt: salt_len number of digits

    Args:
        filetype: The extension the library filename will be saved as
        salt_len: The length of the salt, a sequence of random digits
            (defaults to 2)

    Returns:
        Library file name string
    """
    filetype = filetype or 'lib'
    cur_time = time.time_ns()
    salt = ''.join(str(d) for d in random.sample(range(10), salt_len))
    return "%d_%s.%s" % (cur_time, salt, filetype)
f6edf0c0ced94a0e418519a353c96d521934b763
655,321
def intersection(lst1, lst2):
    """
    Takes two lists and returns a list of the elements in common between
    the lists.

    :param lst1: list, any type
    :param lst2: list, any type
    :return: list, any type
    """
    return list(set(lst1).intersection(lst2))
6b34ac896605cb0cbb9c7ee25b65cb9cdcded828
418,137
def format_bytes(bytes, unit, SI=False):
    """
    Converts bytes to common units such as kb, kib, KB, mb, mib, MB

    Parameters
    ---------
    bytes: int
        Number of bytes to be converted
    unit: str
        Desired unit of measure for output
    SI: bool
        True -> Use SI standard e.g. KB = 1000 bytes
        False -> Use JEDEC standard e.g. KB = 1024 bytes

    Returns
    -------
    str:
        E.g. "7 MiB" where MiB is the original unit abbreviation supplied
    """
    if unit.lower() in "b bit bits".split():
        return f"{bytes*8} {unit}"
    unitN = unit[0].upper() + unit[1:].replace("s", "")  # Normalised
    reference = {"Kb Kib Kibibit Kilobit": (7, 1),
                 "KB KiB Kibibyte Kilobyte": (10, 1),
                 "Mb Mib Mebibit Megabit": (17, 2),
                 "MB MiB Mebibyte Megabyte": (20, 2),
                 "Gb Gib Gibibit Gigabit": (27, 3),
                 "GB GiB Gibibyte Gigabyte": (30, 3),
                 "Tb Tib Tebibit Terabit": (37, 4),
                 "TB TiB Tebibyte Terabyte": (40, 4),
                 "Pb Pib Pebibit Petabit": (47, 5),
                 "PB PiB Pebibyte Petabyte": (50, 5),
                 "Eb Eib Exbibit Exabit": (57, 6),
                 "EB EiB Exbibyte Exabyte": (60, 6),
                 "Zb Zib Zebibit Zettabit": (67, 7),
                 "ZB ZiB Zebibyte Zettabyte": (70, 7),
                 "Yb Yib Yobibit Yottabit": (77, 8),
                 "YB YiB Yobibyte Yottabyte": (80, 8),
                 }
    key_list = '\n'.join([" b Bit"] + [x for x in reference.keys()]) + "\n"
    if unitN not in key_list:
        raise IndexError(f"\n\nConversion unit must be one of:\n\n{key_list}")
    units, divisors = [(k, v) for k, v in reference.items() if unitN in k][0]
    if SI:
        divisor = 1000**divisors[1] / 8 if "bit" in units else 1000**divisors[1]
    else:
        divisor = float(1 << divisors[0])
    value = bytes / divisor
    return f"{value:,.0f} {unitN}{(value != 1 and len(unitN) > 3)*'s'}"
19590cba4720cf0c3a8234f959d739522fd4ef5f
498,973
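Spot checks for format_bytes above in both JEDEC and SI modes:

    print(format_bytes(1024, "KiB"))               # 1 KiB
    print(format_bytes(2_000_000, "MB", SI=True))  # 2 MB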
from typing import Dict, List, Tuple


def parse_flight_data() -> Dict[str, List[Tuple[str, str, int]]]:
    """Parse flight data and return a dictionary of flights by month/year

    Returns:
        Dict[str, List[Tuple[str, str, int]]]:
            d['mm-yy'] = [(origin_code, dest_code, aircraft_code), ...]
    """
    data_paths = {
        'test': 'data/test_flight_data.csv',
        '11-19': 'data/Nov2019_flight_data.csv',
        '12-19': 'data/Dec2019_flight_data.csv',
        '01-20': 'data/Jan2020_flight_data.csv',
        '02-19': 'data/Feb2019_flight_data.csv',
        '03-19': 'data/Mar2019_flight_data.csv',
        '04-19': 'data/Apr2019_flight_data.csv',
        '05-19': 'data/May2019_flight_data.csv',
        '06-19': 'data/Jun2019_flight_data.csv',
        '07-19': 'data/Jul2019_flight_data.csv',
        '08-19': 'data/Aug2019_flight_data.csv',
    }
    flight_data = dict()
    for month_year, data_path in data_paths.items():
        flight_data[month_year] = list()
        with open(data_path) as f:
            lines_to_skip = 1
            i = 0
            for line in f:
                if i < lines_to_skip:
                    i += 1
                    continue
                split_line = line.rstrip().split(',')
                try:
                    int(split_line[11])
                except ValueError:
                    raise Exception('Bad line: {}'.format(line))
                flight_data[month_year].append(
                    (split_line[4], split_line[8], int(split_line[11])))
    return flight_data
c9dbcdb70d5097b34c1e9c9f76128f583eb63102
84,417
def route(rule, **options):
    """Like :meth:`Flask.route` but for nereid.

    .. versionadded:: 3.0.7.0

    Unlike the implementation in flask and flask.blueprint, this route
    decorator does not require an existing nereid application or a blueprint
    instance. Instead the decorator adds an attribute to the method called
    `_url_rules`.

    .. code-block:: python
        :emphasize-lines: 1,7

        from nereid import route

        class Product:
            __name__ = 'product.product'

            @classmethod
            @route('/product/<uri>')
            def render_product(cls, uri):
                ...
                return 'Product Information'
    """
    def decorator(f):
        if not hasattr(f, '_url_rules'):
            f._url_rules = []
        f._url_rules.append((rule, options))
        return f

    return decorator
c7f33af4e8fa10090e5b6a90532707fd59688885
28,755
def mask_size(classes):
    """Computes per-class segmentation mask size."""
    return classes.sum(dim=-2)
9865d32118b8836a1694b2a40f8ae624cbca1a4e
563,135
def is_next_tile_trap(prev_tiles):
    """Check if the next tile is a trap."""
    left, center, right = prev_tiles
    return left != right
56168d12a41b5e4b3623cdd0e1985ef86757f35f
513,706
def _sparse_ftrs_indices1(ftr_name):
    """Returns the name of the 1st axis indices for `ftr_name`"""
    return f"{ftr_name}_indices1"
6d9cbb3195493e99a9132427b85a3eecda89c442
75,421
def _GenDataArray(resources, entry_pattern, array_name, array_type, data_getter):
    """Generates a C++ statement defining a literal array containing the hashes.

    Args:
        resources: A sorted list of |Resource| objects.
        entry_pattern: A pattern to be used to generate each entry in the
            array. The pattern is expected to have a place for data and one
            for a comment, in that order.
        array_name: The name of the array being generated.
        array_type: The type of the array being generated.
        data_getter: A function that gets the array data from a |Resource|
            object.

    Returns:
        A string containing a C++ statement defining an array.
    """
    lines = [entry_pattern % (data_getter(r), r.name) for r in resources]
    pattern = """const %(type)s %(name)s[] = {
%(content)s
};
"""
    return pattern % {
        'type': array_type,
        'name': array_name,
        'content': '\n'.join(lines),
    }
6050f6bc55ff83379f0ea368f36ea59543a45179
424,724
def save_nameserver_ip(instance, filepath="/etc/resolv.conf"):
    """
    Collect the nameserver IPs listed in /etc/resolv.conf into a list.
    """
    nameservers_ip = []
    nameservers = instance.execute(["cat", filepath]).stdout.split("\n")
    for nameserver in nameservers:
        nameserver = nameserver.split(" ")
        if len(nameserver) == 2 and nameserver[0] == "nameserver":
            nameservers_ip.append(nameserver[1])
    return nameservers_ip
a6cb7c1658c7563fa75e95e75e394aa4398148ff
489,817
def extract_entities(openapi_str):
    """
    Extract entities from an OpenAPI string, where entities are defined as
    anything within "```"

    :param openapi_str: The OpenAPI str
    :type openapi_str: ```str```

    :return: Entities
    :rtype: ```List[str]```
    """
    entities, ticks, space, stack = [], 0, 0, []

    def add_then_clear_stack():
        """Join entity; if non-empty, add to entities. Clear stack."""
        entity = "".join(stack)
        if entity:
            entities.append(entity)
        stack.clear()

    for idx, ch in enumerate(openapi_str):
        if ch.isspace():
            space += 1
            add_then_clear_stack()
            ticks = 0
        elif ticks > 2:
            ticks, space = 0, 0
            stack and add_then_clear_stack()
            stack.append(ch)
        elif ch == "`":
            ticks += 1
        elif stack:
            stack.append(ch)
    add_then_clear_stack()
    return entities
47f3b7ec9a9a282bcc90ca35669687e62fd031ea
620,889
def is_pgn(filename):
    """Tells if a filename is a pgn."""
    return filename[-4:] == ".pgn"
a5250d765fe57f87eb295b88c0b899ac6f87b61e
396,568
def histogram(s):
    """
    Returns a histogram (dictionary) of the # of letters in string s.

    The letters in s are keys, and the count of each letter is the value.
    If the letter is not in s, then there is NO KEY for it in the histogram.

    Example: histogram('') returns {},
    histogram('all') returns {'a':1,'l':2},
    histogram('abracadabra') returns {'a':5,'b':2,'c':1,'d':1,'r':2}

    Parameter s: The string to analyze
    Precondition: s is a string (possibly empty).
    """
    # DICTIONARY COMPREHENSION
    # return {x: s.count(x) for x in s}

    # ACCUMULATOR PATTERN
    result = {}
    for x in s:
        result[x] = s.count(x)
    return result
2580040d031ae9520c458714787031cba17badd2
90,241
def macc_chardis(row):
    """Assigns an integer to distinguish rows of charging cycles from those
    of discharging cycles: -1 for discharging and +1 for charging."""
    if row['Md'] == 'D':
        return -1
    else:
        return 1
f83bdd0dbcdbdc09e9579b43a26b658d95e33701
393,959
from typing import Any


def _get_name(cls: Any) -> str:
    """
    >>> from typing import Tuple, Callable, Any, List
    >>> _get_name(int)
    'int'
    >>> _get_name(Any)
    'Any'
    >>> _get_name(List)
    'List'
    >>> _get_name(List[int])
    'List'
    >>> _get_name(List[Any])
    'List'
    >>> _get_name(Tuple)
    'Tuple'
    >>> _get_name(Tuple[int, float])
    'Tuple'
    >>> _get_name(Tuple[Any, ...])
    'Tuple'
    >>> _get_name(Callable)
    'Callable'
    """
    if hasattr(cls, '_name'):
        return cls._name
    elif hasattr(cls, '__name__'):
        return cls.__name__
    else:
        return type(cls).__name__[1:]
8c1d1664ed0f5a20c9185d29dcc6c72f63f74e81
566,184
def searchForm(
        buttonText="",
        span=2,
        inlineHelpText=False,
        blockHelpText=False,
        focusedInputText=False,
        htmlId=False):
    """
    *Generate a search-form - TBS style*

    **Key Arguments:**
        - ``buttonText`` -- the button text
        - ``span`` -- column span
        - ``inlineHelpText`` -- inline and block level support for help text
          that appears around form controls
        - ``blockHelpText`` -- a longer block of help text that breaks onto a
          new line and may extend beyond one line
        - ``focusedInputText`` -- make the input focused by providing some
          initial editable input text
        - ``htmlId`` -- htmlId

    **Return:**
        - ``searchForm`` -- the search-form
    """
    if span:
        span = "span%(span)s" % locals()
    else:
        span = ""

    if not focusedInputText:
        focusedInputText = ""
        focusId = ""
    else:
        focusId = "focusedInput"

    if inlineHelpText:
        inlineHelpText = """<span class="help-inline">%(inlineHelpText)s</span>""" % locals()
    else:
        inlineHelpText = ""

    if blockHelpText:
        blockHelpText = """<span class="help-block">%(blockHelpText)s</span>""" % locals()
    else:
        blockHelpText = ""

    if not htmlId:
        htmlId = ""

    searchForm = """
    <form class="form-search">
        <div class="input-append">
            <input type="text" class="search-query %(span)s" id="%(htmlId)s" id="%(focusId)s" value="%(focusedInputText)s">
            <button type="submit" class="btn">%(buttonText)s</button>
            %(inlineHelpText)s%(blockHelpText)s
        </div>
    </form>""" % locals()

    return searchForm
5a980390e5742bb31fa280750b3d52f485f632b8
101,565
import re


def get_words_from_file(filepath):
    """Return the set of all words at least three letters long from within
    a named file.
    """
    with open(filepath) as f:
        #
        # re.findall returns a list of all instances matching a pattern
        # The pattern \w{3,} matches a word at least three letters long
        # f.read() reads the entire contents of a file into a string
        # set() returns a unique set of the values in an iterator
        #
        # So we have:
        #   A unique set of
        #   All words at least three letters long within
        #   The contents of the file f
        #
        return set(re.findall(r"\w{3,}", f.read()))
7021e5c20b0a64ca07ca25cd20ff5e44373e8465
599,220
def mock_return_true(*args):
    """A mock function to return True"""
    return True
dd577fb8afea07d1da480667d4c663073a428e52
375,018
def choose(docs):
    """Print line number, title and truncated description for each tuple in
    :docs. Get the user to pick a line number. If it's valid, return the
    first item in the chosen tuple (the "identifier"). Otherwise, return
    None."""
    last = len(docs) - 1
    for num, doc in enumerate(docs):
        print(f"{num}: ({doc[1]}) {doc[2][:30]}...")
    index = input(f"Which would you like to see (0 to {last})? ")
    try:
        return docs[int(index)][0]
    except (ValueError, IndexError):
        return None
d1e7b88d478f8fba5aa9ab21ad82ce0b9b6a543d
112,627
def determine_keep_history(cursor, keep_history=None):
    """Determine whether the RelStorage database is set to keep history."""
    if keep_history is None:
        # We don't know, so sniff
        cursor.execute(
            "select 1 from pg_catalog.pg_class "
            "where relname = 'current_object'")
        keep_history = bool(list(cursor))
    return keep_history
21c7c6c280d7928186d9dfd750e535b2ab025af2
146,262
def prunejoin(dict_, list_, sep=", "):
    """Remove non-values from list and join it using sep."""
    return sep.join([dict_.get(i) for i in list_ if dict_.get(i)])
c9232e97e24811053b4916ceee4ef6a2ff1f97fa
342,003
import torch


def points_to_cartesian(batch: torch.Tensor) -> torch.Tensor:
    """
    Transforms a batch of points in homogeneous coordinates back to
    cartesian coordinates.

    Args:
        batch: batch of points in homogeneous coordinates. Should be of
            shape BATCHSIZE x NUMPOINTS x NDIM+1

    Returns:
        torch.Tensor: the batch of points in cartesian coordinates
    """
    return batch[..., :-1] / batch[..., -1, None]
d2acca1df5deac28c4cb415a471e1eecadc2d2a9
646,749
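A quick shape/value check of points_to_cartesian above:

    pts_h = torch.tensor([[[2.0, 4.0, 2.0]]])  # one 2-D point, w = 2
    print(points_to_cartesian(pts_h))          # tensor([[[1., 2.]]])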
def select_unit(unitsDF, name):
    """
    Look for a unit in the unitsDF with a name exactly equal to name.
    If there is exactly one, return that row using transpose and squeeze.
    Otherwise get every unit with a name that contains name.
    If there are none of those, check if the input was a key instead, and
    raise an error if it is not.
    If there is exactly one result, give that.
    If there is more than one result, go through the partial matches and
    report both their name and key.
    """
    unit = unitsDF[unitsDF["name"] == name]
    if len(unit) != 1:
        unit = unitsDF[unitsDF["name"].str.contains(name)]
        if len(unit) == 0:
            unit = unitsDF[unitsDF["key"] == name]
            if len(unit) == 0:
                raise Exception(f"{name} is not a unit name or key")
        if len(unit) == 1:
            return unit.T.squeeze()
        if len(unit) > 1:
            # print out all the possibilities along with their key
            helper = unit[["name", "key"]]
            S = ""
            for line in helper.values:
                S += f"{line[0]:<50} {line[1]}\n"
            raise Exception(f"The name '{name}' is ambiguous. Please use one of these names or key values:\n{S}")
    return unit.T.squeeze()
a6a22fbb4e5d00354cd8795220f38257671fb391
515,673
def split(input_file=""): """Read the SMILES into a list""" input_list = [] with open(input_file, mode="r") as newfile: for entry in newfile: input_list.append(str(entry).strip()) return input_list
357b52b4faeed98603acc65c817eafd5da7d2ad0
156,209
def _getDct(dct, frame):
    """
    Gets the dictionary for the frame.

    Parameters
    ----------
    dct: dictionary to use if non-None
    frame: stack frame

    Returns
    -------
    dict
    """
    if dct is None:
        # dct = frame.f_back.f_locals
        dct = frame.f_back.f_globals
    return dct
d561ae11bf4b68114c8981fca3c9aee8f95f70a3
250,713
import functools


def required_chromium_based_browser(func):
    """
    A decorator to ensure that the client used is a chromium based browser.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        assert self.caps["browserName"].lower() not in ["firefox", "safari"], \
            "This only currently works in Chromium based browsers"
        return func(self, *args, **kwargs)
    return wrapper
129d037ecdc8adcc5f389c53d11ca75b8f1b1630
326,654
def get_ase(counts):
    """Defined as abs(0.5 - (a1_count / total_count))."""
    return (0.5 - counts['a1_count'] / counts['total_count']).abs()
fe79a0fa3688cdfef6282dcd35551bf8bd20f8fc
379,983
def rearrange_matrix(m, indices):
    """Rearrange matrix `m` according to provided indices."""
    # rearrange
    i_rows, i_cols = indices
    m = m[i_rows, :]
    m = m[:, i_cols]
    return m
93611769ceac431e1ee6cc8c7f8b47ae84c94332
413,935
import torch


def normalise_grad(grad: torch.Tensor) -> torch.Tensor:
    """Normalise and handle NaNs caused by norm division.

    Args:
        grad (torch.Tensor): Gradient to normalise

    Returns:
        torch.Tensor: Normalised gradient
    """
    normed_grad = grad / torch.linalg.norm(grad, dim=1)[:, None]
    if torch.isnan(normed_grad).any():
        normed_grad = torch.where(
            torch.isnan(normed_grad),
            torch.zeros_like(normed_grad),
            normed_grad,
        )
    return normed_grad
f33f2111dcf6235de7688413b71f0edd5694bdd2
135,540
def _is_boolish(b):
    """
    Check if b is a 1, 0, True, or False.
    """
    if (b == 1) or (b == 0) or (b == True) or (b == False):
        return True
    else:
        return False
0da5c05ec2f7d748620906b8f49002d2984ccb24
139,910
def sort_ipv4_addresses_with_mask(ip_address_iterable):
    """
    Sort IPv4 addresses in CIDR notation

    | :param iter ip_address_iterable: An iterable container of IPv4 CIDR
      notated addresses
    | :return list: A sorted list of IPv4 CIDR notated addresses
    """
    return sorted(
        ip_address_iterable,
        key=lambda addr: (
            int(addr.split('.')[0]),
            int(addr.split('.')[1]),
            int(addr.split('.')[2]),
            int(addr.split('.')[3].split('/')[0]),
            int(addr.split('.')[3].split('/')[1])
        )
    )
97517b2518b81cb8ce4cfca19c5512dae6bae686
3,864
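For reference, the standard library's ipaddress module can express the same ordering; a hedged alternative sketch (my own, not part of the original snippet):

    import ipaddress

    def sort_ipv4_cidrs(cidrs):
        # ip_network orders by network address, then by prefix length
        return sorted(cidrs, key=lambda c: ipaddress.ip_network(c, strict=False))

    print(sort_ipv4_cidrs(["10.0.0.0/24", "10.0.0.0/8", "9.1.1.1/32"]))
    # ['9.1.1.1/32', '10.0.0.0/8', '10.0.0.0/24']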
import json


def load_pos(filepath):
    """Load pos dictionary from json."""
    with open(filepath, 'r') as fp:
        return json.load(fp)
9cb89039ac1d9755fd31e14e2fd4c09312ddc21a
247,175
import re


def valid_md5_str(md5_str):
    """
    Validate an md5 string of the form "<32 lowercase hex chars>.<suffix>".

    :param md5_str:
    :return: bool
    """
    str_list = md5_str.split('.')
    n_len = len(str_list)
    if n_len != 2:
        return False
    n_len_md5 = len(str_list[0])
    if n_len_md5 != 32:
        return False
    find_str = re.findall('[^a-z0-9]+', str_list[0])
    if find_str:
        return False
    return True
01e6de753dd4fc76f76b7e434c020d506e1d7c6f
105,959
def get_pr_reviews(pr):
    """Gets a list of all submitted reviews on a PR.

    Does not list requested reviews."""
    # Required to access the PR review API.
    headers = {'Accept': 'application/vnd.github.black-cat-preview+json'}
    reviews = pr.session.get(
        'https://api.github.com/repos/{}/{}/pulls/{}/reviews'.format(
            pr.repository[0], pr.repository[1], pr.number),
        headers=headers).json()
    return reviews
a62b0ca5059f318df1d1e355682cee31f4af4513
76,519
import re


def is_valid_file_category_id(category_id):
    """
    Validates a file lifecycle category ID, also known as the category's
    "category-ext-name".

    A valid category ID should look like: category-00000001 -- it should
    always start with "category-" and end with 8 hexadecimal characters in
    lower case.

    :type category_id: str
    :param category_id: The file lifecycle category ID to be validated.

    :rtype: bool
    :return: True or False depending on whether category_id passes
        validation.
    """
    if category_id is None:
        return False
    match = re.match(r'^category-[0-9a-f]{8}$', category_id)
    if not match:
        return False
    return True
75b074120fbdd612814a140f056163486a09ade3
638,643
import asyncio


async def py_normal(title: str):
    """Normal exposed function

    Enter -> async sleep 10 sec -> Exit

    Responds to calls from other clients, even if the call is from one
    client.

    Parameters
    ----------
    title: str
        Characters to print out on the server

    Returns
    ----------
    True
    """
    print(f'Enter py_normal: {title}')
    await asyncio.sleep(10)
    print(f'Exit py_normal: {title}')
    return True
27c58d8906a344374f66ed7046385cf1c614063e
50,075