Columns: content (string, 39 to 9.28k characters), sha1 (string, 40 characters), id (int64, 8 to 710k). Each record below is a Python snippet (content) followed by its sha1 and id.
def is_wrapped(transformer):
    """Check if a transformer is wrapped.

    Args:
        transformer: A transformer instance

    Returns:
        bool: True if transformer is wrapped, otherwise False.
    """
    return hasattr(transformer, "is_wrapped")
a6be022672838fc43505478db9939afa46583a03
191,524
def all_attrs_missing(record):
    """Checks if all attributes have missing values, excluding ID and Class"""
    return all(value == '?' for value in record[1:-1])
95edc3fcb42645e438b7c3267c4996f02085b0eb
623,130
import re

def extract_line_from_file(filename, str_search, num=False, integer=False):
    """
    uses regular expression to search a string in a file

    :param filename: the file to be searched in
    :param str_search: the string to search for
    :param num: should a number be extracted?
    :param integer: should an integer be returned? if False and num=True then a float will be returned
    :return: True/False for a sentence
             integer if num=True and integer=True
             float if num=True and integer=False
    """
    with open(filename, "r") as fh:
        for line in fh:
            line = line.strip()
            if num:
                # raw string so the \d escape is passed through to the regex engine
                tmp = re.search(str_search + r".*?(\d+)", line)
                if tmp:
                    return int(tmp.group(1)) if integer else float(tmp.group(1))
            else:
                tmp = re.search(str_search, line)
                if tmp:
                    return True
    return False
5c0ea116d528397e0bccd35236f309557a5c52ae
194,446
import re

def getWordCharCount(w):
    """ Char count for a word."""
    rus = len(re.findall(r"[а-я]", w))
    eng = len(re.findall(r"[a-z]", w))
    c = len(w)
    return c, rus, eng
648a398aa3fe3bf25965b1635975ae104ba0f847
383,801
from pathlib import Path

def valid_path(path):
    """Validate the user-supplied path is an existing directory."""
    if Path(path).is_dir():
        return path
    else:
        raise NotADirectoryError(f'Could not find the directory: {path}')
2fa7b62fef5cc05fabaadcef30a3e100fdcb83e9
428,674
def output_formatter(value):
    """
    Output formatter for environment variable values.

    Parameters
    ----------
    value
        Value to format.

    Returns
    -------
    :class:`str`
        Formatted value.
    """
    if value is not None and not isinstance(value, bool):
        return str(value)
    else:
        return ""
e0bd03f9580ee17d9464178a43b1406e763d1d73
659,363
def get_rank(arr):
    """
    Given a one-dim gradient tensor, return the ranks of all gradients.
    NOTE: This function is similar to compute_rank but has no constraints on the idxs.
    """
    arr_idx = sorted(
        [(idx, grad) for idx, grad in enumerate(arr)],
        key=lambda t: t[1], reverse=True
    )
    arr_rank = [0 for _ in range(len(arr_idx))]
    for i, (idx, grad) in enumerate(arr_idx):
        arr_rank[idx] = i + 1
    return arr_rank, arr_idx
6c775699977a5c7f443a3458566f4d3101a13d78
547,816
def base10toN(num: int, base: int = 12):
    """Convert a base-10 integer to the given base (bases 2 through 36 are supported).

    Parameters
    ----------
    num : int
        Integer to be transformed to the new base.
    base : int
        Base in which to write the integer.

    Return
    ------
    str
        String with the number's digits in the new base.

    References
    ----------
    * http://code.activestate.com/recipes/577586-converts-from-decimal-to-any-base-between-2-and-26/
    """
    converted_string = ""
    currentnum = num
    if not 1 < base < 37:
        raise ValueError("base must be between 2 and 36")
    if not num:
        return '0'
    while currentnum:
        mod = currentnum % base
        currentnum = currentnum // base
        converted_string = chr(48 + mod + 7 * (mod >= 10)) + converted_string
    return converted_string
4dd41abf8ae1bc20ecc450bce6eecbbf1ba34f62
457,417
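A quick usage sketch for base10toN above (illustrative, not part of the dataset record; values worked out from the algorithm by hand):

assert base10toN(255, 16) == "FF"    # 255 = 15*16 + 15
assert base10toN(10, 2) == "1010"    # binary
assert base10toN(0, 12) == "0"       # zero short-circuits to '0'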
def add_single(arr, val):
    """ Return sum of array and scalar. """
    return [i + val for i in arr]
f089514decec318d8df8954d423d90c2d5b63e0b
105,005
def convert_array_to_dictionary(x, y, int_to_topic_code):
    """
    Convert data in array format to a dictionary keyed by topic code.

    :param x: the array of articles.
    :param y: the integer indexes of the topic classes.
    :param int_to_topic_code: dictionary of integer indexes to topic codes.
    :returns: a dictionary keyed by topic code with items a list of the articles for that topic.
    """
    dictionary = {}
    for index in int_to_topic_code.keys():
        dictionary[int_to_topic_code[index]] = x[y == index]
    return dictionary
92a47551916925b95d78e73c5825db99fa5370db
279,878
def mid(p1, p2):
    """ Returns the midpoint between 2 3D points """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2, (p1[2] + p2[2]) / 2
25da46ae7710c4da6cfc650d3480fbf15e82f152
281,579
def _GetCoveredBuilders(trybot_config):
    """Returns a dict mapping masters to lists of builders covered in config."""
    covered_builders = {}
    for master, builders in trybot_config.iteritems():
        covered_builders[master] = builders.keys()
    return covered_builders
e759be62c1c57045dca98e40f83beda6a7ddf7e5
8,974
def valid_chrom(arg):
    """Check if a string represents a valid chromosome"""
    arg = arg.lower()
    if arg[:3] == 'chr':
        return True
    if arg.isdigit() or arg in ('x', 'y', 'mt'):
        return True
    if arg[:8] == 'scaffold' or arg[:2] == 'gl' or arg[:3] == 'par':
        return True
    return False
7b1483cbff37bf222a9539719a288d0e6909dbc8
612,375
def expand_date(df):
    """Adds columns with year, month, day, hour, day of year, week, weekday.

    Args:
        dataframe w/ datetime index

    Returns:
        copy of dataframe with columns added
    """
    df = df.copy()
    df['year'] = df.index.year
    df['month'] = df.index.month
    df['day'] = df.index.day
    df['hour'] = df.index.hour
    df['dayofyear'] = df.index.dayofyear
    df['week'] = df.index.week
    df['weekday'] = df.index.weekday
    return df
8473099e01f7dd36858137e88bcd7ad8a381efc9
522,935
import re

def is_line_function_definition(line: str) -> bool:
    """Returns true if the corresponding line (of a python file) is the start
    of a function definition. Excludes functions that start with `__` which
    indicates a private function.

    Args:
        line: a line in a python file
    """
    return bool(re.search('^( *)def ', line)) and 'def __' not in line
faf550e59d7eac5686b6881df515db8ab3caedcf
693,255
def read_line(filename):
    """Helper method to open a file and read a line from it.

    Args:
        filename (str): Path to the file.

    Returns:
        str: Line read from the file.
    """
    try:
        with open(filename) as text_file:
            return text_file.readline()
    except Exception:
        return ""
74b99fdabced0b090114f933bd355c26a0533571
131,270
def sub_test_noiser(new_bots, old_bots, turn, should_noise, test_other):
    """ sub test function to check if noiser worked

    Parameters
    ----------
    new_bots: bots after noising
    old_bots: bots before noising
    turn: which turn is it now? 0,1,2,3
    should_noise: should the noiser do something right now, or return same bots?
    test_other: true: then it checks if what was meant to happen to other bots
        happened and vice versa

    Returns
    -------
    a boolean
    """
    if test_other:
        if not turn % 2:
            # even case
            if should_noise:
                test_bot1 = not old_bots[1] == new_bots[1]
                test_bot3 = not old_bots[3] == new_bots[3]
                return test_bot1 or test_bot3
            else:
                test_bot1 = old_bots[1] == new_bots[1]
                test_bot3 = old_bots[3] == new_bots[3]
                return test_bot1 and test_bot3
        else:
            if should_noise:
                test_bot0 = not old_bots[0] == new_bots[0]
                test_bot2 = not old_bots[2] == new_bots[2]
                return test_bot0 or test_bot2
            else:
                test_bot0 = old_bots[0] == new_bots[0]
                test_bot2 = old_bots[2] == new_bots[2]
                return test_bot0 and test_bot2
    else:
        # test_own should always mean no change
        if turn % 2:
            test_bot0 = old_bots[0] == new_bots[0]
            test_bot2 = old_bots[2] == new_bots[2]
            return test_bot0 and test_bot2
        else:
            test_bot1 = old_bots[1] == new_bots[1]
            test_bot3 = old_bots[3] == new_bots[3]
            return test_bot1 and test_bot3
b289499e82ef6c0e58d9d78fc06786e8108634ff
160,717
def levenshtein(s: str, t: str) -> int:
    """Levenshtein distance algorithm, implementation by Sten Helmquist.
    Copied from https://davejingtian.org/2015/05/02/python-levenshtein-distance-choose-python-package-wisely/"""
    # degenerate cases
    if s == t:
        return 0
    if len(s) == 0:
        return len(t)
    if len(t) == 0:
        return len(s)
    # create two work vectors of integer distances
    v0 = []
    v1 = []
    # initialize v0 (the previous row of distances)
    # this row is A[0][i]: edit distance for an empty s
    # the distance is just the number of characters to delete from t
    for i in range(len(t) + 1):
        v0.append(i)
        v1.append(0)
    for i in range(len(s)):
        # calculate v1 (current row distances) from the previous row v0
        # first element of v1 is A[i+1][0]
        # edit distance is delete (i+1) chars from s to match empty t
        v1[0] = i + 1
        # use formula to fill in the rest of the row
        for j in range(len(t)):
            cost = 0 if s[i] == t[j] else 1
            v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
        # copy v1 (current row) to v0 (previous row) for next iteration
        for j in range(len(t) + 1):
            v0[j] = v1[j]
    return v1[len(t)]
4e1d192756808c4b9e562fb60b87fca74a4f4b68
81,055
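A couple of sanity checks for levenshtein above (classic textbook distances; illustrative only, not part of the dataset record):

assert levenshtein("kitten", "sitting") == 3   # substitute k->s, e->i, insert g
assert levenshtein("flaw", "lawn") == 2        # delete 'f', append 'n'
assert levenshtein("", "abc") == 3             # all insertions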
def validate_transaction(spend_amounts, tokens_amounts):
    """
    A transaction is considered valid here if the amounts of tokens in the
    source UTXOs are greater than or equal to the amounts to spend.

    :param spend_amounts: amounts to spend
    :param tokens_amounts: existing amounts to spend from
    :return: True if transaction is valid, otherwise False
    """
    for am in spend_amounts:
        if am not in tokens_amounts or spend_amounts[am] > tokens_amounts[am]:
            return False
    return True
5a1de72aefe6d5d401864defc46c225afc218dba
669,347
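A small usage sketch for validate_transaction above, assuming both arguments are dicts keyed by token identifiers (the token names here are hypothetical):

tokens = {"TOKEN-A": 10, "TOKEN-B": 5}
assert validate_transaction({"TOKEN-A": 3}, tokens) is True
assert validate_transaction({"TOKEN-A": 3, "TOKEN-B": 6}, tokens) is False  # overspends TOKEN-B
assert validate_transaction({"TOKEN-C": 1}, tokens) is False                # unknown token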
def in_call_stack(frame, bp_loc, arg_dict, _):
    """Only break if the given name is in the current call stack."""
    name = arg_dict.GetValueForKey('name').GetStringValue(1000)
    thread = frame.GetThread()
    found = False
    for frame in thread.frames:
        # Check the symbol.
        symbol = frame.GetSymbol()
        if symbol and name in frame.GetSymbol().GetName():
            return True
        # Check the function.
        function = frame.GetFunction()
        if function and name in function.GetName():
            return True
    return False
a99fef225d186cb6bd6e56e9a3d5bf74925fc95b
281,109
def check_numbers(num_a, num_b):
    """
    Checks to see if the two numbers are evenly divisible by each other.
    """
    div_result = num_a % num_b
    if div_result == 0:
        return True, int(num_a / num_b)
    div_result = num_b % num_a
    if div_result == 0:
        return True, int(num_b / num_a)
    return False, 0
b8e9215dd26c629062781ce3cbb1dc93b8afd3ae
558,997
import fnmatch

def filter_channel_priority(channels, key, priorities=None):
    """
    This function takes a dictionary containing channels keys and returns a
    new one filtered with the given priorities list.

    All channels matching the first pattern in the list will be retrieved.
    If one or more channels are found it stops. Otherwise it will attempt to
    retrieve channels matching the next pattern. And so on.

    :type channels: list
    :param channels: A list containing channel names.
    :type priorities: list of unicode or None
    :param priorities: The desired channels with descending priority. Channels
        will be matched by fnmatch.fnmatch() so wildcards and sequences are
        supported. The advisable form to request the three standard components
        of a channel is "HH[ZNE]" to avoid getting e.g. rotated components.
    :returns: A new list containing only the filtered channels.
    """
    if priorities is None:
        return channels
    filtered_channels = []
    for pattern in priorities:
        if filtered_channels:
            break
        for channel in channels:
            if fnmatch.fnmatch(getattr(channel, key), pattern):
                filtered_channels.append(channel)
                continue
    return filtered_channels
2c10d4ead5ebfcbbfc19cdc030c61faa9128ea1d
352,564
def version_str_to_tuple(version):
    """Split version string to tuple of integers."""
    return tuple(map(int, (version.split("."))))
c31e5f6055442df31d549dba3f6235ab7505dfc8
153,189
def batch_sum(x):
    """Sums a tensor along all non-batch dimensions"""
    return x.sum(tuple(range(1, x.dim())))
f62ae269a632b9b4ef95d8e9d0276e0fad16b203
632,551
def recursive_parse_xml_to_dict(xml):
    """Recursively parses XML contents to python dict.

    We assume that `object` tags are the only ones that can appear multiple
    times at the same level of a tree.

    Args:
        xml: xml tree obtained by parsing XML file contents using lxml.etree

    Returns:
        Python dictionary holding XML contents.
    """
    # if not xml:
    if not len(xml):
        return {xml.tag: xml.text}
    result = {}
    for child in xml:
        child_result = recursive_parse_xml_to_dict(child)
        if child.tag != 'object':
            result[child.tag] = child_result[child.tag]
        else:
            if child.tag not in result:
                result[child.tag] = []
            result[child.tag].append(child_result[child.tag])
    return {xml.tag: result}
d4a3082dfd1bd03a46b2ecaf89ef725e98f59b77
237,507
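An illustrative call to recursive_parse_xml_to_dict above. The docstring assumes lxml.etree, but the standard-library ElementTree exposes the same tag/text/iteration interface used by the function, so this sketch uses it to stay self-contained:

import xml.etree.ElementTree as ET

xml = ET.fromstring(
    "<annotation><filename>img.jpg</filename>"
    "<object><name>cat</name></object>"
    "<object><name>dog</name></object></annotation>"
)
print(recursive_parse_xml_to_dict(xml))
# {'annotation': {'filename': 'img.jpg', 'object': [{'name': 'cat'}, {'name': 'dog'}]}}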
from typing import List
import socket

def _find_n_open_ports(n: int) -> List[int]:
    """Find n random open ports on localhost.

    Returns
    -------
    ports : list of int
        n random open ports on localhost.
    """
    sockets = []
    for _ in range(n):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))
        sockets.append(s)
    ports = []
    for s in sockets:
        ports.append(s.getsockname()[1])
        s.close()
    return ports
91c990d1fb7c3029eb5ebb2fccacfedc8c300a21
365,485
def construct_increment_confirmation_attempts_query(graph_uri, poststuk_uri):
    """
    Construct a SPARQL query for incrementing (+1) the counter that keeps track
    of how many times the service attempted to send out a confirmation for a
    certain message without success.

    :param graph_uri: string
    :param poststuk_uri: URI of the bericht.
    :returns: string containing SPARQL query
    """
    q = """
        PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
        PREFIX schema: <http://schema.org/>

        DELETE {{
            GRAPH <{0}> {{
                <{1}> ext:failedConfirmationAttempts ?result_attempts.
            }}
        }}
        INSERT {{
            GRAPH <{0}> {{
                <{1}> ext:failedConfirmationAttempts ?incremented_attempts.
            }}
        }}
        WHERE {{
            GRAPH <{0}> {{
                <{1}> a schema:Message.
                OPTIONAL {{ <{1}> ext:failedConfirmationAttempts ?attempts. }}
                BIND(0 AS ?default_attempts)
                BIND(COALESCE(?attempts, ?default_attempts) AS ?result_attempts)
                BIND((?result_attempts + 1) AS ?incremented_attempts)
            }}
        }}
        """.format(graph_uri, poststuk_uri)
    return q
5614e318083d493b5e5e7482beebbe9fc303832a
571,712
def get_asg_plotlabels(g, fxn, reshist, t_ind):
    """
    Assigns labels to the ASG graph g in the given fxn from reshist at time t so that it can be plotted

    Parameters
    ----------
    g : networkx graph
        The graph to get labels for
    fxn : FxnBlock
        Corresponding function block for the graph g
    reshist : dict
        The dict of results history over time (from process.hists() or
        process.typehist() for the typegraph option)
    t_ind : float
        The time in reshist to update the graph at

    Returns
    -------
    labels : dict
        labels for the graph.
    faultfxns : dict
        functions with faults in them
    degfxns : dict
        functions that are degraded
    degflows : dict
        flows that are degraded
    faultlabels : dict
        names of each fault
    faultedges : dict
        edges with faults in them
    faultedgeflows : dict
        names of flows that are degraded on each edge
    edgelabels : dict
        labels of each edge
    """
    labels = {node: node for node in g.nodes}
    fxnname = fxn.name
    rhist = reshist['functions'][fxnname]
    actions = fxn.actions
    faultfxns = []
    degfxns = []
    degflows = []
    faultlabels = {}
    edgelabels = dict()
    for edge in g.edges:
        edgelabels[edge[0], edge[1]] = g.get_edge_data(edge[0], edge[1]).get('name', '')
    for action in actions:
        if rhist[action]['numfaults'][t_ind]:
            faultfxns += [action]
            if type(rhist[action]['faults']) == dict:
                faultlabels[action] = {fault for fault, occ in rhist[action]['faults'].items() if occ[t_ind]}
            else:
                faultlabels[action] = rhist['faults'][t_ind]
        if not rhist['status'][t_ind]:
            degfxns += [action]
    flows = [flow for flow in {**fxn.flows, **fxn.internal_flows} if flow in g]
    for flow in flows:
        if flow in rhist and any([v[t_ind] != 1 for v in rhist[flow].values()]):
            degflows += [flow]
        elif flow in reshist['flows'] and not reshist['flows'][flow][t_ind] == 1:
            degflows += [flow]
    faultedges = []  # [edge for edge in g.edges if any([reshist['flows'][flow][t_ind]==0 for flow in g.edges[edge].keys()])]
    faultedgeflows = {}  # {edge:''.join([' ',''.join(flow+' ' for flow in g.edges[edge] if reshist['flows'][flow][t_ind]==0)]) for edge in faultedges}
    return labels, faultfxns, degfxns, degflows, faultlabels, faultedges, faultedgeflows, edgelabels
f247e30e03e7333725c21065a4beab0c6d3e3e21
577,319
def validate_is_string_or_number(var):
    """
    Checks if the input is either a str, int or float.

    :param var: input
    :raises: TypeError if not
    :returns: True
    """
    valid_types = (str, int, float)
    if not isinstance(var, valid_types):
        raise TypeError("Input is not a string or a number:" + str(var))
    return True
ff48389e4be7c2648a3b1e4290cc2fb25661f9ea
181,315
def edits0(word):
    """
    Return all strings that are zero edits away (i.e. the word itself).
    """
    return {word}
06836ba7da2a02eb7d9e9d1a2d12d74fc10f95e8
49,235
import torch

def se2_element(G):
    """
    Returns three new tensors corresponding to x, y and theta attributes of the
    group elements specified by the se2 group elements in matrix formulation.

    Args:
        G (`torch.FloatTensor`): matrix formulation of the group elements.

    Returns:
        (`torch.FloatTensor`): x attributes of the group elements.
        (`torch.FloatTensor`): y attributes of the group elements.
        (`torch.FloatTensor`): theta attributes of the group elements.
    """
    return G[..., 0, 2], G[..., 1, 2], torch.atan2(G[..., 1, 0], G[..., 0, 0])
bf54b3a83df120607a04288b2626bd643e08f6c6
210,527
import math

def R(x, y):
    """ Return the polar coordinates (r, theta) of the point (x, y) """
    r = math.hypot(x, y)
    theta = math.atan2(y, x)
    return r, theta
bfbb43b63b5dc8ca316d7705cef64553a457d672
558,528
from typing import Callable

def raises(e: Exception) -> Callable:
    """Return a function that raises the given exception when called.

    Args:
        e: The exception to be raised.

    Returns:
        A function that can take any arguments, and raises the given exception.
    """
    def f(*args, **kwargs):
        raise (e)
    return f
d7ad75ee6cca97f90eca861c794dd385a7c809df
265,086
def snd(tup):
    """``snd :: (a, b) -> b``

    Extract the second component of a pair.
    """
    _x, y = tup
    return y
794bff44744897444ce66450920304522f600cb0
300,453
def temp_output_file(temp_folder, temp_output_filename):
    """Create a file path for the output file."""
    return temp_folder.join(temp_output_filename)
cbfb5bc72ec4e91dec1eb12b83139a274f518597
214,895
import torch

def _interp_array(start: torch.Tensor, end: torch.Tensor,
                  num_steps: int) -> torch.Tensor:
    """Linearly interpolate 2D tensors, returns 3D tensors.

    Args:
        start: 2D tensor for start point of interpolation of shape [x,y].
        end: 2D tensor as end point of interpolation of shape [x,y] (same as start).
        num_steps: number of steps to interpolate.

    Returns:
        New tensor of shape [num_steps, x, y]
    """
    alpha = torch.linspace(0., 1., num_steps)
    beta = 1 - alpha
    return torch.einsum('a,bc->abc', alpha, end) + torch.einsum('a,bc->abc', beta, start)
232ca65e13d4896c7c0e580bd48f8629b4622573
630,975
from pathlib import Path

def is_doorstop_item_file(file_name, *args):
    """
    Returns whether the given file is most likely to be a file that
    represents a doorstop item
    """
    if not file_name:
        return False
    file_name = Path(file_name)
    if file_name.suffix != ".yml":
        return False
    if not (file_name.parent / ".doorstop.yml").exists():
        return False
    if file_name.name.startswith("."):
        return False
    if file_name.name == ".doorstop.yml":
        return False
    return True
0df41af3186823ec2477f9b42c0eb41e3f3bc915
350,789
def S_IFMT(mode):
    """Return the portion of the file's mode that describes the file type."""
    return mode & 0o170000
ae5fdc44ce7d7f94b04c424b5f8e885a6d0c97f1
684,632
def is_combined_texture_plan(plan):
    """
    If true, this texture is composed of three other single-channel textures,
    which are copied into the R, G, and B channels of the destination image.
    """
    return 'r' in plan and 'g' in plan and 'b' in plan
6b795628e07bb9e73fae621c41ba06a230ff18ba
144,909
def snake_to_camel(word):
    """
    changes word snake to camel case
    example: my_plan -> MyPlan
    """
    return ''.join(x.capitalize() or '_' for x in word.split('_'))
06e2704fcaaf7c65d2be4c5c104f55f1df1fb207
114,107
def max(x, y):
    """Maximum"""
    # This function shadows the builtin max(); calling max() here would
    # recurse forever, so compare the two values directly instead.
    return x if x >= y else y
8ce365a5c5932020377107e9c9bbddf548474795
349,461
def to_gcal_datetime(date, time):
    """Convert date and time to google formatted datetime.

    Arguments:
        date -- datetime object
        time -- time string containing the time

    Return:
        gcal_datetime -- formatted date string
    """
    gcal_datetime = '{}-{:>02}-{:>02}T{}:00+08:00'.format(
        date.year, date.month, date.day, time)
    return gcal_datetime
cdbb34548092414ee32b186bcd6198d5286fe723
450,008
def population_attributable_fraction(a, b, c, d):
    """Calculates the Population Attributable Fraction from count data.

    Returns population attributable fraction

    a: count of exposed individuals with outcome
    b: count of unexposed individuals with outcome
    c: count of exposed individuals without outcome
    d: count of unexposed individuals without outcome
    """
    if (a < 0) or (b < 0) or (c < 0) or (d < 0):
        raise ValueError('All numbers must be positive')
    rt = (a + c) / (a + b + c + d)
    r0 = c / (c + d)
    return (rt - r0) / rt
a5f1618e9d4a62376ea7237449db065d6ae7ca55
474,786
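A worked pass through the arithmetic of population_attributable_fraction above, with invented counts (this simply mirrors the formula in the code, it is not an epidemiological claim):

#   rt  = (50 + 20) / (50 + 50 + 20 + 80) = 0.35
#   r0  = 20 / (20 + 80)                  = 0.20
#   PAF = (0.35 - 0.20) / 0.35            ~ 0.4286
print(round(population_attributable_fraction(50, 50, 20, 80), 4))  # 0.4286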
import textwrap def wrap(s): """ Wrap lines of text, retaining existing newlines as paragraph markers. >>> print(wrap(lorem_ipsum)) Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. <BLANKLINE> Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst. """ paragraphs = s.splitlines() wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) return '\n\n'.join(wrapped)
f77a6b98b9a5ab729245fdfd9dfa1c9c823737d4
661,827
def get_timestamps(files, idx):
    """Extract timestamps from a pose or relocalization file."""
    lines = []
    for p in files.parent.glob(files.name):
        with open(p) as f:
            lines += f.readlines()
    timestamps = set()
    for line in lines:
        line = line.rstrip('\n')
        # check for blank lines before indexing into them; skip comments too
        if line == '' or line[0] == '#':
            continue
        ts = line.replace(',', ' ').split()[idx]
        timestamps.add(ts)
    return timestamps
a856117394aaa3906a60a8bb2b6590dcb00d41e7
388,905
def _fix_coords(coords, offset):
    """Adjust the entity coordinates to the beginning of the sentence."""
    if coords is None:
        return None
    return tuple([n - offset for n in coords])
59eb86a89d9a68f357fb02e6abb91380332d8299
587,122
import re

def idsub(tag):
    """In aSc, "id" fields may only contain ASCII alphanumeric characters,
    '-' and '_'. Substitute anything else by '_'.
    """
    return re.sub('[^-_A-Za-z0-9]', '_', tag)
3299b7a88e512e32686780e31e52b71565cf6944
668,166
def say_hello(to):
    """Say hello to someone (or the whole world)."""
    return f"Hello {to}!"
1d345824048aba9777b3ce3fadee3aed90f6dcd2
544,293
def rates_for_yr(rates_all_years, sim_year):
    """
    Filter specific rates for a given year

    Parameters
    ----------
    rates_all_years : pandas DataFrame
        rates, to be filtered by year
    sim_year : int
        year being simulated

    Returns
    -------
    pop_w_rates : pandas DataFrame
        rates for a given year
    """
    rates_yr = rates_all_years[rates_all_years['yr'] == sim_year]
    return rates_yr
616cec13b0a686c2c7504c187c31dafaa7f88b6f
701,936
def _thermr_input(endfin, pendfin, pendfout, mat,
                  temperatures=[293.6], angles=20, iprint=False,
                  err=0.001, emax=10, **kwargs):
    """Write thermr input for free-gas.

    Parameters
    ----------
    endfin : `int`
        tape number for input ENDF-6 file
    pendfin : `int`
        tape number for input PENDF file
    pendfout : `int`
        tape number for output PENDF file
    mat : `int`
        MAT number
    temperatures : iterable of `float`
        iterable of temperature values in K (default is 293.6 K)
    angles : `int`
        number of equi-probable angles (default is 20)
    iprint : `bool`
        print option (default is `False`)
    err : `float`
        tolerance (default is 0.001)
    emax : `float`
        maximum energy for thermal treatment (default is 10 eV)

    Returns
    -------
    `str`
        thermr input text
    """
    text = ["thermr"]
    text += ["{:d} {:d} {:d} /".format(endfin, pendfin, pendfout)]
    text += ["0 {:d} {:d} {:d} 1 0 0 1 221 {:d} /".format(mat, angles, len(temperatures), int(iprint))]
    text += [" ".join(map("{:.1f}".format, temperatures)) + " /"]
    text += ["{} {} /".format(err, emax)]
    return "\n".join(text) + "\n"
4ad4332bc7fc2d9b78f886e415d3aa844c2d8ea0
507,643
def is_eiffel_event_type(event, event_type):
    """
    Checks if an event is of a given type.

    :param event: the Eiffel event as a dict
    :param event_type: the Eiffel event type
    :return: True if meta.type equals event_type
    """
    return event['meta']['type'] == event_type
9fa4c66825ef194f1b54ac4771829f519c14bde5
433,356
def _get_num_args(call_stmt):
    """ Get the number of arguments in a call statement. """
    return len(call_stmt.args) + len(call_stmt.keywords)
96f18fe2b3a924c77c30730626e4b1c146896fc9
227,354
def get_console_input(prompt):
    """Get console input."""
    return input(prompt)
8c9d751fd64650f89c9d5df777f53245d60b9fb8
234,782
import re

def validate_version(version: str):
    """
    Validates the provided version is in the form 'M.m'.

    Returns:
        The same string provided as input if the format is valid.

    Raises:
        ValueError
            If the provided version does not match the expected pattern.
    """
    # non-capturing group was originally written as '(:?...)' by mistake
    version_re = re.compile(r'(?:^\d+\.\d+$)')
    if not version_re.match(version):
        raise ValueError
    return version
9dd2ef9e0d19db7825260a4ae2b595c83459849a
267,501
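A usage sketch for validate_version above (illustrative only):

assert validate_version("1.2") == "1.2"
try:
    validate_version("1.2.3")   # a patch component does not match 'M.m'
except ValueError:
    pass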
def socketexpose(func):
    """Decorator to expose functions over websocket"""
    func.socketexposed = True
    return func
6facf29ea38d37f44944fcab55889b6e3ec9aa9c
165,581
def store(src, rel, dst):
    """
    Returns an SQL statement to store an edge into the SQL backing store.

    :param src: The source node.
    :param rel: The relation.
    :param dst: The destination node.
    """
    smt = 'INSERT INTO %s (src, dst) VALUES (?, ?)'
    return smt % rel, (src, dst)
1fcb76ff722fbf0a43c125a4ff42405b12d54ec6
9,603
def SelectColumn(lig_dict, colname):
    """
    Prune the dictionary, only attribute in colname will be left.

    :param lig_dict: a tree like dictionary
    :param colname: what attribute you want to keep.
    :return: a new dictionary
    """
    lig_new = dict()
    for k in lig_dict:
        lig_new[k] = {sk: v for sk, v in lig_dict[k].items() if sk in colname}
    return lig_new
766c28ccf7f9e15151ddc2fd55a1fc707c150ec1
244,842
def comp(DNA: str, pat_len: int) -> list:
    """Sort all substrings of pat_len length

    :param DNA: the string to pull substrings from
    :type DNA: str
    :param pat_len: the length of substrings to pull
    :type pat_len: int
    :returns: all substrings, sorted
    :rtype: list (of strs)
    """
    if not DNA:
        raise ValueError('Cannot pull substrings from empty string')
    if pat_len < 1:
        raise ValueError('Substrings must be at least length 1')
    DNA_len = len(DNA)
    if DNA_len < pat_len:
        raise ValueError('No substrings of that length')
    return sorted([DNA[i:i + pat_len] for i in range(DNA_len - pat_len + 1)])
04cddfb893fe1e4b0b03d9997b53ac80b8f6e606
655,009
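An example run of comp above on a short DNA string (all 3-mers, sorted; not part of the dataset record):

print(comp("TATGGGGTGC", 3))
# ['ATG', 'GGG', 'GGG', 'GGT', 'GTG', 'TAT', 'TGC', 'TGG']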
def get_ycbcr_bands(image):
    """Returns a tuple of the 3 bands (Y, Cb, Cr)."""
    color_transformed = image.convert(mode='YCbCr')
    return color_transformed.split()
b1a4a5656af3aac7787309d21dfd2e0bdecc0f7f
594,213
import hashlib
import pprint

def get_dict_hash(d, shorten: bool = True):
    """
    Create string that uniquely identifies the given dict

    :param d:
    :param shorten: if `True`, will return only the first 8 characters of the hash
    :return:
    """
    # pretty print (necessary to keep a consistent order for dictionary entries,
    # otherwise hash varies for same config), then md5 hash and keep first 8 chars
    hash = hashlib.md5(pprint.pformat(d).encode('utf-8')).hexdigest()
    return hash[:8] if shorten else hash
62bdbb5a61e47a11d36f8a1fed12a47edb61491c
484,576
from typing import Any
import struct

def pack_into(fmt: str, buffer: bytearray, offset: int, *args: Any) -> int:
    """
    Pack data with struct.pack_into and given data format.
    Return the size of the output data with that format.
    Use offset += pack_into() to update the offset for next call
    """
    struct.pack_into(fmt, buffer, offset, *args)
    return struct.calcsize(fmt)
f7d461b693cc59062481f7941573ff485235ab38
341,909
def is_palindrome(num: int) -> bool:
    """Checks if a number is a palindrome."""
    return str(num) == str(num)[::-1]
bb6b271a1fbf77efb08091b625acd0ee932f292d
642,902
def bandwidth_converter(
    number, *, from_unit, to_unit, from_time="seconds", to_time="seconds"
):
    """
    Bandwidth Calculator.

    Convert data rate from one unit to another.

    Arguments:
        number (int): number to be converted

    Keyword arguments:
        from_unit (str): convert from this data unit. Example:
            (bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)
        to_unit (str): convert to this data unit. Example:
            (bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)

    Keyword arguments (opt):
        from_time (str): Specify the time frame used in from_unit
            (seconds, minutes, hours, days, months) default: seconds
        to_time (str): Specify the time frame used in to_unit
            (seconds, minutes, hours, days, months) default: seconds

    bps, Kbps, Mbps, Gbps... = decimal base = 1000^n
    KB, MB, GB, TB... = decimal base = 1000^n
    KiB, MiB, GiB, TiB... = binary base = 1024^n

    References:
        - https://en.wikipedia.org/wiki/Units_of_information
        - https://physics.nist.gov/cuu/Units/binary.html

    Returns:
        tuple (number_converted, to_unit/to_time)

    Example:
    >>> bandwidth_converter(100, from_unit="Mbps", to_unit="MB")
    (12.5, 'MB/seconds')
    >>> bandwidth_converter(100, from_unit="Mbps", to_unit="GB", to_time="hours")
    (45.0, 'GB/hours')
    >>> bandwidth_converter(1, from_unit="Gbps", to_unit="MB")
    (125.0, 'MB/seconds')
    >>> bandwidth_converter(10, from_unit="Gbps", to_unit="GB")
    (1.25, 'GB/seconds')
    >>> bandwidth_converter(10, from_unit="Gbps", to_unit="TB", to_time="hours")
    (4.5, 'TB/hours')
    >>> bandwidth_converter(10, from_unit="GB", to_unit="Gbps")
    (80.0, 'Gbps/seconds')
    >>> Convert 2.25 GB per hours to Mbps # doctest: +SKIP
    >>> bandwidth_converter(2.25, from_unit="GB", from_time="hours", to_unit="Mbps", to_time="seconds")  # noqa
    (5.0, 'Mbps/seconds')
    """
    unit_power = {
        "bps": 1, "Kbps": 1000, "Mbps": 1000 ** 2, "Gbps": 1000 ** 3,
        "Tbps": 1000 ** 4, "Pbps": 1000 ** 5, "Ebps": 1000 ** 6,
        "Bytes": 1, "KB": 1000, "MB": 1000 ** 2, "GB": 1000 ** 3,
        "TB": 1000 ** 4, "PB": 1000 ** 5, "EB": 1000 ** 6,
        "KiB": 1024, "MiB": 1024 ** 2, "GiB": 1024 ** 3,
        "TiB": 1024 ** 4, "PiB": 1024 ** 5, "EiB": 1024 ** 6,
    }
    time_in_sec = {
        "seconds": 1, "minutes": 60, "hours": 3600,
        "days": 3600 * 24, "months": 3600 * 24 * 30,
    }

    if from_unit not in unit_power or to_unit not in unit_power:
        raise ValueError(
            "invalid unit. It must be {}".format(", ".join(unit_power.keys()))
        )
    if from_time not in time_in_sec or to_time not in time_in_sec:
        raise ValueError(
            "invalid time. It must be {}".format(", ".join(time_in_sec.keys()))
        )

    # Convert input number to bps
    bps = (float(number) * int(unit_power[from_unit])) / time_in_sec[from_time]
    if not from_unit.endswith("bps"):
        bps = bps * 8

    # to_unit is bits or bytes
    new_unit = bps if to_unit.endswith("bps") else bps / 8
    # Convert to new unit
    new_unit = (new_unit / unit_power[to_unit]) * time_in_sec[to_time]

    return new_unit, "{}/{}".format(to_unit, to_time)
93068fb52bc3cd04615749e61a6ecea54b2eda0f
645,208
def close_parens_needed(expr):
    """Return the number of close-parentheses needed to make 'expr' balanced.

    >>> close_parens_needed("1+2")
    0
    >>> close_parens_needed("(1 + 2)")
    0
    >>> close_parens_needed("(1 + 2")
    1
    >>> close_parens_needed("(1 + (2 *")
    2
    >>> close_parens_needed("(1 + (2 * 3) + (4")
    2
    """
    return expr.count("(") - expr.count(")")
ad93df4989dc35d5a60f8e00e2c1cc13893d2ca2
399,592
def context_to_airflow_vars(context):
    """
    Given a context, this function provides a dictionary of values that can be
    used to externally reconstruct relations between dags, dag_runs, tasks and
    task_instances.

    :param context: The context for the task_instance of interest
    :type context: dict
    """
    params = {}
    dag = context.get('dag')
    if dag and dag.dag_id:
        params['airflow.ctx.dag.dag_id'] = dag.dag_id
    dag_run = context.get('dag_run')
    if dag_run and dag_run.execution_date:
        params['airflow.ctx.dag_run.execution_date'] = dag_run.execution_date.isoformat()
    task = context.get('task')
    if task and task.task_id:
        params['airflow.ctx.task.task_id'] = task.task_id
    task_instance = context.get('task_instance')
    if task_instance and task_instance.execution_date:
        params['airflow.ctx.task_instance.execution_date'] = (
            task_instance.execution_date.isoformat()
        )
    return params
50dcdc37c708cfb18f8a39d5fd8380782f3ef8d2
178,239
def cols(rows):
    """
    >>> a = [
    ...     [1, 2],
    ...     ['a', 'b'],
    ...     [4, 5],
    ... ]
    >>> for c in cols(a):
    ...     print(c)
    [1, 'a', 4]
    [2, 'b', 5]
    >>> a = [
    ...     [1, 2, 3],
    ...     ['a', 'b', 'c'],
    ... ]
    >>> for c in cols(a):
    ...     print(c)
    [1, 'a']
    [2, 'b']
    [3, 'c']
    """
    all_c = []
    for ci in range(len(rows[0])):
        all_c.append([])
    for ci in range(len(rows[0])):
        for ri in range(len(rows)):
            assert len(rows[ri]) == len(all_c), "len(%r) != %i" % (rows[ri], len(all_c))
            all_c[ci].append(rows[ri][ci])
    return all_c
3cf7ffdb7fd93f72edab1745c1c1e7b6be6f2174
275,508
def calc_candidates(sudoku, row, col):
    """Return a set of candidates of the sudoku at (row, col).

    Args:
        sudoku (Sudoku): The :class:`Sudoku` instance for which the candidates
            are calculated.
        row (int): The row of the field
        col (int): The column of the field.

    Returns:
        set: A set of candidates for the field at (row, col)
    """
    if sudoku[row, col]:
        return {sudoku[row, col]}
    candidates = set(sudoku.numbers)
    for (i, j) in sudoku.surrounding_of(row, col, include=False):
        candidates.discard(sudoku[i, j])
    return candidates
dc91a345892dab6bb852ba8e1f7543005c64e27e
310,142
def dna_to_rna(seq):
    """
    Convert a DNA sequence to RNA.
    """
    # Determine if original sequence was uppercase
    seq_upper = seq.isupper()
    # Convert to lowercase
    seq = seq.lower()
    # Swap out 't' for 'u'
    seq = seq.replace('t', 'u')
    # Return upper or lower case RNA sequence
    if seq_upper:
        return seq.upper()
    else:
        return seq
dc07509d175976c8efd4d1e0e19ca801259ca8df
524,243
def make_vect(point1, point2):
    """makes a vector out of 2 points

    Args:
        point1: first point
        point2: second point

    Returns:
        vector of 2 points (tuple)
    """
    return ((point2[0] - point1[0]), (point2[1] - point1[1]))
ae2041d60b34c1d23a5f94bc75c8c8a412e62acb
424,591
import torch
import math

def double_phase_amp_phase(amplitudes, phases, three_pi=True, mean_adjust=True):
    """converts amplitude and phase to double phase coding

    amplitudes: per-pixel amplitudes of the complex field
    phases: per-pixel phases of the complex field
    three_pi: if True, outputs values in a 3pi range, instead of 2pi
    mean_adjust: if True, centers the phases in the range of interest
    """
    # normalize
    amplitudes = amplitudes / amplitudes.max()
    phases_a = phases - torch.acos(amplitudes)
    phases_b = phases + torch.acos(amplitudes)

    phases_out = phases_a
    phases_out[..., ::2, 1::2] = phases_b[..., ::2, 1::2]
    phases_out[..., 1::2, ::2] = phases_b[..., 1::2, ::2]

    if three_pi:
        max_phase = 3 * math.pi
    else:
        max_phase = 2 * math.pi

    if mean_adjust:
        phases_out -= phases_out.mean()

    return (phases_out + max_phase / 2) % max_phase - max_phase / 2
888557d96922d60d04ff0e1246055a71157ec8fd
569,782
def find_one_row(substr, df, col_name):
    """
    Return one row from `df`. The returned row has, in the `col_name` column,
    a value that contains `substr` as a sub-string.

    Raise KeyError if no row is found.
    """
    for name in df[col_name]:
        if substr.lower() in name.lower():
            return df[df[col_name] == name]
    raise KeyError("Could not find {} in the "
                   "pandas dataframe.".format(substr))
b577bc3e6e7fed7b9f110d94a38c74d6293e17e9
110,962
def manhattan(t1b: tuple, t2b: tuple) -> int:
    """For a pair of coordinate tuples, each (x, y),
    return the Manhattan distance between them."""
    t1x, t1y = t1b
    t2x, t2y = t2b
    return abs(t1x - t2x) + abs(t1y - t2y)
71ad347a8a05087e861176cfcb8533646060f880
650,511
from pathlib import Path

def enterprise_1_9_artifact() -> Path:
    """
    Return the path to a build artifact for DC/OS Enterprise 1.9.
    """
    return Path('/tmp/dcos_generate_config_1_9.ee.sh')
b43fb03fe608f3cac6269eef091d6770150b0a91
128,934
import math

def create_xp_table(start=1, end=100, base_xp=10, exponent=1.5):
    """
    Creates an experience/level table

    Keyword Arguments:
        start {int} -- first level (default: {1})
        end {int} -- last level, exclusive (default: {100})
        base_xp {int} -- base experience points (default: {10})
        exponent {float} -- growth exponent (default: {1.5})

    Returns:
        list -- experience points needed to reach each level
    """
    levels = [0]
    for level in range(start, end):
        xp = math.floor(base_xp * math.pow(level, float(exponent)))
        levels.append(xp)
    return levels
09e0a74311f2c4d35bdcbd467716c7e49d18bcb4
500,011
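The first few entries produced by create_xp_table above with its defaults (base_xp=10, exponent=1.5); index 0 is a filler so that table[level] is the XP needed for that level:

table = create_xp_table(end=5)
print(table)  # [0, 10, 28, 51, 80]  (floor of 10 * level**1.5 for levels 1..4)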
def get_applicable_styles(node):
    """
    Generates a list of dictionaries that contains all the styles that *could*
    influence the style of an element.

    This is the collection of all styles from an element and all its parent
    elements.

    Returns a list, with each list item being a dictionary with keys that
    correspond to CSS styles and the values are the corresponding values for
    each ancestor element.
    """
    styles = []
    for parent in node.xpath('ancestor-or-self::*[@style]'):
        style = parent.get('style', "")
        style = style.rstrip(";")
        if not style:
            continue
        styles.append(
            dict([
                tuple(s.strip().split(':', 1))
                for s in style.split(';')
            ])
        )
    return styles
69c4d1d190cb20ebf043c1f97e47f653cc86af52
355,215
import re

def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    # from Django 1.3.x
    # https://github.com/django/django/blob/6726d750979a7c29e0dd866b4ea367eef7c8a420/django/core/validators.py#L45-L51
    regex = re.compile(
        r"^(?:http|ftp)s?://"
        r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
        r"localhost|"
        r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        r"(?::\d+)?"
        r"(?:/?|[/?]\S+)$",
        re.IGNORECASE,
    )
    return re.match(regex, maybe_url) is not None
6f56557ace9dfaa580c5f8b6723282c80e0d1949
438,536
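A few spot checks for is_url above (illustrative only; example.com and the IP address are placeholders):

assert is_url("https://example.com/path?q=1")
assert is_url("ftp://192.168.0.1:2121/file.txt")
assert not is_url("example.com")   # missing scheme
assert not is_url("http://")       # missing host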
def IC_NIS(ic_cc, ic_ca, ic_pp, ic_pa, p_nis_a, p_nis_c):
    """
    Calculates the predicted binding affinity value based on the IC-NIS model.
    """
    return -0.09459*ic_cc + -0.10007*ic_ca + 0.19577*ic_pp + -0.22671*ic_pa \
        + 0.18681*p_nis_a + 0.13810*p_nis_c + -15.9433
8b4d549cd2eb9162c33a8f984c295a516cb88ad3
657,641
from typing import Any
from typing import Dict

def check_dict_str_str(data: Any) -> Dict[str, str]:
    """Check if data is `Dict[str, str]` and return it."""
    if not isinstance(data, dict):
        raise TypeError(data)
    for key, value in data.items():
        if not isinstance(key, str):
            raise TypeError(key)
        if not isinstance(value, str):
            raise TypeError(value)
    return data
ca5e400809f90db60230642670c61fc64191c797
326,603
def get_form_details(form):
    """
    This function extracts all possible useful information about an HTML `form`
    """
    details = {}
    # get the form action (target url)
    action = form.attrs.get("action").lower()
    # get the form method (POST, GET, etc.)
    method = form.attrs.get("method", "get").lower()
    # get all the input details such as type and name
    inputs = []
    for input_tag in form.find_all("input"):
        input_type = input_tag.attrs.get("type", "text")
        input_name = input_tag.attrs.get("name")
        inputs.append({"type": input_type, "name": input_name})
    # put everything to the resulting dictionary
    details["action"] = action
    details["method"] = method
    details["inputs"] = inputs
    return details
3868f9edf07ea731cb2ce8e68be603dd7a9a936c
628,759
def compound_interest(principal, rate, compounds_per_year, time):
    """Calculates the compounded amount (principal plus interest) over a given period of time"""
    principal, rate, compounds_per_year, time = (
        float(principal), float(rate), float(compounds_per_year), float(time))
    return principal * ((1 + ((rate / 100) / compounds_per_year)) ** (compounds_per_year * time))
8d7e56eb89678828da4c4b42ecce35fecced9767
389,130
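A worked example for compound_interest above: 1,000 at a 5% nominal annual rate, compounded monthly for 10 years, i.e. 1000 * (1 + 0.05/12) ** 120, which comes to roughly 1647.01 (the returned value is the full compounded amount):

print(round(compound_interest(1000, 5, 12, 10), 2))  # ~ 1647.01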
import pkg_resources

def load_description(package: str, path: str, filename: str):
    """
    This function loads a static description file.

    Parameters:
        package (str): Package name where file is located in
        path (str): Path within the package.
        filename (str): Name of file to load.

    Returns:
        str: Content of loaded file
    """
    with open(pkg_resources.resource_filename(
            package, f"{path}{filename}"), "r+") as file:
        return file.read()
1a4afa40366d054f8ebf491c301c5cbc2a3a694a
96,371
def convert_timedelta(td):
    """
    Convert a timedelta object to a timespec usable by `at`.

    Note that `at` does not understand seconds, so extra seconds are
    rounded down.
    """
    total_seconds = (td.microseconds / 1000000) + (td.days * 24 * 60 * 60) + \
        td.seconds
    total_minutes = total_seconds // 60
    plural = (abs(total_minutes) > 1)
    return 'now + {0} minute{1}'.format(total_minutes, 's' if plural else '')
2ad2fa2a589c12ee5f7d9461f47f1538e93151c7
164,944
def entitydata_values_add_field(data, property_uri, dup_index, value):
    """
    Add field to data; if duplicate then reformat appropriately.

    Updates and returns supplied entity value dictionary.
    """
    if property_uri in data:
        suffix = "__%d" % dup_index
    else:
        suffix = ""
    data[property_uri + suffix] = value
    return data
6d4eb2b05a32108ad6e75215bbc87b37636e64c2
248,777
def __get_col_name(col):
    """
    Internal helper function to get the actual name of a pandas DataFrame
    column from a column name that may consist of an arbitrary sequence of
    tuples. The function will return the last value of the innermost tuple.
    """
    curr_val = col
    while isinstance(curr_val, tuple):
        curr_val = curr_val[-1]
    return curr_val
faf9a338463b8eae0d6331d9562ea634049aaba3
204,650
def sequenceLength(sequenceFile):
    """Get the total # of bp from a fasta file."""
    seqLength = 0
    for line in open(sequenceFile):
        line = line.strip()
        if line == '' or line[0] == '>':
            continue
        seqLength += len(line)
    return seqLength
3cad40e95d873c4085e848c5217a89cd44b3eed7
477,618
def comp_diff_emerge(result, loc1, loc2, time, obs='sstopo'):
    """Compute differential glacial isostatic adjustment between locs 1 and 2.

    Parameters
    ----------
    result : GiaSimOutput
    loc1, loc2 : tuples of (lon, lat)
    time : the time for comparison
    obs : str
        The type of observation, defaults to 'sstopo', which is emergence.
        See print(result) for all possible fields.

    Note: fields computed in spectral space must be transformed prior to
    comparison, use result.transformObservers().
    """
    obs = getattr(result, obs)
    field = obs.nearest_to(time).T
    interper = result.inputs.grid.create_interper(field)
    xx, yy = result.inputs.grid.basemap((loc1[0], loc2[0]), (loc1[1], loc2[1]))
    diffs = interper.ev(xx, yy)
    return diffs[1] - diffs[0]
69c2dd7b0396132c86d02cb72d5d1b02c545ce75
574,681
def check_nonconvex_invest_type(dct):
    """
    Checks if flow attribute 'invest.nonconvex' is type bool, if the attribute
    is present.

    Parameters
    ----------
    dct : dict
        Dictionary with all parameters for the oemof-solph components.

    Returns
    -------
    dict : Updated Dictionary is returned.
    """
    for k, v in dct.items():
        if 'invest.nonconvex' in v.columns:
            v['invest.nonconvex'] = v['invest.nonconvex'].astype('bool')
        dct[k] = v
    return dct
a0c5f3f4e3e3bd97be795986a9ebf7b334ad6a60
505,411
def _get_offset(cmd):
    """Returns the offset into the cmd based upon if it's a dictionary page
    or a data page"""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset
ed280e089327bfa5e1d5895b7c323a589338ea84
323,357
def TruncateInSpace(labText, maxLenLab):
    """
    This truncates a string to a given length but tries to cut
    at a space position instead of splitting a word.
    """
    if len(labText) > maxLenLab:
        idx = labText.find(" ", maxLenLab)
        # sys.stderr.write("idx=%d\n"%idx)
        if idx < 0:
            idx = maxLenLab

        # BEWARE: This must not fall in the middle of an html entity "&amp;", etc... ...
        idxSemiColon = labText.find(";", idx)
        # sys.stderr.write("idxSemiColon=%d\n"%idxSemiColon)
        if idxSemiColon < 0:
            idx = maxLenLab
        else:
            idx = idxSemiColon + 1  # Just after the semi-colon.

        # sys.stderr.write("labText=%s idx=%d\n"%(labText,idx))
        return labText[:idx]
    else:
        return labText
fe30f2031970faedaf8d04b9e25d522689d281a6
457,827
def no_duplicates(route):
    """
    This function removes duplicate nodes that may be present in a route,
    ensuring it can be plotted.

    Parameters
    ----------
    route : list
        list of nodes traversed by route, in order

    Returns
    -------
    route : list
        list of nodes traversed by route, in order, with any immediate
        duplicates removed
    """
    # Iterate backwards so that deleting an element does not shift the
    # indices that are still to be visited (and cannot run past the end).
    for i in range(len(route) - 2, -1, -1):
        if route[i] == route[i + 1]:
            del route[i]
    return route
aeb97e7370c0bf1d4a25c40264d5673fcea7f162
671,082
import click

def generate_mapping(mapper, molA, molB):
    """Utility method to extract a single mapping from a mapper.

    Parameters
    ----------
    mapper : :class:`.LigandAtomMapper`
        the mapper to use to generate the mapping
    molA, molB : :class:`.SmallMoleculeComponent`
        molecules to map between

    Returns
    -------
    :class:`.LigandAtomMapping` :
        the mapping generated by the mapper; errors if there is not exactly
        one mapping generated
    """
    mappings = list(mapper.suggest_mappings(molA, molB))
    if len(mappings) != 1:
        raise click.UsageError(
            f"Found {len(mappings)} mappings; this command requires a mapper "
            "to provide exactly 1 mapping"
        )
    return mappings[0]
cd718e40337e4165f8fe20b9264381e08fb2c152
341,767
def sort_load_list_by_size(load_list):
    """Given the standard load list return a list ordered by load_set size"""
    return sorted(load_list, key=lambda t: len(t[2]))
2ec4b799e631dfae7e7dd80e7e13da78287ba999
212,540
def utf8(obj):
    """
    This helper function attempts to decode a string-like object to utf-8
    encoding. For Python 2 it just works. For Python 3 a string object does
    not have a decode method whilst a bytes object does.
    """
    try:
        value = obj.decode('utf-8')
    except AttributeError:
        value = obj
    return value
bd02bf79cb1950ee1e494b8f7622a338b4825d99
452,039
def indent(code, level):
    """ indent code to the given level """
    return code.replace("\n", "\n" + (" " * level))
71e1584165cac2ea76ac7a28c2b3368bf6ae9ef1
618,935
import socket

def can_resolve(target):
    """Tries to look up a hostname then bind to that IP address.

    Args:
        target: a hostname or IP address as a string

    Returns:
        True if the target is resolvable to a valid IP address
    """
    try:
        socket.getaddrinfo(target, None)
        return True
    except socket.error:
        return False
5d3394e95a94d79bfdd40e7b0ed6370bef35deb8
228,994
import time

def format_time_delta(cgxtimestamp):
    """
    Format a CloudGenix timestamp into Human Readable string.

    :param cgxtimestamp: Timestamp from CloudGenix API
    :return:
    """
    # cgxtimestamp may be milliseconds or nanoseconds. Drop the digits after the first 10.
    # this should work until 2264 or so :D
    cgx_seconds = int(str(cgxtimestamp)[:10])
    now_seconds = int(time.time())
    seconds = now_seconds - cgx_seconds
    # sanity check clock skew
    if seconds < 0:
        return "<=0s (client clock skew)"
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if days > 1:
        return f"{days}d{hours}h{minutes}m{seconds}s"
    elif hours > 0:
        return f"{hours}h{minutes}m{seconds}s"
    elif minutes > 0:
        return f"{minutes}m{seconds}s"
    else:
        return f"{seconds}s"
61685bcbaa822722b993c1eae41ccffad7177014
295,927
def get_async_result_silent(result, timeout=None):
    """
    Calls the ``get([timeout])`` method of an `AsyncResult
    <https://docs.python.org/latest/library/multiprocessing.html#multiprocessing.pool.AsyncResult>`__
    object *result* and returns its value. The only difference is that potentially
    raised exceptions are returned instead of re-raised.
    """  # noqa
    try:
        return result.get(timeout)
    except Exception as e:
        return e
1f06fa75db2957583f17edcd3ebb58179da5e6b1
400,830
def _run_autocommit_on(cnx, db_parameters):
    """Run autocommit on test.

    Args:
        cnx: The database connection context.
        db_parameters: Database parameters.
    """
    def exe(cnx, sql):
        return cnx.cursor().execute(sql.format(name=db_parameters["name"]))

    exe(
        cnx,
        """
INSERT INTO {name} VALUES(True), (False), (False)
""",
    )
    cnx.rollback()
    res = exe(
        cnx,
        """
SELECT COUNT(*) FROM {name} WHERE NOT c1
""",
    ).fetchone()
    assert res[0] == 4
636741c934bae0c7791ed7946c87e42600c2b0d2
92,027
def fix_json_fname(fname):
    """Add JSON suffix to file name if it's missing"""
    if fname.lower().endswith('.json'):
        return fname
    else:
        return fname + ".json"
dcf343066a36be29333eefc406997f3bed60b938
484,276
def _odd(x: int) -> bool:
    """Checks if integer is odd"""
    return x % 2 == 1
eaf6d8f6ae2ac5bda2585edf740893ec69772c87
307,664