content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def get_pad_params(desired_size, cur_size):
    """Return the (before, after) padding widths for ``np.pad``.

    Args:
        desired_size: int, target size after padding.
        cur_size: int, current size; must not exceed desired_size.

    Returns:
        tuple(int, int): values padded to the edges — nothing before,
        all of the difference after.
    """
    assert desired_size >= cur_size
    return (0, desired_size - cur_size)
d5e69bd9ac14f4b5240cbab3bfa5e66fcd11266a
421,619
def schedd_states(schedd_classad):
    """Summarize the number of jobs per job state for a schedd.

    :param schedd_classad: classad (mapping) for the schedd to query
    :return: dict mapping job-state name to the number of jobs in it
    """
    state_to_attr = (
        ('Running', 'TotalRunningJobs'),
        ('Idle', 'TotalIdleJobs'),
        ('Held', 'TotalHeldJobs'),
        ('Removed', 'TotalRemovedJobs'),
    )
    return {state: schedd_classad[attr] for state, attr in state_to_attr}
544ef96fa6e0e4d13d3972cca21e201e58292144
342,073
import fnmatch


def matchPattern(string, pattern):
    """Shell-style wildcard match of *string* against *pattern*.

    >>> matchPattern("nameTest1", "nameTest")
    False
    >>> matchPattern("nameTest1", "nameTest*")
    True
    """
    matched = fnmatch.fnmatch(string, pattern)
    return matched
ff8bf4ee28af701139e9e4b900171338c6a354d1
692,816
import math


def divisors(n, non_trivial=False):
    """Return a sorted list of the divisors of *n*.

    With ``non_trivial=True`` the trivial divisors 1 and n are dropped.
    """
    found = set()
    for small in range(1, int(math.sqrt(n)) + 1):
        big, remainder = divmod(n, small)
        if remainder == 0:
            found.add(small)
            found.add(big)
    ordered = sorted(found)
    return ordered[1:-1] if non_trivial else ordered
a545540ca284427ed5a95b18ee7671e9f53aac85
273,292
def get_unique_level_values(index):
    """Return the unique values for every level of *index*, in order.

    Parameters
    ----------
    index : pandas Index / MultiIndex
        Must expose a ``levels`` attribute.

    Returns
    -------
    list of lists
    """
    unique_per_level = []
    for level in range(len(index.levels)):
        unique_per_level.append(index.get_level_values(level).unique().tolist())
    return unique_per_level
bf227be082ffd8c01355ddb319fc5c750968883a
159,754
def pt2mm(value):
    """Convert a length in typographic points to millimeters.

    Args:
        value (int): length in points.

    Returns:
        (float): length in millimeters.
    """
    mm_per_point = 0.352777778
    return value * mm_per_point
530078d1cae9bda7fc3cb2ddde3411346dae37d7
561,875
import hashlib


def get_hash_of(plain_text, algo):
    """Hash *plain_text* with the named hashing algorithm.

    Args:
        plain_text (str): word read from the word list.
        algo (str): any algorithm name accepted by ``hashlib.new``.

    Returns:
        str: hex digest of the encoded plain text.
    """
    digest = hashlib.new(algo)
    digest.update(plain_text.encode())
    return digest.hexdigest()
e9ba9e1e6eed5ef3ce8c190e899f44c1c834e38e
454,118
def _change_dict_key(the_dict, old_key, new_key, default=None): """ Changes a dictionary key name from old_key to new_key. If old_key doesn't exist the value of new_key becomes and empty dictionary (or the value of `default`, if set). """ if default is None: default = dict() v = the_dict.get(old_key, default) if new_key not in the_dict: the_dict[new_key] = v if old_key in the_dict: del the_dict[old_key] return the_dict
79dfe82bf7597e714d4a3234ff3694c194fd5726
273,431
def perform_column(args, src, target):
    """Execute xSniper in column mode.

    Copies one, several, or all columns from *target* into *src*.

    Args:
        args: docopt dictionary.
        src: CSVFile object containing <src.csv>.
        target: CSVFile object containing <target.csv>.

    Returns:
        src, extended with the selected columns of target.
    """
    headers = args['<target-header>']
    if args['-a'] or args['--all']:
        columns = target.df_content
    elif len(headers) > 1:
        columns = target.get_columns(headers)
    else:
        columns = target.get_single_column(headers)
    src.add_columns(columns)
    return src
c03bc081a7f71e7a2a12304ba9eb9271028865fb
148,288
def next_pow_two(max_sent_tokens):
    """Round *max_sent_tokens* up to a power of two in [16, 512].

    Args:
        max_sent_tokens (int): the integer to round.

    Returns:
        int: smallest of {16, 32, 64, 128, 256, 512} not below the
        input; values above 512 clamp to 512.
    """
    for candidate in (16, 32, 64, 128, 256, 512):
        if max_sent_tokens <= candidate:
            return candidate
    return 512
65cbd30b270d976f8bbf1613582e2799169e26e8
251,518
def format_action(action):
    """Wrap *action* in a ``<name>`` element for a request body."""
    return f'<name>{action}</name>'
71871fa855310f7af5ca03f681917fa3962709e9
58,348
from datetime import datetime


def unrets_date(rets_date):
    """Convert a RETS date (ISO 8601 format) into a Python datetime.

    :param rets_date: a RETS/ISO 8601 timestamp, with or without a
        fractional-seconds part
    :type rets_date: str
    :rtype: datetime.datetime
    :return: rets_date as a Python datetime
    :raises ValueError: if the string matches neither accepted form
    """
    # Try the fractional-seconds form first (the original sole behavior),
    # then fall back to whole seconds so 'YYYY-MM-DDTHH:MM:SS' parses too.
    try:
        return datetime.strptime(rets_date, '%Y-%m-%dT%H:%M:%S.%f')
    except ValueError:
        return datetime.strptime(rets_date, '%Y-%m-%dT%H:%M:%S')
9f8cae6880ac4d2f285eff856db09da0a39ec4ee
17,946
def _get_all_cnames(gi): """ Returns a set of all constraints used by a given gi (generator_info - represents a single NT) """ cnames = [] for rule in gi.parser_output.instructions: cnames.extend(list(rule.xed3_constraints.keys())) return set(cnames)
8235144d221aded5623b5b76a72b036f6f6082fd
307,502
from pathlib import Path


def read_python(path):
    """Execute a Python file and collect the variables it defines.

    Parameters
    ----------
    path : str or Path

    Returns
    -------
    metadata : dict
        All variables defined in the file (via ``exec()``), with keys
        lower-cased.

    Raises
    ------
    IOError
        If *path* does not exist.
    """
    path = Path(path)
    if not path.exists():  # pragma: no cover
        raise IOError("Path %s does not exist.", path)
    namespace = {}
    exec(path.read_text(), {}, namespace)
    return {key.lower(): value for key, value in namespace.items()}
6cac66e86a566b9181f1ae15be3c2eba879083d6
223,278
def htmlDataset(dataset = {}, title=""):
    """ Utility function to generate HTML Table from a Dataset"""
    # NOTE(review): iterating `dataset['DATA']` and then indexing
    # `dataset['DATA'][row]` implies DATA maps row numbers (0 = header row)
    # to lists of cell strings -- confirm the expected DATA shape; a plain
    # list of row-lists would not index correctly here.
    content = "<TABLE cellpadding=5> <caption align=top>" + title + " </caption><TR></TR><TR></TR>"
    for row in dataset['DATA']:
        content += "<TR>"
        for col in dataset['DATA'][row]:
            # Row 0 is rendered as a header (TH, green); the rest as data
            # cells (TD, yellow).
            if row==0:
                content += "<TH align=left bgcolor=#BBDB88>"
            else:
                content += "<TD align=left bgcolor=#FFFAB2>"
            content += col
            if row==0:
                content += "</TH>"
            else:
                content += "</TD>"
        content += "</TR>"
    content += "</TABLE>"
    return content
871c44487fadacdb8c255389a57065b0887df1c6
49,075
def get_project_url(pk):
    """Return the URL of the project's task page."""
    return "/projects/{}/tasks".format(pk)
753150730f07543d6be69da1356483097d9d395b
188,627
def nickname_commands(*command_list):
    """Decorate a function to trigger on lines starting with "$nickname: command".

    :param str command_list: one or more command name(s) to match (can be
        regular expressions)

    Adds every name in *command_list* to the decorated function's
    ``nickname_commands`` attribute (created on demand), skipping
    duplicates, so several commands can share one callable.  The
    resulting match object will have the command as the first group; the
    rest of the line, excluding leading whitespace, as the second group;
    and parameters 1 through 4, separated by whitespace, as groups 3-6.
    """
    def add_attribute(function):
        if not hasattr(function, 'nickname_commands'):
            function.nickname_commands = []
        registered = function.nickname_commands
        for name in command_list:
            if name not in registered:
                registered.append(name)
        return function
    return add_attribute
8a11fcf36a8dfdda00edbe0014f662b7a3e0746c
520,777
def FlagIsExplicitlySet(args, flag):
    """Return True if --flag was explicitly passed by the user.

    The hasattr guard lets the same code run on release tracks that do
    not define the flag at all yet.
    """
    if not hasattr(args, flag):
        return False
    return args.IsSpecified(flag)
b6b75bfe4927bc0aa1cab2d6a755c310626ccbf3
489,058
def trim_exams(image_channels):
    """Trim every exam to the frame count of the shortest one.

    Receives an arbitrary list of arrays shaped (num_frames, height,
    width) and cuts each along the first axis so all share the smallest
    num_frames.
    """
    shortest = min(channel.shape[0] for channel in image_channels)
    return [channel[:shortest] for channel in image_channels]
af466a19cae3ccf066ad5bb93dda112d81f19c6d
90,402
def ntToPosixSlashes(filepath):
    r"""Replace NT backslashes (\) in *filepath* with POSIX slashes (/).

    Falsy inputs (None, "") are returned unchanged.
    """
    if not filepath:
        return filepath
    return filepath.replace('\\', '/')
528caf6ee8e1514fd73cd9b65006d797331eadd9
56,145
def percent_decrease(v1, v2):
    """Return the signed percent change from v1 (old) to v2 (new).

    Computes (v2 - v1) / |v1| * 100; negative results indicate a
    decrease.  A tiny epsilon (1e-10) in the denominator guards against
    division by zero when v1 == 0.

    NOTE(review): the original docstring showed (old - new) / |old|,
    but the code computes (new - old) / |old| -- confirm which sign
    convention callers expect.
    """
    return (v2 - v1) / (abs(v1) + 1e-10) * 100
f21f4362e300dded9745597d9c2df005b9eea5fb
373,475
def is_java_file(file_name):
    """Check whether a file name has a ``.java`` ending.

    :param file_name: file name to check
    :type file_name: str
    :return: True if the name ends with ".java"
    :rtype: bool
    """
    java_suffix = ".java"
    return file_name.endswith(java_suffix)
dac9d4a55934e172d602f79bfc108ffbf888ea1c
398,331
import json


def get_mass_calibration(calibration_file, volume):
    """Compute a mass from a calibration curve: mass = volume / slope.

    Parameters
    ----------
    calibration_file: str
        JSON file holding the calibration slope under the key "slope".
    volume: float
        Volume of the blob from GC Image.

    Returns
    -------
    mass_IS: float
        Mass of the reference compound.
    """
    with open(calibration_file) as handle:
        calibration = json.load(handle)
    return volume / float(calibration["slope"])
ace1774f5c9adda33ceb9c1edfa1cb58a066ec8a
564,696
def strHypInd(i, j):
    """Return an order-independent 'i-j' identifier for the hyperplane
    of the ith and jth observations (smaller index first)."""
    lo, hi = (i, j) if i <= j else (j, i)
    return '{}-{}'.format(lo, hi)
f66c462d8ba0ca62878cfa92db7ce7d06bf47024
693,538
def split_duration(total_duration, target_duration, split_last=False):
    """
    Splits total_duration into parts

    :param total_duration: total duration of file
    :param target_duration: target duration of parts
    :param split_last: if True, last part may be less than others,
        otherwise it can be more that others
    :return: array of object with fields
        - start - start of part
        - end - end of part
        - duration - duration of part
    Note that the last part's duration will probably be not equal target_duration
    """
    res = []
    start = 0
    # Uniform parts stop two target-lengths before the end (one, when
    # split_last) so the tail can absorb the remainder.
    pre_end = total_duration - target_duration * 2
    total_count = total_duration // target_duration
    if split_last:
        pre_end = pre_end + target_duration
    while start < pre_end:
        res.append({'start': start, 'end': start + target_duration, 'duration': target_duration})
        start = start + target_duration
    if split_last:
        # One more full-length part, then a (possibly shorter) remainder.
        res.append({'start': start, 'end': start + target_duration, 'duration': target_duration})
        # NOTE(review): the tail duration is derived from total_count rather
        # than from (end - start) -- confirm they agree when target_duration
        # does not evenly divide total_duration.
        res.append({'start': start + target_duration, 'end': total_duration, 'duration': total_duration - target_duration * (total_count - 1)})
    else:
        # Single oversized tail covering everything that is left.
        res.append({'start': start, 'end': total_duration, 'duration': total_duration - target_duration * (total_count - 2)})
    return res
b07a264c79629e567947c1e1be4d598f1c274080
138,738
from typing import Sequence from typing import Optional def _cast_covariates(listlike: Sequence[str]) -> Optional[list]: """We are unsure if scVI works with duck typing, so we explicitly want to cast a given sequence to a list (or None). Moreover, scVI doesn't accept empty lists -- we need to cast empty list to None. Args: listlike: None or anything that behaves like a list (and can be explicitly casted) Returns: None, if `listlike` is None or is empty, or a list with the same elements as `listlike`, if it's nonempty """ if listlike is None or len(listlike) == 0: return None else: return list(listlike)
afaa40bc3cf8174733e0de6ad148e6099a60b345
267,406
def split_on_attributes(keys, rows):
    """Group dict-like *rows* by their values under *keys*.

    Given a tuple of column names and a collection of rows, returns a
    dict mapping each distinct key-tuple to the list of rows that carry
    those values.
    """
    grouped = {}
    for row in rows:
        group_key = tuple(row[k] for k in keys)
        grouped.setdefault(group_key, []).append(row)
    return grouped
a4dd36dcec591ab77709678995f483a6e00e863e
507,856
def avg(num_1, num_2):
    """Return the arithmetic mean of two numbers as a float."""
    total = num_1 + num_2
    return total / 2.0
b2ffc52e06f5b142d66aca7fe04cd89d81cfeb02
131,905
def start_stop_to_one_based(dictionary):
    """Convert SNP start/stop indices from 0-based strings to 1-based ints.

    For every inner list in every value of *dictionary*, the first two
    entries are cast to int and incremented by one, in place.  Returns
    the (mutated) dictionary.
    """
    for key in dictionary:
        for inner in dictionary[key]:
            for pos in (0, 1):
                inner[pos] = int(inner[pos]) + 1
    return dictionary
add9e95db7bedb8e2f8bf334cbd4644a10fe043f
295,036
def split_network_line(line):
    """Parse one protocol line of /proc/virtual/<xid>/cacct.

    Each protocol line is formatted like ``INET: 32/9909 43/253077 0/0``
    where the fields after the label are recv, send and fail counts in
    "calls/octets" form; the fail field is ignored.

    Args:
        line: str, a line of text from the cacct file.

    Returns:
        4-tuple of int: ('recv' syscalls, received octets,
        'send' syscalls, sent octets).
    """
    _label, recv_part, send_part = line.strip().split()[:3]
    recv_calls, rx_octets = recv_part.split('/')
    send_calls, tx_octets = send_part.split('/')
    return (int(recv_calls), int(rx_octets), int(send_calls), int(tx_octets))
9f8899595da8aba6d4cd29ea2ada67cfeb6b5a14
606,721
def _summarize_accessible_fields(field_descriptions, width=40, section_title='Accessible fields'): """ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str """ key_str = "{:<{}}: {}" items = [] items.append(section_title) items.append("-" * len(section_title)) for field_name, field_desc in field_descriptions.items(): items.append(key_str.format(field_name, width, field_desc)) return "\n".join(items)
164e4bfc3fc1952bae8b3c80ca1fe738a6d53e14
351,125
def shorten_dfs(dfs, plot_start=None, plot_end=None):
    """Truncate all incidence DataFrames to a shared index range.

    The range runs from the latest first index to the earliest last
    index across all frames; plot_start / plot_end narrow it further
    when they fall inside that window.

    Args:
        dfs (dict): scenario name -> incidence DataFrame.
        plot_start (pd.Timestamp or None): earliest allowed start date.
        plot_end (pd.Timestamp or None): latest allowed end date.

    Returns:
        shortened (dict): scenario name -> shortened deep copy.
    """
    first = max(df.index.min() for df in dfs.values())
    last = min(df.index.max() for df in dfs.values())
    if plot_start is not None and plot_start < last:
        first = max(plot_start, first)
    if plot_end is not None and plot_end > first:
        last = min(plot_end, last)
    return {name: df.loc[first:last].copy(deep=True) for name, df in dfs.items()}
a059d0a27ec521d816679aeba1e5bf355cb5dc16
672,201
def _remove_trailing_nones(array): """ Trim any trailing Nones from a list """ while array and array[-1] is None: array.pop() return array
f5f76c6d3ac2336c3345cfb20067882252ef19ae
276,320
def is_file(value: str):
    """Return True when *value* is a direction to a file (i.e. starts
    with "file"), False otherwise."""
    file_prefix = "file"
    return value.startswith(file_prefix)
360ba82e8c4bb25bba2bee96f6ce205b231647f7
348,445
def bearing_flipped(dir1, dir2):
    """Direction-flipped check: True when *dir2* travels exactly
    opposite to *dir1* (their x and y components cancel)."""
    return dir1.x + dir2.x == 0 and dir1.y + dir2.y == 0
ae13aab868e1a0cd61f6ed7b32803da63b0bb816
176,737
from typing import List


def build_graph(order: int, edges: List[List[int]]) -> List[List[int]]:
    """Build an adjacency list from the edges of an undirected graph.

    Args:
        order: number of vertices, labelled 0..order-1.
        edges: list of [u, v] endpoint pairs.

    Returns:
        adjacency list where entry u holds the neighbors of u.
    """
    adjacency: List[List[int]] = [[] for _ in range(order)]
    for endpoint_a, endpoint_b in edges:
        adjacency[endpoint_a].append(endpoint_b)
        adjacency[endpoint_b].append(endpoint_a)
    return adjacency
86bdd0d4314777ff59078b1c0f639e9439f0ac08
660
def xor(a, b):
    """Return the byte-wise exclusive-or of two 16-byte blocks as a list."""
    return [a[i] ^ b[i] for i in range(16)]
ae5adba23df1b979ac6d69c436e6dd1ec527ab6a
97,864
def aspectRatio(size, maxsize=None, maxw=None, maxh=None):
    """scale a size tuple (w,h) to
    - maxsize (max w or h)
    - or max width maxw
    - or max height maxh."""
    w, h = size
    # Defaults of 1/1 mean "no scaling" when no constraint is supplied.
    denom = maxcurrent = 1
    if maxsize:
        # Constrain whichever dimension is larger.
        maxcurrent = max(size)
        denom = maxsize
    elif maxw:
        maxcurrent = w
        denom = maxw
    elif maxh:
        maxcurrent = h
        denom = maxh
    if maxcurrent == denom:
        # Already at the requested size (also the no-constraint case).
        return size
    elif maxsize == 0:
        # NOTE(review): only reachable when maxsize == 0 is passed together
        # with maxw/maxh (since `if maxsize:` is False for 0); presumably a
        # guard against a zero constraint -- confirm the intended keyword
        # precedence.
        return size
    ratio = maxcurrent / float(denom)
    neww = int(round(w / ratio))
    newh = int(round(h / ratio))
    return neww, newh
e537c02f4b1f413b7c7b17ec8108b1a6157c17fe
451,162
def calculate_number_of_validation_images(n):
    """Return how many validation images to select from a pool of size
    *n*: one sixth, rounded down."""
    pool_fraction = 6
    return n // pool_fraction
7199acceca2c6026a926991871d52c6fc864d124
225,500
def test_chess_cell(x, y): """ Source https://pythontutor.ru/lessons/ifelse/problems/chess_board/ Condition Two checkerboard squares are set. If they are painted the same color, print the word YES, and if in different colors - then NO. The program receives four numbers from 1 to 8 each, specifying the column number and   line number first for the first cell, then for the second cell. """ if x % 2 != 0 and y % 2 != 0: # False if cell white, True if cell black return True if x % 2 != 0 and y % 2 == 0: return False if x % 2 == 0 and y % 2 != 0: return False else: return True
2f5d597fa869949ba0ca205c799aa3f98a2fa75d
683,330
async def resolve_user(_root, info, **args):
    """Resolver function for fetching a user object.

    Looks the user up by the "id" argument in the registry stored on the
    request context.

    Args:
        _root: parent value (unused).
        info: GraphQL resolve info; ``info.context["registry"]`` must
            expose an awaitable ``get(id)``.
        **args: resolver arguments; "id" selects the user.

    Returns:
        The awaited result of the registry lookup.
    """
    return await info.context["registry"].get(args["id"])
ee01506eedae0c28a3ae6ae5df9593909f46c109
235,984
def trimMatch(x, n):
    """Trim the string *x* to be at most length *n*.

    Trimmed matches are reported with the syntax ``ACTG[a,b]`` where
    ``a`` is the trimmed length and ``b`` the full length of the match.

    EXAMPLE:
    trimMatch('ACTGNNNN', 4)
    >>>'ACTG[4,8]'
    trimMatch('ACTGNNNN', 8)
    >>>'ACTGNNNN'

    Args:
        x (str): the matched string.
        n (int or None): maximum length; None disables trimming.

    Returns:
        str: x unchanged, or its first n characters plus the marker.
    """
    # BUG FIX: test n for None BEFORE comparing lengths; the original
    # order (`len(x) > n and n is not None`) raises TypeError on n=None
    # in Python 3 because int > None is not allowed.
    if n is not None and len(x) > n:
        return x[0:n] + '[' + str(n) + ',' + str(len(x)) + ']'
    return x
43bfbcfa0286646fae75c73bccb245a9f113131e
77,611
def translate_none_or_blank_to_empty(my_string):
    """Return the stripped string, or "" when the input is None, empty,
    or whitespace-only."""
    if not my_string:
        return ""
    stripped = my_string.strip()
    return stripped if stripped else ""
84af4da22dbcdbacc0570d80563b36d024b63ffc
177,094
from typing import List from pathlib import Path from typing import Set
def get_cl_tags(files: List[Path]) -> List[str]:
    """
    Come up with descriptive tags given a list of files changed.

    For each path, use the first rule that applies in the following order:
    1) Pick the path component right after the last "lib", "bin",
       "drivers", or "devices".
    2) If the path begins with "src", then
       - if the third path component is "tests" then pick the fourth component
       - pick the third path component, e.g. src/developer/shell -> shell
    3) If the path begins with "zircon", then pick the path component after
       either "ulib" or "utest",
       e.g. zircon/system/ulib/fs-pty/test/service-test.cc -> fs-pty

    Example:
    get_cl_tags([
        "src/lib/loader_service/loader_service_test.cc",
        "src/lib/loader_service/loader_service_test_fixture.cc",
    ]) == ["loader_service"]
    """
    def get_tag(p: Path) -> str:
        # Rule 1: remember the component after the LAST lib/bin/drivers/
        # devices marker (the loop keeps overwriting `tag`), skipping
        # "tests" directories and .cc files.
        tag: str = ""
        for part, next_part in zip(p.parts, p.parts[1:]):
            if (
                part == "lib"
                or part == "bin"
                or part == "drivers"
                or part == "devices"
            ):
                if next_part != "tests" and not next_part.endswith(".cc"):
                    tag = next_part
        if tag != "":
            return tag
        if p.parts[0] == "build":
            return "build"
        if p.parts[0] == "src":
            # Rule 2: third component, or fourth when the third is "tests".
            if len(p.parts) >= 3:
                if p.parts[2] == "tests" and not p.parts[3].endswith(".cc"):
                    return p.parts[3]
                return p.parts[2]
        if p.parts[0] == "zircon":
            # Rule 3: component after "ulib" or "utest".
            for part, next_part in zip(p.parts, p.parts[1:]):
                if part == "ulib" or part == "utest":
                    return next_part
        raise RuntimeError(f"Could not infer tags from path {p}")

    # De-duplicate via a set, then return tags in sorted order.
    tags: Set[str] = set()
    for file in files:
        tags.add(get_tag(file))
    return sorted(list(tags))
bb274802e7e9f00af3c76c36b8ba48b085266db2
648,466
import torch
def rmsprop(opfunc, x, config, state=None):
    """ An implementation of RMSprop

    ARGS:
    - 'opfunc' : a function that takes a single input (X), the point of a
        evaluation, and returns f(X) and df/dX
    - 'x' : the initial point
    - 'config` : a table with configuration parameters for the optimizer
    - 'config['learningRate']' : learning rate
    - 'config['alpha']' : smoothing constant
    - 'config['epsilon']' : value with which to initialise m
    - 'config['weightDecay']' : weight decay
    - 'state' : a table describing the state of the optimizer;
        after each call the state is modified
    - 'state['m']' : leaky sum of squares of parameter gradients,
    - 'state['tmp']' : and the square root (with epsilon smoothing)

    RETURN:
    - `x` : the new x vector
    - `f(x)` : the function, evaluated before the update

    NOTE(review): this uses the legacy torch signatures
    ``add_(scalar, tensor)`` / ``addcmul_(scalar, t1, t2)`` /
    ``addcdiv_(scalar, t1, t2)`` which newer PyTorch releases reject
    (they require ``value=``/``alpha=`` keywords) -- confirm the target
    torch version before upgrading.
    """
    # (0) get/update state
    if config is None and state is None:
        raise ValueError("rmsprop requires a dictionary to retain state between iterations")
    state = state if state is not None else config
    lr = config.get('learningRate', 1e-2)
    alpha = config.get('alpha', 0.99)
    epsilon = config.get('epsilon', 1e-8)
    wd = config.get('weightDecay', 0)
    # (1) evaluate f(x) and df/dx
    fx, dfdx = opfunc(x)
    # (2) weight decay: dfdx += wd * x
    if wd != 0:
        dfdx.add_(wd, x)
    # (3) initialize mean square values and square gradient storage
    if 'm' not in state:
        state['m'] = x.new().resize_as_(dfdx).zero_()
        state['tmp'] = x.new().resize_as_(dfdx)
    # (4) calculate new (leaky) mean squared values:
    #     m = alpha * m + (1 - alpha) * dfdx^2
    state['m'].mul_(alpha)
    state['m'].addcmul_(1.0 - alpha, dfdx, dfdx)
    # (5) perform update: x -= lr * dfdx / (sqrt(m) + epsilon)
    torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
    x.addcdiv_(-lr, dfdx, state['tmp'])
    # return x*, f(x) before optimization
    return x, fx
4b0dd89ed67cf6403178df13e0b3a0f51f8a7b4d
327,551
import asyncio


def async_return(result):
    """Mock a return from an async function by wrapping *result* in an
    already-resolved Future."""
    resolved = asyncio.Future()
    resolved.set_result(result)
    return resolved
8dcf55b5fc1ded019c9bec668d17f919215b09c7
680,251
import torch


def swish(x: torch.Tensor, beta: torch.Tensor) -> torch.Tensor:
    """Swish activation from arXiv:1710.05941: ``x * sigmoid(beta * x)``.

    Args:
        x (torch.Tensor): 2D (N, C) or 4D (N, C, H, W) input tensor.
        beta (torch.Tensor): per-channel swish-beta, constant or a
            trainable parameter.

    Returns:
        torch.Tensor: output tensor with the same shape as *x*.
    """
    if x.dim() == 2:
        scaled = beta[None, :] * x
    else:
        scaled = beta[None, :, None, None] * x
    return x * torch.sigmoid(scaled)
8fd639ad9e54394e1d1499bb2dc6385d843b87a2
577,548
import random


def single_point(parent1, parent2, locus=None):
    """Return two chromosomes created with single-point crossover.

    Suitable for list or value encoding; parents of heterogenous
    lengths are handled by treating the shorter one as parent1.

    Args:
        parent1 (List): A parent chromosome.
        parent2 (List): A parent chromosome.
        locus (int): The locus at which to crossover, or ``None`` for a
            randomly selected locus.

    Returns:
        List[List]: Two new chromosomes descended from the given parents.
    """
    if len(parent1) > len(parent2):
        parent1, parent2 = parent2, parent1
    if locus is None:
        locus = int(random.triangular(1, len(parent1) / 2, len(parent1) - 2))
    head1, tail1 = parent1[:locus], parent1[locus:]
    head2, tail2 = parent2[:locus], parent2[locus:]
    return [head1 + tail2, head2 + tail1]
ecd8cce55aaf6b10e445c36dfe052f1d00f22698
518,144
def number_to_binary_array(n, r):
    """Return *n* as a list of bits, zero-padded on the left to length *r*.

    Intended for numbers less than 2**r so the result fits in r bits.
    """
    bits = [int(digit) for digit in bin(n)[2:]]
    padding = [0] * (r - len(bits))
    return padding + bits
2afbc382fc9ab5206527f4a2c8b7317f6177e7d5
386,518
def get_path(data, path):
    """Fetch a value in a nested dict/list using a path of keys/indexes.

    If any step of the path fails, None is returned.

    example:
        get_path({'x': [1, {'y': 'result'}]}, ['x', 1, 'y'])  -> 'result'
    """
    current = data
    for step in path:
        try:
            # BUG FIX: descend from the current node; the original indexed
            # `data[p]` every iteration, so any path deeper than one level
            # re-read from the root and returned None (or a wrong value).
            current = current[step]
        except Exception:
            return None
    return current
e4c978b70dea9b8c291a9003a7e55bbd36616869
107,819
import json


def response(message, status_code):
    """Shape a valid HTTP response from a message and status code.

    The JSON body is attached only when *message* is truthy; CORS is
    left wide open.
    """
    shaped = {
        'statusCode': str(status_code),
        'headers': {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        }
    }
    if message:
        shaped['body'] = json.dumps(message)
    return shaped
e8ddc944a17443401682c3cdebe7caa605ce25aa
338,070
def api_url(domain):
    """Return the Freshbooks API URL for a given domain.

    >>> api_url('billing.freshbooks.com')
    'https://billing.freshbooks.com/api/2.1/xml-in'
    """
    return 'https://{}/api/2.1/xml-in'.format(domain)
4507564b1594e982963c80c27a55cafb4ae83652
522,025
def unique_colname(suggested, existing):
    """Return *suggested*, prefixed with as many '_' characters as needed
    so that it does not collide with any name in *existing*."""
    candidate = suggested
    while candidate in existing:
        candidate = '_{0}'.format(candidate)
    return candidate
21e722e0bb18991564a2e4fa6d9fb870166a24a3
377,834
import re
def regex_overlap(text, regex):
    """
    for a list of tokens in text and a regex, match it in the tokens and
    return a list of booleans denoting which tokens are a part of the match
    """
    overlap = [False for t in text]
    # Search the space-joined token string; match character offsets are then
    # mapped back to token indices.
    for m in re.finditer(regex, ' '.join(text)):
        begin, end = m.span()
        i = 0  # running character offset of token j inside the joined string
        for j, t in enumerate(text):
            # A token overlaps the match when its start or its end falls
            # inside the half-open span [begin, end).
            if begin <= i < end or begin <= i+len(t) < end:
                overlap[j] = True
            i += len(t) + 1  # +1 accounts for the joining space
    return overlap
77fbb138cb98f2e4f7a6d14c932c17666cac6c1e
688,024
from bs4 import BeautifulSoup


def read_xml_conf(filename):
    """Read a configuration file from the local filesystem.

    The file must be valid XML; it is parsed with BeautifulSoup's lxml
    backend.

    :param filename: XML file holding the configuration options.
    :returns: configuration parsed from the XML file.
    :raises: None
    """
    with open(filename) as handle:
        raw = handle.read()
    return BeautifulSoup(raw, features='lxml')
fca464152c1bd192ca4b5b578ddfb0c34f309e0a
311,175
import math def _sine_sample(amp, freq, rate, i) -> float: """ Generates a single audio sample taken at the given sampling rate on a sine wave oscillating at the given frequency at the given amplitude. :param float amp The amplitude of the sine wave to sample :param float freq The frequency of the sine wave to sample :param int rate The sampling rate :param int i The index of the sample to pull :return float The audio sample as described above """ return float(amp) * math.sin(2.0 * math.pi * float(freq) * (float(i) / float(rate)))
223a48fcbea6f9ef98418d9a3d161f37f64f2a05
129,029
import math


def compute_idfs(documents):
    """Map every word in *documents* to its IDF value.

    Given a dictionary of `documents` mapping names to lists of words,
    returns a dictionary mapping each word that appears in at least one
    document to log(num_docs / num_docs_containing_word).
    """
    vocabulary = set()
    for words in documents.values():
        vocabulary.update(words)
    num_docs = len(documents)
    idfs = {}
    for word in vocabulary:
        containing = sum(1 for words in documents.values() if word in words)
        idfs[word] = math.log(num_docs / containing)
    return idfs
710b068c957c0ea662e84604d6008ac257c4079b
456,694
def get_tag(ec2_instance, tag_name):
    """Look up the value of a tag on an EC2 instance.

    :param ec2_instance: a boto3 resource representing an Amazon EC2
        Instance.
    :param tag_name: key name to search for.
    :return: the tag value, or None when the instance has no tags or the
        key is absent.
    """
    tags = ec2_instance.tags
    if tags is None:
        return None
    matching = (t['Value'] for t in tags if t['Key'] == tag_name)
    return next(matching, None)
481661a3fa14005b932d78c2065327e2edc3c7cf
288,688
import re


def format_date(date):
    """Format *date* as month/day/year without zero-padding.

    e.g. 01/02/2013 becomes 1/2/2013.
    """
    padded = date.strftime("%m/%d/%Y")
    return re.sub(r"\b0(\d)", r"\1", padded)
9a078220cb59aa33854c38ebe63a4c72ba7f7e57
359,392
def ask_user(question):
    """
    Simple yes/no screen for user, where the "question" string is asked.
    Takes y(yes)/n(no) as valid inputs. If no valid input is given, the
    question is asked again.

    Args:
        question (str): What question should be asked for the y/n menu

    Returns:
        (bool): true/false, answer to question
    """
    check = str(input(f"{question} (Y/N): ")).lower().strip()
    try:
        # Only the first character is inspected, so "yep"/"nah" also work.
        # An empty answer raises IndexError on check[0] and falls into the
        # except branch, which re-prompts.
        # NOTE(review): the "yes"/"no" list entries can never match a single
        # character -- presumably leftovers; confirm before cleaning up.
        if check[0] in ["y", "yes"]:
            return True
        elif check[0] in ["n", "no"]:
            return False
        else:
            print('Invalid Input')
            return ask_user(question)
    except Exception as error:
        print("Please enter valid inputs")
        print(error)
        return ask_user(question)
6c30dc5c3aed8a0e436bb1fd6e8ed6372c1ddb34
56,498
def build_reagent_vectors(portion_reagents, portion_chemicals):
    """Write the reagents in vector form in the complete portion basis.

    The basis is spanned by all chemicals in all reagents of the portion
    (sans solvent); missing components contribute 0.

    :param portion_reagents: list (or dict of) all reagents in the portion
    :param portion_chemicals: list of all chemical names in the portion
    :return: dict mapping 'Reagent<name> (ul)' to its component vector
    """
    if isinstance(portion_reagents, dict):
        portion_reagents = list(portion_reagents.values())
    vectors = {}
    for reagent in portion_reagents:
        label = 'Reagent{} (ul)'.format(reagent.name)
        components = reagent.component_dict
        vectors[label] = [components.get(chem, 0) for chem in portion_chemicals]
    return vectors
f8d12ef19e84111dde7a40fa5d1113b4f423ab6f
639,745
def marathon_app_id_to_mesos_dns_subdomain(app_id):
    """Return app_id's subdomain as it would appear in a Mesos DNS A record.

    >>> marathon_app_id_to_mesos_dns_subdomain('/app-1')
    'app-1'
    >>> marathon_app_id_to_mesos_dns_subdomain('app-1')
    'app-1'
    >>> marathon_app_id_to_mesos_dns_subdomain('/group-1/app-1')
    'app-1-group-1'
    """
    components = app_id.strip('/').split('/')
    components.reverse()
    return '-'.join(components)
a0e9e755aa4987fa0e35dfc238f8018c2ee89e71
582,831
import time
import calendar


def utc2timestamp(utc_datetime=None):
    """Convert a UTC datetime to seconds since the epoch (UTC).

    With no argument, the current time is returned as an int.
    """
    if utc_datetime is None:
        return int(time.time())
    return calendar.timegm(utc_datetime.utctimetuple())
1ea1e512290a94937c047c35e1fa5b8f7242d358
329,557
from typing import Callable


def save_func_and_its_meta(func: Callable) -> Callable:
    """Decorator factory that makes a wrapper look like the wrapped *func*.

    Use as ``@save_func_and_its_meta(func)`` above the wrapper function
    declaration: the wrapper receives func's ``__name__`` and
    ``__doc__``, and func itself is stashed on ``__original_func``.

    Args:
        func: original function whose metadata should be preserved.

    Returns:
        decorator that patches and returns the wrapper function.
    """
    def inner(wrapper: Callable) -> Callable:
        """Copy __name__/__doc__ from func onto *wrapper* and keep func
        itself reachable via wrapper.__original_func."""
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        wrapper.__original_func = func  # type: ignore
        return wrapper
    return inner
85107e6dcd79ec576906d4e5390cf54ef11a5309
577,981
def hexd(n):
    """Return hex digits of *n* (the leading '0x' stripped)."""
    prefixed = hex(n)
    return prefixed[2:]
7370225183ffb7ebcad1c2622917f06fdca83bbb
69,231
import json def _get_session(gc, tale=None, version_id=None): """Returns the session for a tale or version""" session = {'_id': None} if tale is not None and tale.get('dataSet') is not None: session = gc.post( '/dm/session', parameters={'taleId': tale['_id']}) elif version_id is not None: # Get the dataset for the version dataset = gc.get('/version/{}/dataSet'.format(version_id)) if dataset is not None: session = gc.post( '/dm/session', parameters={'dataSet': json.dumps(dataset)}) return session
1de83816b4d8ca9b6a657de81fc5fa3e04d188df
136,145
def search_songs_db(term, db):
    """Perform a substring search of the songs database.

    All field values are scanned, including each section of the nested
    ``lyrics`` mapping. Matching is a plain case-sensitive substring test.

    :param term: Search term; a falsy term returns every song.
    :type term: `str`
    :param db: database object exposing ``all()`` (e.g. ``tinydb.TinyDB``).
    :returns: matching songs, or all songs when no term is given.
    :rtype: `iterable(dict)`
    """
    all_songs = db.all()
    if not term:
        return all_songs

    matches = []
    for song in all_songs:
        for field, value in song.items():
            if field == "lyrics":
                # Lyrics are nested one level deeper: section -> text.
                for text in value.values():
                    if text and term in text and song not in matches:
                        matches.append(song)
            elif value and term in value and song not in matches:
                matches.append(song)
    return matches
cc73dd895e1088b403f1640347b0132d69862ee2
141,390
def _get_arg(a, x): """Get index of array a that has the closest value to x""" return abs(a-x).argmin()
02efb8d6ca4ea46ea8bdc4cdcfcedc121b8ec2d7
166,022
def celsius_to_fahrenheit(celsius):
    """
    Celsius to Fahrenheit

    :param celsius: Degrees Celsius (anything ``float()`` accepts)
    :return: Fahrenheit as a float
    """
    degrees_c = float(celsius)
    return degrees_c * 1.8 + 32
8ff05c95a1f8029a7fc73e5b834bcf9d614d7922
623,778
import requests


def download_url(url):
    """Downloads a URL file as a browser."""
    # Present a Firefox User-Agent so servers that block scripts respond.
    browser_headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'}
    response = requests.get(url, headers=browser_headers, stream=False)
    return response.content.decode("utf-8")
e62cf1016410fd103d027f423f0c42860d239133
405,108
import asyncio def _async_callback(coro): """Wrap an asynchronous callback to be called from a synchronous context""" return lambda val: asyncio.create_task(coro(val))
c19b9543711a7edfba70bc61abcf435e977f5486
128,831
import json


def to_pretty_json(obj):
    """Encode to pretty-looking JSON string"""
    return json.dumps(
        obj,
        sort_keys=False,
        indent=4,
        separators=(',', ': '),
    )
b325c4e6e150e089da1d9027299831bd1576e57f
5,039
import csv
from io import StringIO
import requests


def csv_reader(url):
    """
    Reads a remote CSV file.
    """
    # Fetch the text body, then hand it to the csv module via an
    # in-memory file object.
    body = requests.get(url).text
    return csv.reader(StringIO(body))
5d8113d19a7cc2e69c6fff7b483c44c8aad078d4
461,104
def Linspace(start, stop, n):
    """Makes a list of n floats evenly spaced from start to stop (inclusive).

    Similar to numpy.linspace().

    :param start: first value.
    :param stop: last value.
    :param n: number of points. n == 1 yields [start] (the original
        implementation raised ZeroDivisionError here); n <= 0 yields [].
    :return: list of n floats.
    """
    if n <= 0:
        return []
    if n == 1:
        # Matches numpy.linspace(start, stop, 1) -> [start].
        return [float(start)]
    return [start + (stop-start) * float(i)/(n-1) for i in range(n)]
02caf1d688568c671464d8c562580f1f1976a4c3
320,967
import re


def create_word_tokens(expr):
    """
    Take raw expr after input and create tokens for AST

    :param expr: str
    :return: list of token strings

    >>> create_word_tokens('a and (b xor x) or c')
    ['a', 'and', '(', 'b', 'xor', 'x', ')', 'or', 'c']
    """
    # Pad parentheses with spaces so split() isolates them as tokens.
    spaced = re.sub(
        r'[(]|[)]',
        lambda m: '( ' if m.group() == '(' else ' )',
        expr,
    )
    return spaced.split()
9fa802e02370dce0f9e965b703b301d223df8e08
344,118
def commute(A, B):
    """ Return the commutator of two operators"""
    left = A * B
    right = B * A
    return left - right
3edba7e68fe372bb619990b0b830d5e9f96e1e77
601,298
def add(num, hist):
    """Record a purchase: subtract its cost from cash and log it.

    Prompts the user (prompts are in Russian) for a purchase amount and,
    if affordable, its name.

    :param num: amount of cash on hand
    :param hist: purchase history, a list of (name, amount) tuples
    :return: updated cash amount and history list
    """
    # Keep prompting until the input parses as a (possibly decimal) number.
    sale = input('Введите сумму покупки: ')
    while not sale.replace('.', '', 1).isdigit():
        sale = input('Введите сумму покупки: ')
    sale = float(sale)

    if sale > num:
        # Not enough cash to cover this purchase.
        print('Сумма покупки больше наличных денег')
    else:
        name = input('Введите название покупки: ')
        hist.append((name, sale))
        num -= sale
    return num, hist
94c6b4ed6db8cb7bf3bbba114453c67056c0b299
398,735
def get_reviews_for_business(bus_id, df):
    """
    INPUT: business id, pandas DataFrame
    OUTPUT: Series with only texts

    For a given business id, return the text of all reviews
    for that business.
    """
    matching = df.business_id == bus_id
    return df.text[matching]
f2f3f6188752d436b20337f5e299d070f590f496
312,245
def list2str(ll):
    """Convert a list into a string with separator ','.

    Parameters
    ----------
    ll : list
        the input list or numpy ndarray

    Returns
    -------
    str
        elements of *ll* joined by commas; '' for an empty input.
    """
    # str.join is O(n), replacing the quadratic += loop + trailing-comma slice.
    return ','.join(str(i) for i in ll)
adc9108529fefa68a7e4ec411825cde7b883f0d2
483,948
import torch


def box_iou(box_a, box_b):
    """
    Arguments:
        boxe_a (Tensor[N, 4])
        boxe_b (Tensor[M, 4])

    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values
        for every element in box_a and box_b
    """
    # Intersection rectangle: broadcast the N boxes against the M boxes.
    top_left = torch.max(box_a[:, None, :2], box_b[:, :2])
    bottom_right = torch.min(box_a[:, None, 2:], box_b[:, 2:])
    extent = (bottom_right - top_left).clamp(min=0)
    intersection = extent[:, :, 0] * extent[:, :, 1]

    area_a = torch.prod(box_a[:, 2:] - box_a[:, :2], 1)
    area_b = torch.prod(box_b[:, 2:] - box_b[:, :2], 1)
    union = area_a[:, None] + area_b - intersection
    return intersection / union
055b78b7e03e82fec4c901bd30875d689fcc4ae0
319,352
def get_fullurl_from_abbrev(name):
    """return the full url address from a abrev name """
    template = "http://www.aitaotu.com/guonei/%s.html"
    return template % name
426300c73cb7ce5bcea0f5c27ed7f6faa41fd78d
120,937
def column_to_list(data, index):
    """Return one column of a list-of-lists as a flat list.

    Arguments:
        data: list of lists (rows)
        index: position (column) to extract

    Returns:
        A list with the value at *index* taken from every row of *data*.
    """
    column = []
    for row in data:
        column.append(row[index])
    return column
6fee91ff0130c6c3c1e8f90fb024537a04cce36c
101,242
def quoteStr(astr, escChar='\\', quoteChar='"'):
    """Escape all instances of quoteChar and escChar in astr with a
    preceding escChar and surrounds the result with quoteChar.

    Examples:
    astr = 'foo" \bar'
    quoteStr(astr) = '"foo\" \\bar"'
    quoteStr(astr, escChar = '"') = '"foo"" \bar"'

    This prepares a string for output.
    """
    escaped = astr
    if escChar != quoteChar:
        # Double every escape character first so later escaping stays unambiguous.
        escaped = escaped.replace(escChar, escChar + escChar)
    escaped = escaped.replace(quoteChar, escChar + quoteChar)
    return quoteChar + escaped + quoteChar
aea3806ec79159905e9bc2775416f296bfb4583c
252,691
import torch


def cosine_similarity(x1, x2):
    """Calculates cosine similarity of two tensor."""
    # Dot product over the last dimension, normalized by both magnitudes.
    dot = torch.sum(torch.multiply(x1, x2), dim=-1)
    norms = torch.linalg.norm(x1, dim=-1) * torch.linalg.norm(x2, dim=-1)
    return dot / norms
7506fd6379a7ba604852f4bd9f969c9dfb82f09a
687,333
from typing import List


def change_station_dropdown(order_list: List[dict], station_name: str) -> List[dict]:
    """
    Build the dropdown options for the 'change station' window.

    Every order whose 'station' field contains *station_name* becomes one
    option with the structure:
    [{'label': 'order_name', 'value': 'order_name'}]

    :param order_list: list of order dicts with 'name' and 'station' keys
    :param station_name: station to look for in each order's stations
    :return: list of dropdown option dicts
    """
    return [
        {'label': order['name'], 'value': order['name']}
        for order in order_list
        if station_name in order['station']
    ]
2018fbfce20d24a86ff3699c004507d1a844758f
425,556
def parse_reaction(line):
    """Parse line to tuple of product, amount of product and requirements."""
    def _tokens(text):
        # "7 A" -> [7, 'A']: digit tokens become ints, names stay strings.
        return [int(tok) if tok.isdigit() else tok for tok in text.split()]

    required, producing = line.split(' => ')
    amount, product = _tokens(producing)

    requirements = {}
    for part in required.split(', '):
        needed, chemical = _tokens(part)
        requirements[chemical] = needed
    return product, amount, requirements
a7c59f32ae0400aa043646bb847ec1b252cb62d4
322,849
import zipfile
import zlib
import logging


def create_lambda_deployment_package(srcfile, deployment_package):
    """Create a Lambda deployment package (ZIP file)

    :param srcfile: Lambda function source file
    :param deployment_package: Name of generated deployment package
    :return: True if deployment package created. Otherwise, False.
    """
    # Build a deflate-compressed archive containing the source file.
    with zipfile.ZipFile(deployment_package, mode='w',
                         compression=zipfile.ZIP_DEFLATED,
                         compresslevel=zlib.Z_DEFAULT_COMPRESSION) as archive:
        try:
            archive.write(srcfile)
        except Exception as err:
            logging.error(err)
            return False
    return True
78f8c55cbb1451cd7e1bdb153401e9326ec51d84
405,470
def _timestamps(soup): """ .. versionadded:: 0.3.0 'Publication' and 'last updated' are two available timestamps. If only one timestamp is listed, the story's update and publication time should be the same. :param soup: Soup containing a page from FanFiction.Net :type soup: bs4.BeautifulSoup class :returns: Tuple where the first item is the publication time and the second item is the update time. :rtype: tuple """ metadata_html = soup.find('span', {'class': 'xgray xcontrast_txt'}) timestamps = metadata_html.find_all(attrs={'data-xutime': True}) # Logic for dealing with the possibility that only one timestamp exists. if len(timestamps) == 1: when_updated = timestamps[0]['data-xutime'] when_published = when_updated else: when_updated = timestamps[0]['data-xutime'] when_published = timestamps[1]['data-xutime'] return when_published, when_updated
5f73dc46fe1ffa60c1164276cc5b87e72c52a0d2
308,355
def run_dqm_and_collect_solutions(dqm, sampler):
    """ Send the DQM to the sampler and return the best sample found."""
    print("\nSending to the solver...")

    # Sample the DQM; the sampleset's first entry is the lowest-energy one.
    sampleset = sampler.sample_dqm(dqm, label='Example - Immunization Strategy')
    return sampleset.first.sample
6cd08620af1a5044570eb87fea105c6c8a532af8
82,750
def read_lines_decimal(read_file):
    """
    Read all lines of a file that are decimal values into a list.

    :param read_file: File handler for the file to read.
    :return: List of decimal values.
    """
    values = []
    for raw_line in read_file:
        stripped = raw_line.strip()
        # Keep only lines that parse as floats; silently skip the rest.
        try:
            values.append(float(stripped))
        except ValueError:
            pass
    return values
fbb0fd51b161bc2ec3cf38991b92d4bd55432b22
205,542
def remove_enclosing_dirs(full_path):
    """ Remove any directories in a filepath

    If you pass a filename to this function, meaning it doesn't contain the
    "/" string, it will simply return the input back to you.

    Args:
        full_path (str): a Unix-based path to a file (with extension)

    Returns:
        filename (string): just the filename (with extension) from the path
    """
    if "/" in full_path:
        # Keep only the last path component.
        return full_path.split("/")[-1]
    print(" ".join(["Your path", full_path, "is already a filename."]))
    return full_path
9372a06b75936be39569d7476d59e53b021d7e17
303,793
def create_text(text, hashtags):
    """Returns a solid string containing the entire text of the posting

    Parameters:
        text (string): text of your posting
        hashtags (list): list of hashtags e.g. from get_random_hashtags()

    Returns:
        string that contains the posting
    """
    # Instagram-style spacer lines between the caption and the hashtags.
    hashtag_line = ' '.join(str(tag) for tag in hashtags)
    return text + '\n.\n.\n.\n.\n' + hashtag_line
155f085ddbf195df6fcfa172eaacd8cd0a2e631f
182,425
def every_pred(*preds):
    """
    Create a function that returns true if all ``preds`` return true
    against the provided arguments.

    :param *preds: Predicates.
    :rtype: Callable[[Any], bool]
    """
    def _combined(*args, **kwargs):
        return all(pred(*args, **kwargs) for pred in preds)
    return _combined
3a8c2a8738b05557c6a58f8d502c22f5eb8a4779
240,170
def from_gca_linestring(gca_obj):
    """
    Converts LineString GCA to an EGF string.

    Returns
    -------
    egf_str : str
        LineString GCA as an EGF string
    """
    if gca_obj.geometry_type != "LS":
        raise ValueError(f"Expected geometry type to be 'LS' and got '{gca_obj.geometry_type}' instead")

    # EGF sections are separated by four newlines.
    section_break = "\n" * 4
    parts = [str(gca_obj.geometry_type), section_break,
             ', '.join(str(h) for h in gca_obj.headers)]

    for feature in gca_obj.features:
        parts.append(section_break)
        parts.append(', '.join(str(attr) for attr in feature[0]))
        for coords in feature[1]:
            lng, lat, elev = coords[:3]
            parts.append("\n")
            # Coordinates are written lat-first in EGF.
            parts.append(', '.join(str(c) for c in (lat, lng, elev)))

    parts.append("\n")
    return ''.join(parts)
976b59d1cfd8d3ae4620416fcddf5104f8de83ba
632,026
def cleanup(run_dir):
    """ Cleans up the run directory. """
    # Drop the histogram_plots folder if it exists but holds nothing.
    histo_dir = run_dir.joinpath('histogram_plots')
    if histo_dir.is_dir() and next(histo_dir.iterdir(), None) is None:
        histo_dir.rmdir()

    # A run_dir left empty (e.g. too few good pixels) is removed as well.
    if run_dir.is_dir() and next(run_dir.iterdir(), None) is None:
        run_dir.rmdir()
    return None
6c180dc2a3125e2a8c5074e0a5aee42c3473f36d
644,766
def get_src(node):
    """ Returns src module of node, None if attr not defined """
    # NOTE: like the original `and/or` idiom, a falsy srcmodule value
    # (e.g. '') also maps to None.
    return getattr(node, "srcmodule", None) or None
28d37a39df61353eec31248da47572df5a5f2c75
32,781
def list_chunks(l, n):
    """
    Return a list of chunks

    :param l: List
    :param n: int The number of items per chunk (values < 1 behave as 1)
    :return: List
    """
    size = max(n, 1)
    return [l[start:start + size] for start in range(0, len(l), size)]
44ef7903dda3070b58f375f74fd19f2b12ed1455
380,446
def cleanup_frame(frame):
    """Make the columns have better names, and ordered in a better order."""
    renamed = frame.rename(columns={"Non- Hispanic white": "White"})
    # Fix a canonical column order (drops any other columns).
    return renamed.reindex(columns=["Asian", "White", "Hispanic", "Black"])
fb79f5d5161fe71e0ddc038caf095ecc0db69341
615,911
def _formatDict(d): """ Returns dict as string with HTML new-line tags <br> between key-value pairs. """ s = '' for key in d: new_s = str(key) + ": " + str(d[key]) + "<br>" s += new_s return s[:-4]
be9749e5f69c604f3da95902b595f9086b01baa5
682,989
import torch from typing import Tuple from typing import Union def _dtype_min_max(dtype: torch.dtype) -> Tuple[Union[float, int], Union[float, int]]: """Get the min and max values for a dtype""" dinfo = torch.finfo(dtype) if dtype.is_floating_point else torch.iinfo(dtype) return dinfo.min, dinfo.max
43e94545b785351460a18eeec8853473b6302b7c
403,668