content — string, length 39 to 9.28k
sha1 — string, length 40
id — int64, 8 to 710k
def version_to_tuple(v):
    """Converts a version string into a tuple."""
    return tuple(map(int, v.split('.')))
0864249d835c27f6e0a0bf6cd289e310836150ce
438,684
def initials(string, sep=".", sepAtEnd=True):
    """Returns the initials of a string"""
    splitString = string.split(" ")
    theInitialList = [i[:1].capitalize() for i in splitString]
    return sep.join(theInitialList) + sep * sepAtEnd
dbbd4547563db0d8f1036eb1355e74d72e0c2796
334,054
def estimate_euler_constant_factorial(number_of_elements=1000):
    """Estimation of euler's constant using its taylor series."""
    euler_constant = 1
    factorial = 1.
    for number in range(1, number_of_elements):
        factorial *= number
        euler_constant += 1. / factorial
    return euler_constant
44226b67bd021b7ab0eb2ae06db69dd902a4e6c2
236,921
def handle_invalid_usage_exception(error):
    """
    Handles global exception fired from this namespace.

    :param error: exception fired.
    :return: payload, status code tuple.
    """
    return {'message': error.message}, 400
c412e6705ed5f4237ecdc4ca0d723e938fe1ffdf
145,120
def findDuplicatesFilenames(data, sheet):
    """Finds duplicated articles in the sample table and returns a list of their associated docx filenames"""
    duplicated = data.duplicated(subset=['http'])
    duplicated_list = duplicated.values.tolist()
    row_ctr = 0
    filenames_to_delete = []
    for row in duplicated_list:
        row_ctr += 1
        if row:
            filename = sheet.cell(row=row_ctr + 1, column=10).value
            filename_to_push = str(filename) + '.docx'
            filenames_to_delete.append(filename_to_push)
    print(f"Filenames to delete are {filenames_to_delete}")
    return filenames_to_delete
4b17d07c4c0daf328179af99cf67ba880927f7ca
295,373
def nofirst(l):
    """
    Returns a collection without its first element.

    Examples
    --------
    >>> nofirst([0, 1, 2])
    [1, 2]
    >>> nofirst([])
    []
    """
    return l[1:]
adf86fe85f8173be173ee94479dac1ceb7a38813
662,704
def get_bounding_box_size(images):
    """Compute bounding box size given list of images."""
    height = max(image.shape[0] for image in images)
    width = max(image.shape[1] for image in images)
    return height, width
0ca510a082c92e12cb775dabf16af455f11c0e65
250,682
import torch


def db_to_amplitude(x, ref=1.0):
    """
    Decibel-to-amplitude conversion (exponential mapping with base=10)

    Args:
        x (Tensor): Input in decibel to be converted
        ref (float): Amplitude value that is equivalent to 0 decibel

    Returns:
        (Tensor): same size of x, after conversion
    """
    power_spec = torch.pow(10.0, x / 10.0 + torch.log10(torch.tensor(ref, device=x.device, requires_grad=False, dtype=x.dtype)))
    return power_spec.pow(0.5)
9fd14bb5eecf39d7bad15175f9a6969beebee543
540,912
def percent_pos_to_step_num(percent_pos, max_steps):
    """Calculate step number from percentage position. Note that 100% means fully up."""
    return round((1 - percent_pos) * max_steps)
32bc641a55c0d07970e33b7e604852c24db4f4d0
396,258
def setList(value):
    """ Sets a list to empty if None. """
    if value is None:
        return []
    else:
        return value
e06225398022cc6e795d13123c2a9c7159ef7871
184,624
def _has_eval_results_call(cls):
    """Return True if cls has a __call__ decorated with @eval_results
    """
    return getattr(getattr(cls, '__call__', None), '_eval_results', False)
3466ff2594f58d2ee9405e57cdb77bafbf3b9529
676,368
def read_bytes(filename, offset, number):
    """
    Reads specific bytes of a binary file.

    :param filename: path to file
    :type filename: string
    :param offset: start reading at offset
    :type offset: integer
    :param number: number of bytes
    :type number: integer
    :return: byte string
    """
    with open(filename, "rb") as f:
        f.seek(offset)
        byte_string = f.read(number)
        f.close()
    return byte_string
c04c7659211f7bfee3d88b27cd014cea303b13c4
675,408
from typing import Tuple


def compute_character_attack_speed(
        frames_per_animation: int,
        base_animation_length: int,
        speed_coefficient: float = 1.0,
        engine_tick_rate: int = 60) -> Tuple[float, float]:
    """Computes the minimum and maximum character_attack_speed for a
    certain ability to reach the specified frames per animation breakpoint.

    Args:
        frames_per_animation: breakpoint to calculate attack speed values for
        base_animation_length: animation length of ability
        speed_coefficient: speed-up scalar of ability
        engine_tick_rate: server tick rate

    Returns:
        Tuple[float, float]: min attack speed to reach frames_per_animation breakpoint
        and max attack speed to leave frames_per_animation breakpoint.
    """
    _coeff = (base_animation_length - 1) * engine_tick_rate / (speed_coefficient * base_animation_length)
    min_aps = _coeff / frames_per_animation
    max_aps = _coeff / (frames_per_animation - 1)
    return min_aps, max_aps
ab1ca81bd89ca5f4ff0e2068a4df51e09140274a
51,784
def cam_name_to_id(name):
    """Convert the camera name to camera ID"""
    if name == "clairton1":
        return 0
    elif name == "braddock1":
        return 1
    elif name == "westmifflin1":
        return 2
    else:
        return None
15beb275dc156bd82fa86ee962c2cfef37202027
248,788
def any_tagged_cell(nb, tag):
    """Return whether the notebook contains at least one cell tagged ``tag``.

    Parameters
    ----------
    nb : nbformat.NotebookNode
        The notebook to introspect
    tag : str
        The tag to look for

    Returns
    -------
    bool
        True if the notebook contains at least one cell tagged ``tag``
    """
    return any([tag in cell.metadata.tags for cell in nb.cells])
a5645aae183de4dda5bc4610091dbee5e890d0b0
583,006
def polygon_list_to_dict(polygon):
    """
    Returns a dictionary with the list of points of a polygon.

    Parameters
    ----------
    polygon : `list`
        A list with the XYZ coordinates of the vertices of a polygon.

    Returns
    -------
    polygon_dict : `dict`
        A dictionary with the points of a polygon.
    """
    return {'points': polygon}
1b99fcd75346f67f5c6029185267db066cde0a51
343,367
def eeCollection(mission):
    """
    Earth Engine image collection name from satellite mission name
    """
    switch = {
        'Sentinel2': 'COPERNICUS/S2',
        'Landsat8': 'LANDSAT/LC08/C01/T1_TOA',
        'Landsat7': 'LANDSAT/LE07/C01/T1_TOA',
        'Landsat5': 'LANDSAT/LT05/C01/T1_TOA',
        'Landsat4': 'LANDSAT/LT04/C01/T1_TOA'
    }
    return switch[mission]
f49feb311645ce7827db3f252ff3dd19eda22244
273,392
async def clap(text, args):
    """ Puts clap emojis between words. """
    if args != []:
        clap_str = args[0]
    else:
        clap_str = "👏"
    words = text.split(" ")
    clappy_text = f" {clap_str} ".join(words)
    return clappy_text
09865461e658213a2f048b89757b75b2a37c0602
705,785
def count(values):
    """
    Returns a dict of counts for each value in the iterable.
    """
    counts = dict()
    for v in values:
        if v not in counts:
            counts[v] = 0
        counts[v] += 1
    return counts
0e4febef6dbfefb2e04b103f177af3dd3d3bfc59
114,786
def replace_del_alt(var):
    """
    Issues occur with deletions near the ends of the genome.
    Currently - is used to represent an entire string of bases being deleted.
    Here we replace the ALT with "N" the length of REF.
    """
    ref_length = len(var.REF)
    if var.ALT[0] == '-':
        fixed_alt = 'N' * ref_length
        var.ALT[0] = fixed_alt
        return var
    else:
        return var
f3d081a8dd12b8ca81bd8d5a8aca6bf6dbccc839
696,459
def get_channels_first_permutation(spatial):
    """Returns a permutation to make a (N, ..., C) array into (N, C, ...)."""
    return [0, spatial + 1] + list(range(1, spatial + 1))
4ff047e50ce2c300b872f3e95d799991b27c4e69
590,905
def prepare_data(logs: dict):
    """Function to prepare the data according to the type of message.

    Args:
        logs: Dictionary containing the metrics and loss values.

    Returns:
        string in the same format as logs.
    """
    val = [f"{key} : {value}" for key, value in logs.items()]
    text = "\n".join(val)
    return text
7eae9094ccb301c4cb0836f804ba527ca4e2b5c3
411,793
def sanitize(name: str):
    """
    Normalize the given string by removing chars that are
    potentially problematic for the LP solver
    """
    return name.replace("-", "__")
63d024b6820098b46d3b5457f338511c60e65898
119,077
def drop_command(message, command):
    """
    Given a message text, drops the command prefix from the string.
    """
    return message[len(command) + 1:]
6e8488e9b57be3ba61d5e53111dcd876e82313a1
204,677
def format_chores(chores):
    """
    Formats the chores to properly utilize the Oxford comma

    :param list chores: list of chores
    """
    if len(chores) == 1:
        return f'{chores[0]}'
    elif len(chores) == 2:
        return f'{chores[0]} and {chores[1]}'
    else:
        chores[-1] = 'and ' + chores[-1]
        return f"{', '.join(chores)}"
38b7115520867e2545f7be7364e6147ca12dc8e1
30,085
def create_dates_list(state_dates, dates_string, key, start_capital):
    """
    Create a dict used to update x_axis values and string.

    :param state_dates: boolean, show or not dates on axis
    :param dates_string: list of dates plotted
    :param key: string, type of graph
    :param start_capital: float
    """
    # keys are idx of trade and values are date string
    if state_dates == 2:
        xaxis_dict = dict(enumerate(dates_string))
    # keys and values are idx of trades
    else:
        xaxis_dict = {i: i for i in range(len(dates_string))}
    if key == "Growth" and start_capital == 0.0:
        xaxis_dict = {}  # don't set string if no data
    return xaxis_dict
f153ed3b403682f7dc9b451a547f9608fe1a7bf5
416,535
def simple_check(author, channel):
    """
    A predicate used in the wait_for() function, to ensure the user input can only come
    from the user who activated the command, and in the same channel.

    :param author: Author object
    :param channel: Channel object
    :return: Check function
    """
    def check(message):
        return message.author == author and message.channel == channel
    return check
484cf46a71621f3e0f41574e26a0974ddaa187cb
401,410
import aiohttp


async def cleanup_file(slack_api_token, file):
    """Deletes a file from Slack."""
    headers = {"Authorization": "Bearer {}".format(slack_api_token)}
    data = {"file": file["id"]}
    async with aiohttp.ClientSession() as session:
        async with session.post('https://slack.com/api/files.delete', headers=headers, data=data) as resp:
            if resp.status == 200:
                return True
    return False
420f20700e0eda242adf826451c8f45c20e91c87
155,882
import re


def filter_urls(url_to_check) -> bool:
    """
    Filters the URLs collected so that only those from http://www.bbc.co.uk and
    https://www.bbc.co.uk are kept. To remove the remaining non-useful URLs we assume
    every valid BBC article has an 8-digit string in its URI and discard those which do not.

    @Returns bool True if URL is valid.
    """
    searchobj = re.search(r'[0-9]{8}', url_to_check)
    if ('http://www.bbc.co.uk' in url_to_check or 'https://www.bbc.co.uk' in url_to_check) and (searchobj is not None):
        # print(f'URL added to list is : {url_to_check} ')
        return True
    else:
        # print('URL is not added to the list, it may be outside BBC.co.uk news or not a news article')
        return False
3d749f2a35566666fe7071d79b1787ea93273c72
306,681
def input_details(interpreter, key):
    """Gets a model's input details by specified key.

    Args:
        interpreter: The ``tf.lite.Interpreter`` holding the model.
        key (int): The index position of an input tensor.

    Returns:
        The input details.
    """
    return interpreter.get_input_details()[0][key]
e99acb200a460efb137f955bc800c226c8924173
295,610
def get_monitor_name(domain_name: str) -> str:
    """
    Encapsulate the RUM monitors naming convention.

    :param domain_name: the domain name for which a RUM monitor is to be created.
    :return: a Rams' compliant RUM monitor name
    """
    return domain_name + "_RUM"
e31e24aa884c7b251c56d6c5d781ac1915c4167d
654,267
def format_correct(row):
    """
    This function helps determine whether the user entered the boggle board row in a correct format.

    :param row: string, Entered row of letters
    :return: Boolean, return True if the entered row is in correct format
    """
    if len(row) != 7:
        return False
    else:
        for i in range(len(row)):
            if i % 2 == 0:
                if not row[i].isalpha():
                    return False
            elif i % 2 == 1:
                if row[i] != " ":
                    return False
    return True
f5a70d8295de0a6ee71466a5fb40949aeedf7a59
238,781
def get_ng_build_env(env: str) -> str:
    """
    Translate the env string for angular that uses a more verbose string.
    """
    return "production" if env == "prod" else ""
6aaa2b5b1d56b57c7afb24b6dd9f986682f09e49
509,577
def getDatasetId(chunk_id):
    """ Get dataset id given a chunk id
    """
    n = chunk_id.find('-') + 1
    if n <= 0:
        raise ValueError("Unexpected chunk id")
    obj_uuid = chunk_id[n:(36 + n)]
    dset_id = "d-" + obj_uuid
    return dset_id
c0b0b3f3cd655e336589defeb272085ac9dd2b89
335,682
def format_data_frame_row(row, format_message_string):
    """Return a formatted data frame using a format string."""
    return format_message_string.format(**row)
cb59a7fbe6039a53a1d89033842dc0273eb7bf26
482,627
def is_control_char(c):
    """Return True if 'c' is a control character.

    c is considered a control character if
    it is outside of the extended ASCII set or
    has a code below 32 with some exclusions.

    An ASCII compatible character set is assumed.
    """
    charcode = 0
    # The following assignment
    # should make this module compatible with
    # at least Python 2.7 (tested on 2.7.9).
    try:
        charcode = ord(c)
    except TypeError:
        charcode = c
    excludes = ("\t", "\r", "\n")
    if charcode in [ord(char) for char in excludes]:
        return False
    return (charcode < 32 or charcode > 255)
aedd5edad7e54d6eccee25f81332bd3ad17108c5
86,766
def cut_dataframe(df_entire, start_date, end_date, date_column="date"):
    """Cuts a dataframe to a desired date range

    :param df_entire: The dataframe to cut
    :type df_entire: pandas.core.frame.DataFrame
    :param start_date: Start date of the dataframe entries
    :type start_date: str
    :param end_date: End date of the dataframe entries
    :type end_date: str
    :param date_column: Column name of the timestamp/date column, defaults to "date"
    :type date_column: str, optional
    :return: Returns the dataframe with entries strictly after start_date and up to and including end_date
    :rtype: pandas.core.frame.DataFrame
    """
    mask = (df_entire[date_column] > start_date) & (df_entire[date_column] <= end_date)
    return df_entire.loc[mask]
677c6a2a50e7fc268782ce7eff2437b5b7a069b6
173,809
import math


def Echotan(number):
    """ Tan of a number """
    return math.tan(number)
cfdd4370cebc8fc444168df00804a71d691b62f2
389,547
def get_track(es_obj):
    """returns the track from the elasticsearch object"""
    es_ds = es_obj.get('_source', {})
    # iterate through ds
    track_met_options = ['track_number', 'track', 'trackNumber', 'track_Number']
    for tkey in track_met_options:
        track = es_ds.get(tkey, False)
        if track:
            return track
    # if that doesn't work try metadata
    es_met = es_ds.get('metadata', {})
    for tkey in track_met_options:
        track = es_met.get(tkey, False)
        if track:
            return track
    raise Exception('unable to find track for: {}'.format(es_obj.get('_id', '')))
57ee61968ab0ed9f27a49df624883c867382753a
273,026
import re


def remove_tags(text):
    """Remove html tags from a string"""
    clean = re.compile('<.*?>')
    return re.sub(clean, '', text)
b5bb38c10c99ec7979b0d740717e60f87cfccb9d
668,808
def get_portals(entries):
    """Create mapping of portal entrances from entries."""
    portals = {}
    for portal_name, locations in entries.items():
        if len(locations) == 2:
            (location1, level_change1), (location2, level_change2) = locations
            portals[location1] = location2, level_change2
            portals[location2] = location1, level_change1
    return portals
022f3ac297573bff15f1b780b3a0c006e2d614df
321,466
import requests
import json
import pytz


def get_timezone(auth, base_url):
    """
    Gets the account information which includes the timezone,
    and returns this timezone as a pytz.timezone object.
    """
    r = requests.get(url=base_url + 'account.json', auth=auth)
    if r.status_code != 200:
        raise requests.exceptions.HTTPError('Invalid Authentication')
    data = json.loads(r.content.decode('utf-8'))
    return pytz.timezone(data['time_zone'])
a1897f3746734697223a3b2b2ed4add6a6974384
494,966
from pathlib import Path
from typing import List


def get_dir_list(path: Path) -> List[str]:
    """ Return directory list """
    dir_list = []
    paths = Path(path).glob("**/*")
    for p in paths:
        if p.is_dir():
            dir_list.append(str(p))
    return dir_list
ae0bf021aba74f3ab601479c0dd91530f047374d
430,118
def get_xpath_text_list(root, xpath, child_xpath):
    """ Returns text values of child_xpath in order they occur """
    elem = root.find(xpath)
    if elem is None:
        return []
    else:
        return [child.text for child in elem.findall(child_xpath)]
5681cba81c4ba8e75031e24f4d42bc914d822e77
181,855
def get_int_mask(ip):
    """get mask from ip/mask info

    Args:
        ip (str): ip with mask

    Returns:
        str: mask
    """
    return ip.split("/")[-1]
7fb95397353da1db60c3cf28d090f80f7a0dc254
194,230
def calc_tare_torque(rpm):
    """Returns tare torque array given RPM array."""
    return 0.00104768276035*rpm - 0.848866229797
777ab650de79df9da877fb1ce514c719a4438b66
489,631
from pathlib import Path
from typing import Dict
from typing import Any
import xml.etree.ElementTree as ET  # needed for ET.parse below; missing in the original snippet


def xmlparam(fn: Path) -> Dict[str, Any]:
    """
    reads necessary camera parameters into dict

    * kinetic rate (1/frame rate) seconds
    * resolution pixels
    * binning pixels

    Parameters
    ----------
    fn : pathlib.Path
        filename of XML file corresponding to .DMCdata file

    Returns
    -------
    params : dict
        camera parameters of interest
    """
    tree = ET.parse(fn)  # type: ignore
    root = tree.getroot()
    # note: Element.getchildren() was removed in Python 3.9; list(root) is the modern equivalent
    children = root.getchildren()
    data = children[1]
    params: Dict[str, Any] = {}
    diter = data.iter()
    for el in diter:
        txt = el.text
        if txt == "Binning (H x V)":
            txt = next(diter).text
            params["binning"] = int(txt)
        elif txt == "ROI H Pixels":
            txt = next(diter).text
            params["horizpixels"] = int(txt)
        elif txt == "ROI V Pixels":
            txt = next(diter).text
            params["vertpixels"] = int(txt)
        elif txt == "Freq":
            txt = next(diter).text
            params["pulsefreq"] = float(txt)
            params["kineticrate"] = 1 / params["pulsefreq"]
    return params
739f0795ceac427f3b84f223998ad0122ff97802
270,459
import glob


def files_list(folder, suffix='.png'):
    """ Returns a list of all the images within this directory. """
    return glob.glob(folder + '/*' + suffix)
b71e4aa45f4f7728cf8f6698b1a8312105cca0ae
423,741
def extract_name_path(file_path):
    """String formatting to update the prefix of the ERA5 store location to Azure"""
    tgt = "az://cmip6/ERA5/" + file_path.split("/zarr/")[1].replace("/data", "")
    return tgt
2cdb74776356e3556d38a4d7d7dda8f8202da2e2
517,198
def get_next_available_key(iterable, key, midfix="", suffix="", is_underscore=True, start_from_null=False):
    """Get the next available key that does not collide with the keys in the dictionary."""
    if start_from_null and key + suffix not in iterable:
        return key + suffix
    else:
        i = 0
        underscore = "_" if is_underscore else ""
        while "{}{}{}{}{}".format(key, underscore, midfix, i, suffix) in iterable:
            i += 1
        new_key = "{}{}{}{}{}".format(key, underscore, midfix, i, suffix)
        return new_key
4918d85fb3708357bbadc988fae4e5a12ba3aa7c
584,755
import requests


def lacrosse_get_locations(token):
    """
    Query La Crosse server to return a list of user's locations

    :param token: Current, valid token for user session -- see `lacrosse_login`
    """
    url = "https://lax-gateway.appspot.com/" \
          "_ah/api/lacrosseClient/v1.1/active-user/locations"
    headers = {"Authorization": "Bearer " + token}
    r = requests.get(url, headers=headers)
    if r.status_code < 200 or r.status_code >= 300:
        raise ConnectionError("failed to get locations ({})".format(r.status_code))
    body = r.json()
    return body.get('items')
d3bdc778449ac9d75d36015ddfb07e266efc1e17
311,130
def is_function_call(data: str) -> bool:
    """
    Returns true if predicate is probably a function call.
    """
    return all(bracket in data for bracket in ('(', ')'))
135f575074660304bdaabc7b18f502b666014092
435,388
def _basename(key):
    """
    Return the base name of a key name.

    a.b.c.1 -> a.b.c
    a.b.c -> a.b
    """
    return '.'.join(key.split('.')[:-1])
c6054cc9cd7fddda829f647ff7243a6125431b0a
154,617
def get_parent_context(liveaction_db):
    """
    Returns context of the parent execution.

    :return: If found the parent context else None.
    :rtype: dict
    """
    context = getattr(liveaction_db, 'context', None)
    if not context:
        return None
    return context.get('parent', None)
f15b0143f8b20b1aeacfa00e39f69045d95e17e0
291,562
import re


def natural_sort(l):
    """A more natural sorting algorithm

    Parameters
    ----------
    l : list
        List of strings in need of sorting

    Examples
    --------
    >>> l = ['elm0', 'elm1', 'Elm2', 'elm9', 'elm10', 'Elm11', 'Elm12', 'elm13']
    >>> sorted(l)
    ['Elm11', 'Elm12', 'Elm2', 'elm0', 'elm1', 'elm10', 'elm13', 'elm9']
    >>> natural_sort(l)
    ['elm0', 'elm1', 'Elm2', 'elm9', 'elm10', 'Elm11', 'Elm12', 'elm13']

    References
    ----------
    [1] https://stackoverflow.com/a/4836734/5285918
    """
    convert = lambda text: int(text) if text.isdigit() else text.lower()
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)
4e1843081ee2876da9f9ba0ff5b7ffa2016f692b
482,383
def Heun_ndt(f_S, Si, dt, T, **kwargs):
    """
    Solve dS/dt = f_S(S, t), S(t=t_i)=Si, at t=T with n steps (n=T/dt),
    using the explicit Heun's method

    Parameters
    ----------
    f_S : function
        State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
    Si : float
        State at time t=t_i
    dt : float or int
        Time step [days]
    T : float or int
        Time period [days]
    kwargs : dict
        *kwargs* are used to specify the additional parameters used by the
        the state function (f_S)

    Returns
    -------
    dS : float
        Integrated state at t=ndt with n=T/dt
    """
    n = int(T / dt)
    dS = Si
    for _ in range(n):
        K1 = f_S(dS, **kwargs)
        K2 = f_S((K1 * dt) + dS, **kwargs)
        dS = dS + (0.5 * dt * (K1 + K2))
    return dS
00803453f89a20318d66ca159784ad03068c38c7
313,732
import torch


def hard_sigmoid(tensor: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """
    Applies HardSigmoid function element-wise.

    See :class:`torchlayers.activations.HardSigmoid` for more details.

    Arguments:
        tensor : Tensor activated element-wise
        inplace : Whether operation should be performed `in-place`. Default: `False`

    Returns:
        torch.Tensor:
    """
    return torch.nn.functional.hardtanh(tensor, min_val=0, inplace=inplace)
5f4d87749ddca014076f46e0af6e9b3c4308ddf7
10,621
def remove_minutes(time):
    """Sets the minutes, seconds, and microseconds to zero."""
    return time.replace(minute=0, second=0, microsecond=0)
97ee54765b36d30fac179e8ff9a42c8eadcac169
485,924
def solve_matrix(matrix, right_hand_side):
    """
    Performs back substitution on an N x N boolean matrix in RREF form to determine the
    solution to each equation represented by its rows (noting that the equations are of
    the form [X · S] % 2 = 0). For Simon's Algorithm, this gives you the secret string S
    that's hidden in the original function being evaluated.

    Parameters:
        matrix (list[list[bool]]): The matrix representing the equations to solve.
            It must be N x N and already in RREF form.
        right_hand_side (list[bool]): A vector representing the right-hand-side of the
            equations held in the matrix. These are the "solutions" to each equation.

    Returns:
        The solution to the matrix, in this case the secret string S.

    Remarks:
        For a good, visual example of how this process works, take a look at this math
        StackExchange post: https://math.stackexchange.com/a/45348
    """
    secret_string = [False] * len(matrix)
    # Start at the bottom row and work our way up to the top
    for row_index in range(len(matrix) - 1, -1, -1):
        row = matrix[row_index]
        right_hand_side_value = right_hand_side[row_index]  # Solution for this equation
        # Walk through the values of the row (these correspond to the variables for each
        # row beneath it, which have already been solved at this point since we're going
        # bottom up); if this row has a 1 for that variable, XOR the solution value with
        # whatever that row's value ended up being.
        for column_index in range(row_index + 1, len(row)):
            if row[column_index]:
                right_hand_side_value ^= secret_string[column_index]
        # Once the terms have been calculated, assign the solution value at this row's
        # index to the result of the equation.
        secret_string[row_index] = right_hand_side_value
    return secret_string
94f65178d5c5bb78f0f2d5f2eb1753e1e3391864
425,903
def clone_lvol_bdev(client, snapshot_name, clone_name):
    """Create a logical volume based on a snapshot.

    Args:
        snapshot_name: snapshot to clone
        clone_name: name of logical volume to create

    Returns:
        Name of created logical volume clone.
    """
    params = {
        'snapshot_name': snapshot_name,
        'clone_name': clone_name
    }
    return client.call('clone_lvol_bdev', params)
aac63d9e391295e85f5c2a27e55e083e2bf16c49
135,836
from typing import List


def pad_tokens(tokens: List[List[int]], pad_value: int = 0) -> List[List[int]]:
    """pad a batch of tokens to fixed maximum length using `pad_value`.

    Args:
        tokens: list of list of tokens of varying lengths.
        pad_value: padding value. Defaults to 0.

    Example:
        >>> from hearth.text.utils import pad_tokens
        >>>
        >>> tokens = [[1, 2], [1, 2, 3], [1]]
        >>> pad_tokens(tokens)
        [[1, 2, 0], [1, 2, 3], [1, 0, 0]]
    """
    maxlen = max(map(len, tokens))
    return [seq + [pad_value] * (maxlen - len(seq)) for seq in tokens]
fe4d4954236063943d8c4131f654518f38751908
624,128
def maxL(*it):
    """
    Calculate maximum length of provided items.

    Parameters
    ----------
    *it : objects
        Items of various lengths. Only lengths of iterables are returned.

    Returns
    -------
    Length of longest object (int).
    """
    m = set()
    for i in it:
        try:
            m.add(len(i))
        except TypeError:
            pass
    if len(m) > 0:
        return max(m)
    else:
        return 1
da8ef145097f03a02dea113ab0c0164b334cc35a
95,670
from functools import reduce
from operator import add


def mean(L):
    """return the average of a list"""
    return reduce(add, L) / len(L)
910049658aea7998751af6a58d07addfcbe4f99a
432,007
import math


def _inner_log_logistic_sigmoid(x):
    """Log of the logistic sigmoid function log(1 / (1 + e ** -x))"""
    if x > 0:
        return -math.log(1. + math.exp(-x))
    else:
        return x - math.log(1. + math.exp(x))
af0afc3e82f3509ed1d298bb2a58a108007c66f7
538,812
def unpack_ushort(data: bytes) -> int:
    """Unpacks unsigned short number from bytes.

    Keyword arguments:
    data -- bytes to unpack number from
    """
    return int.from_bytes(data, byteorder="little", signed=False)
a7440c7b6389e45d7f205aded2afc1ad0b8a37b9
68,826
import torch


def unpack_bits(t):
    """t is a tensor of packed bits. Unpack in the first dim"""
    n = t.shape[0]
    unpacked = torch.zeros(8 * n, *t.shape[1:]).to(t.device)
    for i in range(8):
        unpacked[i * n:(i + 1) * n] = (t >> i) & 1
    return unpacked
040c6de141483d20acf5d2162f8502d2fa32110a
417,947
def Multiply(u, s):
    """
    Returns vector multiplied by scalar.
    """
    def __multiply(x):
        return x * s
    return map(__multiply, u)
56b16dfd8d83e46bc4f31b15ec2730d90991f4b9
408,424
def is_in_group(user, group_name):
    """Take a user and a group name, and returns `True` if the user is in that group."""
    return user.groups.filter(name=group_name).exists()
797853cd5000cb1404545e3f20d38703c7a058dd
19,209
def _get_layer_lrs(learning_rate, layer_decay, n_layers=2):
    """Have lower learning rates for layers closer to the input."""
    key_to_depths = {}
    key_to_depths["layer_0/"] = 0
    key_to_depths["layer_1/"] = 4
    print(key_to_depths)
    return {
        key: learning_rate * (layer_decay ** (n_layers + 2 - depth))
        for key, depth in key_to_depths.items()
    }
511f5acd0d89e7e5fd53d1d2c744ab485bf3bb76
413,785
import ast


def create_ast_function_call_with_numeric_values(func_name: str, **kwargs):
    """
    Creates an ast call for function name with passed numeric keyword arguments.

    :Notes:
        Will not work if a non-numeric keyword value is passed.

    :Examples:
        >>> import astor
        >>> value = create_ast_function_call_with_numeric_values('ModelInputs', n_phones=100, price_scrap=200)
        >>> astor.to_source(value)
        'ModelInputs(n_phones=100, price_scrap=200)\n'
    """
    ast_keywords = [
        ast.keyword(
            arg=kwarg_name,
            value=ast.Num(kwarg_value)
        ) for kwarg_name, kwarg_value in kwargs.items()
    ]
    call = ast.Call(
        func=ast.Name(func_name),
        args=[],
        keywords=ast_keywords
    )
    return call
37dcc3468d294106cd4443803a6ddc79cf5c01d3
120,443
def chebyshev_distance(pos1: tuple, pos2: tuple):
    """
    Compute Chebyshev distance between two points

    :param pos1: Coordinate of first point
    :param pos2: Coordinate of second point
    :return: Chebyshev distance between two points
    """
    distance = 0
    for ind in range(len(pos1)):
        distance = max(distance, abs(pos1[ind] - pos2[ind]))
    return distance
3595a7f72b4fc9d73785785f9dd8760609d2ea74
474,272
from typing import List
import re


def substitute_inputs(text: str, placeholders: List[str], inputs: List[str]) -> str:
    """Substitute the inputs for the placeholders in the text"""
    for placeholder, replacement in zip(placeholders, inputs):
        text = re.sub(placeholder, replacement, text, 1)
    return text
9ce3408f836eca7cdf8f0d27276081d43677f0fb
446,377
from typing import Union
from typing import Callable
import torch


def get_reduction(method: Union[Callable[[torch.Tensor], torch.Tensor], str]) \
        -> Callable[[torch.Tensor], torch.Tensor]:
    """Get reduction method with the given method

    Args:
        method Union[Callable[[torch.Tensor], torch.Tensor], str]: method of reduction.

    Raises:
        AssertionError: when method is not found.
        TypeError: when type of method is not allowed.

    Returns:
        Callable[[torch.Tensor], torch.Tensor]: Method of reduction.
    """
    if isinstance(method, str):
        reduction_method = getattr(torch, method, None)
        if reduction_method is None:
            raise AssertionError(f"{method} not found.")
    elif callable(method):
        reduction_method = method
    else:
        raise TypeError(f"{type(method).__name__} not allowed.")
    return reduction_method
7c5477d9e15e8b716881c45fcfaf56a6c625f00c
439,255
def get_dict(dictionary='../data/dictionary.txt', length=100):
    """
    Takes a string referencing the location of a text file
    Optionally takes the length of the letters we're finding a word for
    Reads that file line by line into a set, removing whitespace
    """
    with open(dictionary, 'r') as f:
        words = set()
        for line in f:
            if len(line) <= length:
                words.add(line.strip())
    return words
6b24a736315bbb85817d7de1ba88dbe2f7b1c16f
124,636
def in_order_path(tsp):
    """Return the tour [1,2,...,n,1] where n is the dimension of the TSP"""
    dim = tsp["DIMENSION"]
    return list(range(1, dim + 1))
43f5be498cb23b9717bb55476b0317412cd63bec
458,787
def split_timecode(time_code):
    """
    Takes a timecode string and returns the hours, minutes and seconds.
    Does not simplify timecode.

    :param time_code: String of format "HH:MM:SS.S" ex. "01:23:45.6"
    :return: HH (float), MM (float), SS (float)
    """
    hh, mm, ss = time_code.split(':')
    hh = float(hh)
    mm = float(mm)
    ss = float(ss)
    return hh, mm, ss
c3b24d6c8ea08bf1bfc4da81b17eb1207b7304e6
530,104
def get_assignment_details(app_detail_result):
    """
    Method to fetch smart group ids, names and push mode where the app has been deployed

    :param app_detail_result: json returned from the app details api
    :return: list of smart group ids, list of smart group names and push mode
    """
    assignments = app_detail_result['Assignments']
    sg_ids = [groups['SmartGroupId'] for groups in assignments]
    sg_names = [groups['SmartGroupName'] for groups in assignments]
    push_mode = [groups['PushMode'] for groups in assignments]
    return sg_ids, sg_names, push_mode
ff344d09826e0d5de27a9ed573ceb763ee330eea
84,747
def _get_return_value_type_name_from_line(line_str: str) -> str:
    """
    Get the type name of return value from the target line string.

    Parameters
    ----------
    line_str : str
        Target line string.

    Returns
    ----------
    return_value_type_name : str
        Type name of return value.
    """
    colon_exists: bool = ':' in line_str
    if not colon_exists:
        return_value_type_name: str = line_str.split(':')[0]
    else:
        return_value_type_name = line_str.split(':')[1]
    return_value_type_name = return_value_type_name.strip()
    return return_value_type_name
1327647128176e9168509d660769634da088c3a1
59,723
def _check_is_max_context(doc_spans, cur_span_index, position):
    """Check if this is the 'max context' doc span for the token."""
    # Because of the sliding window approach taken to scoring documents, a
    # single token can appear in multiple documents. E.g.
    #   Doc: the man went to the store and bought a gallon of milk
    #   Span A: the man went to the
    #   Span B: to the store and bought
    #   Span C: and bought a gallon of
    #   ...
    #
    # Now the word 'bought' will have two scores from spans B and C. We only
    # want to consider the score with "maximum context", which we define as
    # the *minimum* of its left and right context (the *sum* of left and
    # right context will always be the same, of course).
    #
    # In the example the maximum context for 'bought' would be span C since
    # it has 1 left context and 3 right context, while span B has 4 left context
    # and 0 right context.
    best_score = None
    best_span_index = None
    for (span_index, doc_span) in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start:
            continue
        if position > end:
            continue
        num_left_context = position - doc_span.start
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score = score
            best_span_index = span_index
    return cur_span_index == best_span_index
aa2ce42d893a7273184047ed0aff3c0d8f9bd189
622,979
import hashlib


def hash_sha1(input):
    """A hash function based on hashlib.sha1

    Args:
        input: the data to generate an integer-based hash value for
            (needs to be encoded with UTF-8)

    Returns:
        int: an integer-based hash value
    """
    if not isinstance(input, bytes):
        raise TypeError("The input data must be encoded with UTF-8")
    return int(hashlib.sha1(input).hexdigest()[0:8], 16)
8855b5cb63cc68f96579307c70b94fd0cbc39b7e
486,729
def seti(registers, a, b, c):
    """(set immediate) stores value A into register C. (Input B is ignored.)"""
    registers[c] = a
    return registers
e2f833315382d48a5aaa903e558304472894e60b
550,479
def gen(x):
    """
    Return the generator of ``x``.

    EXAMPLES::

        sage: R.<x> = QQ[]; R
        Univariate Polynomial Ring in x over Rational Field
        sage: gen(R)
        x
        sage: gen(GF(7))
        1
        sage: A = AbelianGroup(1, [23])
        sage: gen(A)
        f
    """
    return x.gen()
b829b698625c3dc4b266f59ca80cf7941466ef3f
115,685
def run_algo(op_list):
    """Execute all operations in a given list (multiply all matrices to each other)."""
    ops = op_list[::-1]  # reverse the list; after all, it's matrix mult.
    result = ops[0]
    for op in ops[1:]:
        result = result * op
    return result
d7954d938162885b704bf479f63dcc6741e8e65d
664,157
import torch


def eval_metrics(preds: torch.Tensor, labels: torch.Tensor) -> dict:
    """Calculate metrics: precision, recall, f1, accuracy"""
    tp = ((preds == 1) & (labels == 1)).cpu().sum().item()
    fp = ((preds == 1) & (labels == 0)).cpu().sum().item()
    fn = ((preds == 0) & (labels == 1)).cpu().sum().item()
    tn = ((preds == 0) & (labels == 0)).cpu().sum().item()
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    acc = (tp + tn) / (tp + fp + fn + tn) if (tp + fp + fn + tn) else 0.0
    return {"precision": precision, "recall": recall, "f1": f1, "acc": acc}
dc0c641dba387f77f5c2af67835953d2602fc10a
71,307
def read_code_blocks_from_md(md_path):
    """
    Read ```python annotated code blocks from a markdown file.

    Args:
        md_path (str): Path to the markdown file

    Returns:
        py_blocks ([str]): The blocks of python code.
    """
    with open(md_path, "r") as f:
        full_md = f.read()
    md_py_splits = full_md.split("```python")[1:]
    py_blocks = [split.split("```")[0] for split in md_py_splits]
    return py_blocks
ca920f74e9326cf5f3635fbb6ebe125b6d97a349
709,376
def EmptiedFilesCheck(input_api, output_api):  # pragma: no cover
    """Warns if a CL empties a file. This is not handled properly by apply_patch
    from depot_tools: the file would not exist at all on trybot checkouts.
    """
    empty_files = []
    infra_root = input_api.PresubmitLocalPath()
    for filename in input_api.AffectedTextFiles():
        fullname = input_api.os_path.join(infra_root, filename.LocalPath())
        if not input_api.os_stat(fullname).st_size:
            empty_files.append(filename.LocalPath())
    if empty_files:
        return [output_api.PresubmitPromptWarning(
            'Empty files found in the CL. This can cause trouble on trybots\n'
            + 'if your change depends on the existence of those files:\n%s'
            % '\n'.join(empty_files)
        )]
    return []
e9e61ee61f5d0deaa503f4c23843c24b32ffbd88
433,131
from typing import Sequence
from typing import Callable


def argmax_f(s: Sequence, f: Callable) -> int:
    """
    Take the argmax (i.e. the index) based on the maximum of a particular function.

    Parameters
    ----------
    s : Sequence
        The sequence to find the argmax of.
    f : Callable
        The function to apply to the elements

    Returns
    -------
    int
        The index produced by the argmax.
    """
    return max(enumerate([f(d) for d in s]), key=lambda x: x[1])[0]
b6b1027586b1e6e21faabe98a4218b5601069af3
319,055
import torch


def jaccard_score(outputs, labels, smooth=1e-5):
    """Compute the Jaccard/IoU score. If a division by zero occurs, returns a score of 0.

    Args:
        outputs (Tensor): The model's predictions
        labels (Tensor): The target labels (aka ground truth predictions)
        smooth (float/int): A smoothness factor

    Returns:
        Jaccard score (Tensor)
    """
    outputs, labels = outputs.float(), labels.float()
    intersect = torch.dot(outputs.contiguous().view(-1), labels.contiguous().view(-1))
    union = torch.add(torch.sum(outputs), torch.sum(labels))
    jaccard = (intersect + smooth) / (union + smooth)
    return jaccard if not torch.isnan(jaccard) else torch.Tensor([0.0])
d296ed0037ed45fe85d6e0edc5754f7642189c69
489,979
def find_best_model(checkpoint, history):
    """
    Finds the best model saved by the checkpoint.

    :param checkpoint: the checkpoint
    :type checkpoint: ModelCheckpoint
    :param history: a dictionary with the metrics and their values for each epoch.
    :type history: dict[str, np.ndarray]
    :return: the path of the best model
    :rtype: str or None
    """
    if checkpoint.save_best_only:
        return checkpoint.filepath
    period = checkpoint.period
    monitor = checkpoint.monitor
    best = checkpoint.best
    monitor_op = checkpoint.monitor_op
    values = history.get(monitor, None)
    if values is None:
        return None
    best_epoch = 0
    for i in range(len(values)):
        if monitor_op(values[i], best):
            best = values[i]
            best_epoch = i
    return checkpoint.filepath.format(epoch=(best_epoch + 1) * period)
cbe31a874b7f1e404b38b36fd99884b8fdc2e8de
529,658
def rolling_window(iterable, size):
    """
    :param iterable: the iterable over which to make windows
    :param size: length of desired windows
    :returns: an iterable containing rolling windows
    """
    iter_length = len(iterable)
    return [iterable[x : x + size] for x in range(iter_length - size + 1)]
c8ffbb7aeb0e738a755b885eda87729e769c8f51
156,419
def min_max_prod(minimums, maximums):
    """
    returns the smallest and largest products achievable by interweaving maximums and minimums

    >>> min_max_prod((3,6,0,1,6),(-4,3,-5,-4,0))
    (-2880, 2160)
    """
    assert len(maximums) == len(minimums)
    ret_min, ret_max = 1, 1
    for op_min, op_max in zip(minimums, maximums):
        cands = (op_min * ret_min, op_min * ret_max, op_max * ret_min, op_max * ret_max)
        ret_min = min(cands)
        ret_max = max(cands)
    return ret_min, ret_max
8d218147c8663d943a030189f7d63cfa3e1eead9
406,910
def prop_has_many_entries(prop_ent):
    """
    Check if a Wikidata entry has multiple values for a given property
    """
    try:
        prop_ent[1]
        return True
    except IndexError:
        return False
0e701d14e3f7732ed303b22cd052f3b789f5a6d7
108,219
import json


def block_schema_version_to_dict(block_schema_version):
    """Serializes block schema version to dict.

    Args:
        block_schema_version (:class:`b2share.modules.schemas.api:BlockSchemaVersion`):
            block schema version that will be serialized.

    Returns:
        dict: serialized BlockSchemaVersion.
    """
    return dict(
        id=block_schema_version.block_schema.id,
        version=block_schema_version.version,
        json_schema=json.loads(block_schema_version.json_schema),
    )
c096ad19403c90100f7f6c8ba48bf4961e211144
505,623
def get_no_control_guides(df, guide_base_score):
    """Get guides that are not paired with controls

    Parameters
    ----------
    df: DataFrame
        DataFrame with the column anchor_guide
    guide_base_score: DataFrame
        Guide scores when paired with controls

    Returns
    -------
    list
        Guides without control pairs
    """
    starting_guides = set(df.anchor_guide.to_list())
    base_score_guides = set(guide_base_score.anchor_guide.to_list())
    no_control_guides = list(starting_guides - base_score_guides)
    return no_control_guides
598e9d3c45bf7d391dcb8c81c475ad66c71a73a9
572,681
import re


def _BinaryExistsInSudoersFiles(path, sudoers_file_contents):
    """Returns True if the binary in |path| features in the sudoers file.
    """
    for line in sudoers_file_contents.splitlines():
        if re.match(r'\s*\(.+\) NOPASSWD: %s(\s\S+)*$' % re.escape(path), line):
            return True
    return False
6c42105469b3068137d3b29377b17fc5987379e2
458,957
import copy


def subtract_from_all(xs, num):
    """
    Helper function to subtract num from all values in x dictionary,
    without modifying the x dictionary.
    """
    new = copy.deepcopy(xs)
    for k in new:
        new[k] -= num
    return new
2d1f83c55d3f9c4ec01505a684d87a682333af58
171,352
def readUntilNull(f):
    """Reads data from the current position to the next null byte."""
    byte = ''
    s = ''
    while byte != '\0':
        s += byte
        byte = f.read(1)
    return s
a98905610a45fbae2a46d56a153c14d15a78e800
240,297
def get_cached_strict_matching_stickers(context):
    """A simple helper function to get all unique file_id's of strict matching stickers."""
    query_id = context.inline_query_id
    cache = context.tg_context.bot_data[query_id]
    return cache["strict_unique"]
a89fb1bc78fb36e0fd5dcf88b0e713ec9384143f
553,821
from typing import List


def less_than(list_: List[float], n: int) -> float:
    """
    Get number of symbols in `list_` which have a value less than `n`.

    Parameters
    ----------
    list_ : List[float]
    n : int

    Returns
    -------
    float
        Number of elements of the list_ which are strictly less than n.
    """
    return float(len([1 for el in list_ if el < n]))
24fd1c694372e65eeb277e03e2f749f5209fa358
278,131
def get_test_case_code(path):
    """Get test case content as a string provided the full path
    to the python file.
    """
    code = ''
    with open(path) as ff:
        code = ff.read()
    return code
a9e1232d836c4ff03a2312efdf96a363752ec133
542,650