Columns: content (string, 39 to 9.28k chars) · sha1 (string, 40 chars) · id (int64, 8 to 710k)
def _apply_args_and_kwargs(function, args, kwargs):
    """Apply a tuple of args and a dictionary of kwargs to a function

    Parameters
    ----------
    function: func
        function you wish to use
    args: tuple
        all args passed to function
    kwargs: dict
        all kwargs passed to function
    """
    return function(*args, **kwargs)
9da4933968d1729ae62472cec915f12b97ae6455
581,703
def longest_no_repeat(string: str) -> int:
    """Return longest length of a substring with no repeating characters."""
    last_seen = {}
    max_so_far = 0
    start = 0  # left edge of the current non-repeating window
    for i, letter in enumerate(string):
        # Only a repeat inside the current window shrinks it; a stale
        # occurrence before `start` must not reset the streak.
        if letter in last_seen and last_seen[letter] >= start:
            start = last_seen[letter] + 1
        last_seen[letter] = i
        max_so_far = max(max_so_far, i - start + 1)
    return max_so_far
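A quick check of the sliding-window behavior above, with hand-picked inputs (illustrative only):

assert longest_no_repeat("abcabcbb") == 3  # "abc"
assert longest_no_repeat("abba") == 2      # window resets past the repeated 'b'
assert longest_no_repeat("") == 0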
bb479499ce7ead76363fca75fe84e7be595a5d16
288,245
def expected_time_in_markov_state_ignoring_class_2_arrivals(
    state, lambda_1, mu, num_of_servers, system_capacity
):
    """
    The expected time of the Markov chain model at the state given.
    Note here that for a state (u,v) where v = system capacity (C)
    no class 1 arrival can occur and thus the rate at which the model
    leaves that state changes.
    """
    if state[1] == system_capacity:
        return 1 / (min(state[1], num_of_servers) * mu)
    return 1 / (min(state[1], num_of_servers) * mu + lambda_1)
73d2be7a9c27d2634473e27b7a727f60eda5e603
515,839
def _to_http_uri(s: str) -> str:
    """Prefix the string with 'http://' if there is no schema."""
    if not s.startswith(('http://', 'https://')):
        return 'http://' + s
    return s
5ad67d12bbfbca13143dfbacd4ea96f53b9919e9
47,297
def _calculate_total_dispatchable_mw(case):
    """
    Calculates the total available MW for dispatch. This is equal to the
    total remaining capacity of all in-service, non-swing generators.

    :param case: The :class:`Case` that is synchronized with the current loaded case.
    :type case: :class:`Case`
    :return: The total remaining dispatchable capacity in the case, in MW.
    :rtype: float
    """
    total_dispatchable_mw_max = 0
    swing = case.swing
    for machine in case.machines:
        if machine.status and machine.has_capacity:
            if machine.bus_number != swing.number:
                total_dispatchable_mw_max += machine.max_mw_output
    return total_dispatchable_mw_max
7f26723e186c67f797bddc2e7c11b4313437261f
81,145
def find_empty_location(grid):
    """
    Looks for the coordinates of the next zero value on the grid, starting
    on the upper left corner, from left to right and top to bottom.

    Keyword Arguments:
        grid {number matrix} -- The matrix to look for the coordinates on

    Returns:
        tuple -- The (x, y) coordinates of the next zero value on the grid
        if one is found, otherwise None
    """
    for index, row in enumerate(grid):
        if 0 in row:
            return (row.index(0), index)
3027214e96efbb33ed893cb371ff41bc4fd55123
479,334
import csv
import io


def generate_crash_record(line, fieldnames):
    """
    Translates a raw csv line into a python dictionary
    :param line: string - The raw csv line
    :param fieldnames: array of strings - The strings to be used as headers
    :return: dict
    """
    reader = csv.DictReader(
        f=io.StringIO(line),
        fieldnames=fieldnames,
        delimiter=","
    )
    # Take the single parsed row directly; the previous json.dumps round-trip
    # with "[" / "]" stripping corrupted any field containing brackets.
    return dict(next(reader))
69b4f920a3ba88430260e0f6bf6f4b4502ea0c50
561,605
def dtIsArticle(dt):
    """
    Checks if determiner is indefinite or definite English article
    Argument:
        dt: the determiner name
    Return:
        True if determiner is indefinite or definite English article
    """
    return dt.lower() in ['a', 'an', 'the']
ab9553aa275eeb5d838d1ffe0b5332051f64e59f
88,483
from typing import Callable
from typing import Tuple


def retry_fn(fn: Callable, allowable_exceptions: Tuple, retry_count: int = 5):
    """
    Call fn, retrying if exception type in allowable_exceptions is raised
    up to retry_count times
    """
    for i in range(retry_count):
        try:
            return fn()
        except allowable_exceptions:
            if i == retry_count - 1:
                raise
d531751d7e3b1706667c3d2a66c57844f0334888
619,034
import torch


def npvec_to_tensorlist(vec, params):
    """ Convert a numpy vector to a list of tensor with the same dimensions as params
        Args:
            vec: a 1D numpy vector
            params: a list of parameters from net
        Returns:
            rval: a list of tensors with the same shape as params
    """
    loc = 0
    rval = []
    for p in params:
        numel = p.data.numel()
        rval.append(torch.from_numpy(vec[loc:loc + numel]).view(p.data.shape).float())
        loc += numel
    assert loc == vec.size, 'The vector has more elements than the net has parameters'
    return rval
3cbed80b3896d6f0610a057903f09728ccae0a30
39,846
import hashlib


def hash_file(file_path: str) -> str:
    """
    Hash file content with SHA-256. This way we can check two files have same content.

    :param file_path: Absolute path name.
    :return: File hash string.
    """
    with open(file_path, mode="rb") as file:
        content = file.read()
    content_hash = hashlib.sha256(content).hexdigest()
    return content_hash
6a811109f88e6f141fafa9a270c4d62657305748
248,315
def split_data(data, train_len):
    """Split a dataframe into train and test data

    Parameters
    ----------
    data : pandas.core.frame.DataFrame
        Dataframe to split
    train_len : float
        Percentage of data for training

    Returns
    -------
    pandas.core.frame.DataFrame
        a dataframe with the train data
    pandas.core.frame.DataFrame
        a dataframe with the test data
    """
    # calculate the index on which to split the data
    train_split = int(len(data) * train_len)
    # split the data into train and test; the test slice starts at
    # train_split (not train_split + 1) so no row is dropped
    train_data = data[0:train_split]
    test_data = data[train_split:len(data)]
    # return the split data
    return train_data, test_data
871b6745f01833c56b91020bc7a611a136fc3980
114,878
import struct


def get_unique_id(pid, computer_name):
    """
    https://github.com/poweradminllc/PAExec/blob/master/Remote.cpp#L1045-L1065
    DWORD RemMsg::GetUniqueID()

    Creates a unique ID based on the PID of the local host and the name of the
    local host. It is derived from the first 4 bytes of a UTF-16 Little Endian
    encoded computer name and the local PID xor'd together.

    This value is sent in the PAExecSettingsMsg to define the process details
    and also the PAExecResponseMsg to control the execution and results of the
    processed based on the settings.

    :param pid: (int) the process id of the current host
    :param computer_name: (str/unicode) of the current hostname
    :return: int of the unique ID derived from the PID and Computer Name
    """
    bcomp_name = computer_name.encode('utf-16-le')[:4]
    bcomp_name = bcomp_name + (b"\x00" * (4 - len(bcomp_name)))
    return pid ^ struct.unpack("<L", bcomp_name)[0]
6bad61edd3e5141487a1986c487658a7462136a8
222,724
def calc_x_dist(p, q):
    """
    :param p: robot position list p
    :param q: robot position list q
    :return: the x-axis Euclidean distance between p and q
    """
    return abs(p[0] - q[0])
5a5671f01231e5ad1bc099ad9ec0f645bdb9fe08
325,573
def calc_age(age, interval=0):
    """Determine a person's age after interval years."""
    age += interval
    return age
00a3e7b847485dddde4682f90334b612e391dbad
278,614
def errfunc(p, x, y, fitfunc):
    """Error function used in fitting.

    Args:
        p (list): List of parameters.
        x (:class:`numpy.ndarray`): x values.
        y (:class:`numpy.ndarray`): Observed y values.
        fitfunc (function): Fitting function.

    Returns:
        :class:`numpy.ndarray`: Values of residuals.
    """
    return y - fitfunc(p, x)
1c0cd9baa0859fa46c0775657a3af9756fe9c4d9
188,366
def get_edges(graph):
    """Return edge index for each edge in the graph.

    Parameters
    ----------
    graph : dgl.DGLGraph
        A DGLGraph for which edge indices will be saved

    Returns
    -------
    dict
        a dictionary where keys are (source, destination) tuples of nodes,
        and corresponding edge indices are values
    """
    edges_dict = {}
    for idx, (src, dst) in enumerate(zip(graph.edges()[0], graph.edges()[1])):
        src, dst = src.item(), dst.item()
        edges_dict[(src, dst)] = idx
    return edges_dict
9b6f3f8ab90e76b333a42c0342b9481de05f003a
401,317
def get_memory_monitor_summary_filename(selector=None):
    """
    Return the name for the memory monitor summary file.

    :param selector: special conditions flag (boolean).
    :return: File name (string).
    """
    name = "memory_monitor_summary.json"
    if selector:
        name += '_snapshot'
    return name
aeb43e1b0516c38ab37c3a5020912dacf0dfba66
610,167
def simple_closure(s, implications):
    """
    Input: A set of implications and an attribute set s
    Output: The closure of s with respect to implications

    Examples
    ========

    >>> from fca.implication import Implication
    >>> cd2a = Implication(set(('c', 'd')), set(('a')))
    >>> ad2c = Implication(set(('a', 'd')), set(('c')))
    >>> ab2cd = Implication(set(('a', 'b')), set(('c', 'd')))
    >>> imps = [cd2a, ad2c, ab2cd]
    >>> simple_closure(set('a'), imps) == set(['a'])
    True
    >>> simple_closure(set(), imps) == set()
    True
    >>> simple_closure(set(['b', 'c', 'd']), imps) == set(['a', 'b', 'c', 'd'])
    True
    >>> a2bc = Implication(set(('a')), set(('b', 'c')))
    >>> ce2abd = Implication(set(('c', 'e')), set(('a', 'b', 'd')))
    >>> de2abc = Implication(set(('d', 'e')), set(('a', 'b', 'c')))
    >>> cd2abe = Implication(set(('c', 'd')), set(('a', 'b', 'e')))
    >>> imps = [a2bc, ce2abd, de2abc, cd2abe]
    >>> simple_closure(set(['b', 'a']), imps) == set(['a', 'b', 'c'])
    True
    >>> simple_closure(set(['a', 'e']), imps) == set(['a', 'b', 'c', 'd', 'e'])
    True
    >>> imps = [ce2abd, a2bc, de2abc, cd2abe]
    >>> simple_closure(set(['a', 'e']), imps) == set(['a', 'b', 'c', 'd', 'e'])
    True
    """
    unused_imps = implications[:]
    new_closure = s.copy()
    changed = True
    while changed:
        changed = False
        # iterate over a snapshot so removing a used implication is safe
        for imp in list(unused_imps):
            if imp.premise <= new_closure:
                new_closure |= imp.conclusion
                changed = True
                unused_imps.remove(imp)
    return new_closure
05ff32be462b5949bb1ff62917c28d32a05cde84
695,185
def convert_chemformula(string: str) -> str:
    """
    Convert a chemical formula string to a matplotlib parsable format (latex).

    Parameters
    ----------
    string : str or Adsorbate
        String to process (an object with a ``formula`` attribute also works).

    Returns
    -------
    str
        Processed string.
    """
    result = getattr(string, 'formula', None)
    if result is None:
        result = ""
        number_processing = False
        for i in string:
            if i.isdigit():
                if not number_processing:
                    result += '_{'
                    number_processing = True
            else:
                if number_processing:
                    result += '}'
                    number_processing = False
            result += i
        if number_processing:
            result += '}'
    return f'${result}$'
00b2759fd05ea6f7f4ab0352f8107c8c36c81ed4
285,448
def score_recipes(recipes, ingredients, scorer):
    """Score recipes based on ingredients with scorer function."""
    ingredients_order = [*ingredients]
    return [(scorer(recipe, ingredients), recipe, ingredients_order)
            for recipe in recipes]
d4a77b3d3ff37177cdc0bde9ad99da28611c1689
425,663
import unicodedata
import re


def normalize(s):
    """
    Normalizes a string.

    This function does the following, in order:

    - Calls :func:`unicodedata.normalize("NFC", ...) \
      <unicodedata.normalize>`.
    - Normalizes line endings.
    - Expands horizontal tabs with a width of four columns.
    - Expands vertical tabs with a height of one row, and emulates
      carriage return.
    - Emulates backspace and delete.

    Parameters
    ----------
    s: :class:`str`
        The string to normalize.

    Returns
    -------
    :class:`str`
        The normalized string.

    Examples
    --------
    .. code:: python3

        >>> normalize("\\u0061\\u0301")  # \u0061\u0301
        "\\u00E1"
        >>> normalize("cool text\\vtext")
        "cool text\\r\\n         text"
        >>> normalize("text text\\rcool")
        "cool text"
    """
    s = unicodedata.normalize("NFC", s)
    s = s.replace("\r\n", "\n")
    s = s.replace("\f", "\n")
    s = s.expandtabs(4)
    if "\r" in s or "\v" in s:
        s = re.sub(r"\r|\v", "\f\\g<0>", s)
    lines = s.split("\n")
    for (i, line) in enumerate(lines):
        if "\f" in line:
            parts = line.split("\f")
            line = ""
            column = 0
            for part in parts:
                if not part:
                    continue
                code, part = part[0], part[1:]
                if code == "\r":
                    try:
                        line, current_line = line.rsplit("\n", 1)
                        line += "\n"
                    except ValueError:
                        line, current_line = "", line
                    remaining = ""
                    for c in reversed(current_line):
                        if len(current_line) - (len(remaining) + len(part)) < len(c):
                            break
                        remaining = c + remaining
                    line += part + remaining
                    column = len(part)
                elif code == "\v":
                    line += "\n" + " " * column + part
                    column += len(part)
                else:
                    line += code + part
                    column = len(code + part)
            lines[i] = line
    s = "\n".join(lines)
    while "\b" in s or "\u007F" in s:
        s = re.sub(r".?((?:\u0008|\u007F)*)\u0008|\u007F((?:\u0008|\u007F)*).?", r"\1\2", s)
    s = s.replace("\n", "\r\n")
    return s
fc4c37dc96623f0f24b77a084b06e7f43864d405
471,946
import yaml


def parse_feature_extraction_config(file_path):
    """
    Parse feature extraction configuration dictionary from yaml file.

    Args:
        file_path (str): path to yaml file.

    Returns:
        (dict): dictionary in specified form specifying which features to
        extract as well as additional parameters for the feature extraction
        process.
    """
    with open(file_path, 'r') as f:
        return yaml.safe_load(f)
c812da87d6eb86a7bc5bb6448378a383f19dd09f
39,343
def avg_values(buildings_buf, building_data, road):
    """
    Get some really simple statistics on a set of buildings given a road
    segment. Return average building height and average distance from the
    buildings to the road.
    """
    if not buildings_buf:
        return 0, 0
    avg_dist = 0
    avg_height = 0
    for bid in buildings_buf:
        avg_dist += building_data[bid]['geom'].distance(road)
        # Sometimes no data available in the dataset.
        if building_data[bid]['height'] is not None:
            avg_height += building_data[bid]['height']
    avg_dist /= len(buildings_buf)
    avg_height /= len(buildings_buf)
    return avg_dist, avg_height
c7bd284b1ad64cb081137f5ee45c3dcb348dcd81
162,744
def bresenham_line(x0, y0, x1, y1):
    """
    Return all pixels between (x0, y0) and (x1, y1) as a list.

    :param x0: Self explanatory.
    :param y0: Self explanatory.
    :param x1: Self explanatory.
    :param y1: Self explanatory.
    :return: List of pixel coordinate tuples.
    """
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        x0, y0 = y0, x0
        x1, y1 = y1, x1
    switched = False
    if x0 > x1:
        switched = True
        x0, x1 = x1, x0
        y0, y1 = y1, y0
    if y0 < y1:
        y_step = 1
    else:
        y_step = -1
    delta_x = x1 - x0
    delta_y = abs(y1 - y0)
    error = -delta_x / 2
    y = y0
    line = []
    for x in range(x0, x1 + 1):
        if steep:
            line.append((y, x))
        else:
            line.append((x, y))
        error = error + delta_y
        if error > 0:
            y = y + y_step
            error = error - delta_x
    if switched:
        line.reverse()
    return line
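For instance, tracing the accumulated-error loop above on a shallow line (values worked out by hand from the code):

# shallow slope: y steps up only when the error term crosses zero
assert bresenham_line(0, 0, 3, 2) == [(0, 0), (1, 1), (2, 1), (3, 2)]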
13fc58968d25eeaea270b0fdeb9479f201f1225d
496,954
def reduce_set(data):
    """
    Used by several analyses to reduce the dataset to stock symbol and
    model features.
    """
    return data.loc[:, ['stock_symbol', 'stock_price_last', 'tweet_volume',
                        'unix_diff', 'positivity_percentage', 'currency']]
58662c7e5bc802240285876dc41c8fedcac9dd68
157,761
import math


def format_hash(hash_str: str, hash_len: int, hash_seg_len: int, hash_sep: str) -> str:
    """
    Format a hash string: keep only hash_len chars from it, and break it up
    into segments of len hash_seg_len, using the hash_sep as separator. Ex:

    >>> format_hash('abcdef1232567890', 8, 2, '-')
    'ab-cd-ef-12'
    """
    hash_str = hash_str[:hash_len]
    if hash_seg_len >= hash_len:
        return hash_str
    num_segs = math.ceil(len(hash_str) / hash_seg_len)
    return hash_sep.join(hash_str[hash_seg_len * i: (hash_seg_len * i + hash_seg_len)]
                         for i in range(num_segs))
2e7866fcc871bab1c1758403bc198a10c54c1334
40,468
def format_ref(seq):
    """Remove gaps."""
    return str(seq).replace('-', '')
a2da7a535228863082cb0302bf4e970dd6335cdb
382,971
def removeStartTime(sortedData):
    """Removes start time from time values

    Arguments:
        sortedData {list} -- the data sorted with lowest time first
    """
    t0 = sortedData[0][0]
    for i in range(len(sortedData)):
        sortedData[i][0] = sortedData[i][0] - t0
        sortedData[i][1] = sortedData[i][1] - t0
    return sortedData
40a8740c53732bf9f9c2ee0a7dc6df1ce8bdb933
445,152
def port_bound(port):
    """
    Returns true if the port is bound.
    """
    return port['binding:vif_type'] != 'unbound'
ff55f104afae0d0750fd0002fa5490ce40556d85
305,979
import torch


def minDCF(
    positive_scores, negative_scores, c_miss=1.0, c_fa=1.0, p_target=0.01
):
    """Computes the minDCF metric normally used to evaluate speaker verification
    systems. The min_DCF is the minimum of the following C_det function computed
    within the defined threshold range:

    C_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)

    where p_miss is the missing probability and p_fa is the probability of
    having a false alarm.

    Arguments
    ---------
    positive_scores : torch.tensor
        The scores from entries of the same class.
    negative_scores : torch.tensor
        The scores from entries of different classes.
    c_miss : float
        Cost assigned to a missing error (default 1.0).
    c_fa : float
        Cost assigned to a false alarm (default 1.0).
    p_target: float
        Prior probability of having a target (default 0.01).

    Example
    -------
    >>> positive_scores = torch.tensor([0.6, 0.7, 0.8, 0.5])
    >>> negative_scores = torch.tensor([0.4, 0.3, 0.2, 0.1])
    >>> val_minDCF, threshold = minDCF(positive_scores, negative_scores)
    >>> val_minDCF
    0.0
    """
    # Computing candidate thresholds
    thresholds, _ = torch.sort(torch.cat([positive_scores, negative_scores]))
    thresholds = torch.unique(thresholds)

    # Adding intermediate thresholds
    interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
    thresholds, _ = torch.sort(torch.cat([thresholds, interm_thresholds]))

    # Computing False Rejection Rate (miss detection)
    positive_scores = torch.cat(
        len(thresholds) * [positive_scores.unsqueeze(0)]
    )
    pos_scores_threshold = positive_scores.transpose(0, 1) <= thresholds
    p_miss = (pos_scores_threshold.sum(0)).float() / positive_scores.shape[1]
    del positive_scores
    del pos_scores_threshold

    # Computing False Acceptance Rate (false alarm)
    negative_scores = torch.cat(
        len(thresholds) * [negative_scores.unsqueeze(0)]
    )
    neg_scores_threshold = negative_scores.transpose(0, 1) > thresholds
    p_fa = (neg_scores_threshold.sum(0)).float() / negative_scores.shape[1]
    del negative_scores
    del neg_scores_threshold

    c_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)
    c_min, min_index = torch.min(c_det, dim=0)

    return float(c_min), float(thresholds[min_index])
fe2dd403ce348fe45ad61bcca85f3c3e7b99dc70
257,925
def identifyL23(addition):
    """Check if it is L2 or L3 delta request."""
    return 'L3' if 'routes' in addition else 'L2'
63c47a0de8142ddae8559d2e4cc236e2f5e972fd
27,024
def parse_name(r):
    """
    Used to parse the name of a team.
    """
    return str(r.get("naam", ""))
f85dcf12fc26a1ac5acbd23ceefa8d0ce4831828
441,104
def combine_real_imag(real_data, imag_data):
    """Combines two float data arrays into one complex64 array"""
    return real_data + 1j * imag_data
a9f3eb9f7e6f2d2b70cca941a2c14802f4a5d069
101,857
def nldpost(self, label="", key="", fileid="", prefix="", **kwargs):
    """Gets element component information from nonlinear diagnostic files.

    APDL Command: NLDPOST

    Parameters
    ----------
    label
        Specifies the type of command operation:

        EFLG - Element flag for nonlinear diagnostics.

        NRRE - Newton-Raphson residuals.

    key
        Specifies the command action:

        STAT - List information about the diagnostic files (Jobname.ndxxx or
        Jobname.nrxxx) in the current directory. For Label = EFLG, the
        listing gives a summary that associates the loadstep, substep, time,
        equilibrium iteration number, cumulative iteration number, and the
        number of elements that fail each criteria with a specific file ID
        (Jobname.ndxxx). Use the list to create element components (via the
        CM option) based on the cumulative iteration number.

        For Label = NRRE, the listing provides a summary that associates the
        loadstep, substep, time, equilibrium iteration number, and cumulative
        iteration number with a specific file ID (Jobname.nrxxx). Use the
        list to identify the respective file ID for creating Newton-Raphson
        residual contour plots (PLNSOL,NRRE,...,FileID).

        DEL - Delete Jobname.ndxxx or Jobname.nrxxx files in the working
        directory, if any exist.

        CM - Create components for elements that violate criteria. This
        value is valid only when Label = EFLG.

    fileid
        Valid only when Label = EFLG and Key = CM, this value specifies file
        IDs:

        IDnum - The file ID number. Creates the element components from the
        diagnostic files corresponding to the specified file ID number in
        the working directory.

        ALL - Creates element components from all available diagnostic files
        residing in the working directory. This value is the default if you
        do not specify an IDnum value.

    prefix
        Sets the prefix name for components. Specify up to 21 alphanumeric
        characters.

    Notes
    -----
    Based on the nonlinear diagnostic results (created via the NLDIAG,EFLG
    command), the NLDPOST command creates element components with predefined
    names. The following table lists the diagnostic criteria and component
    names (with specified prefix and without). Here xxx corresponds to the
    file ID (FileID) of Jobname.ndxxx or Jobname.nrxxx.

    If you have trouble viewing specific element components, see Viewing
    Hidden Element Components in the Basic Analysis Guide.

    For more information, see Performing Nonlinear Diagnostics.
    """
    command = f"NLDPOST,{label},{key},{fileid},{prefix}"
    return self.run(command, **kwargs)
f5209920bc3e2d5a799df66a0a5ade4be32f79ce
664,747
import torch


def huber_function(x: torch.Tensor, delta: float = 1.0):
    """Huber function.

    Args:
        x: difference between the observed and predicted values
        delta: the threshold at which to change between delta-scaled L1 and
            L2 loss, must be positive. Default value is 1.0

    Returns:
        Huber function (Tensor)
    """
    return torch.where(x.abs() <= delta, 0.5 * x**2, delta * (x.abs() - 0.5 * delta))
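A small numeric check of the two branches (quadratic inside delta, linear outside), assuming the function above:

import torch
x = torch.tensor([0.5, 2.0])
# |0.5| <= 1 -> 0.5 * 0.5**2 = 0.125; |2.0| > 1 -> 1.0 * (2.0 - 0.5) = 1.5
print(huber_function(x))  # tensor([0.1250, 1.5000])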
8e95a3cc32e14578aa3c3629667bb7a87a6e9f89
599,385
def _sequence_to_index(seq, dim_list):
    """
    Inverse of _index_to_sequence.

    Parameters
    ----------
    seq : list of ints
        List of coordinates for each particle.
    dim_list : list of int
        List of dimensions of consecutive particles.

    Returns
    -------
    i : int
        Index in a matrix.
    """
    i = 0
    for s, d in zip(seq, dim_list):
        i *= d
        i += s
    return i
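The loop implements mixed-radix positional encoding; for example, with hypothetical values:

# seq (1, 2) with dims (3, 4): i = (0 * 3 + 1) * 4 + 2 = 6
assert _sequence_to_index((1, 2), (3, 4)) == 6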
5a4e2ddc14ff79ce9210faed4c0ede681f82b6e5
69,543
def _get_day_of_year(arg):
    """
    Get the day position in the year starting from 1

    Parameters
    ----------
    arg : tuple
        (year, month, day)

    Returns
    -------
    int with the correct day of the year starting from 1
    """
    ml = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # full Gregorian leap-year rule, not just divisibility by 4
    if arg[0] % 4 == 0 and (arg[0] % 100 != 0 or arg[0] % 400 == 0):
        ml[1] += 1
    yday = sum(ml[:arg[1] - 1]) + arg[2]
    return yday
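For example, with the corrected leap-year rule above (values checked by hand):

assert _get_day_of_year((2023, 3, 1)) == 60  # 31 + 28 + 1 (2023 is not a leap year)
assert _get_day_of_year((2024, 3, 1)) == 61  # 31 + 29 + 1 (2024 is a leap year)
assert _get_day_of_year((1900, 3, 1)) == 60  # 1900 is not a leap year (century rule)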
d322fc88ed27a76201835580d8405706978aa4e8
180,061
def assembly_dyna_stif(omega_par, mass_matrix, damp_matrix, stif_matrix):
    """
    Assembly the dynamic stiffness matrix.

    Args:
        omega_par (:obj:`float`): 2 pi frequency.
        mass_matrix (:obj:`numpy.array`): Mass matrix.
        damp_matrix (:obj:`numpy.array`): Damping matrix.
        stif_matrix (:obj:`numpy.array`): Stiffness matrix.

    Returns:
        Dynamic stiffness matrix.
    """
    return -(omega_par**2) * mass_matrix + 1j * omega_par * damp_matrix + stif_matrix
663297b7b157797f3e64bc94d8994d2553d1add0
316,096
def get_nodes_of_namespace(server, namespaces=None):
    """
    Get the nodes of one or more namespaces.

    Args:
        server: opc ua server to use
        namespaces: list of string uri or int indexes of the namespace to export

    Returns:
        List of nodes that are part of the provided namespaces
    """
    if namespaces is None:
        namespaces = []
    ns_available = server.get_namespace_array()

    if not namespaces:
        namespaces = ns_available[1:]
    elif isinstance(namespaces, (str, int)):
        namespaces = [namespaces]

    # make sure all namespaces are indexes (if needed convert strings to indexes)
    namespace_indexes = [n if isinstance(n, int) else ns_available.index(n)
                         for n in namespaces]

    # filter node ids based on the provided namespaces and convert each
    # node id to a node
    nodes = [server.get_node(nodeid)
             for nodeid in server.iserver.aspace.keys()
             if nodeid.NamespaceIndex != 0
             and nodeid.NamespaceIndex in namespace_indexes]
    return nodes
135bb1539c04fbeea8b36a48d176c25395d433f4
229,939
def remove_indices_from_range(ixs, max_ix):
    """From the indices 0:max_ix+1, remove the individual index values in ixs.

    Returns the remaining ranges of indices and singletons.
    """
    ranges = []
    i0 = 0
    for ix in ixs:
        i1 = ix - 1
        if i1 < i0:
            i0 = ix + 1
        elif i1 == i0:
            ranges.append([i0])
            i0 = ix + 1
        else:
            ranges.append([i0, i1 + 1])
            i0 = ix + 1
    if i0 < max_ix:
        ranges.append([i0, max_ix + 1])
    elif i0 == max_ix:
        ranges.append([i0])
    return ranges
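An illustrative call: removing indices 2 and 5 from 0..7 leaves two half-open ranges and a trailing range:

# [0, 2) and [3, 5) survive before the gaps, [6, 8) after them
assert remove_indices_from_range([2, 5], 7) == [[0, 2], [3, 5], [6, 8]]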
df71db04b7e521815042237000f036735fbbe0f3
41,152
from datetime import datetime


def is_valid_isodate(date: str, check_timezone: bool = False) -> bool:
    """Check if a string is a valid ISO formatted datestring"""
    dt = None
    try:
        dt = datetime.fromisoformat(date)
    except ValueError:
        return False
    if check_timezone:
        if dt.tzinfo:
            return True
        else:
            return False
    return True
212c16236c79ef51d369cef18401cfcfd89e246f
673,779
def split_list(listcont, limit, glue='\n'):
    """
    Splits a list of items in chunks to be sent in messages

    :param listcont: The item list
    :param limit: The character limit
    :param glue: The string that will join every list item
    :return: A list of chunks (lists of items), each of which stays within
        the limit when its items are joined with glue
    """
    chunks = []
    chunk = []
    for item in listcont:
        if len(glue.join(chunk + [item])) > limit:
            chunks.append(list(chunk))
            chunk = [item]
        else:
            chunk.append(item)
    if len(chunk) > 0:
        chunks.append(chunk)
    return chunks
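For instance, with a character limit chosen so the first two items just fit when glued (hypothetical values):

# 'aa\nbb' has length 5 (not over the limit); adding 'cc' would exceed it
assert split_list(['aa', 'bb', 'cc'], limit=5) == [['aa', 'bb'], ['cc']]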
ad52d678cf49cc985be60b422a781803a2974eaa
75,450
def get_user_version(conn):
    """Return user_version value."""
    cur = conn.cursor()
    cur.execute('PRAGMA user_version')
    return cur.fetchone()[0]
862a5feb0508aedbc82570e26a9651d233b8c3c4
458,435
def compose_select(table, fields):
    """Compose select query string.

    Arguments
    ---------
    table : str
        Real table name.
    fields : str
        List of table fields.

    Returns
    -------
    str
        Query string with real table name. However, it can contain
        placeholders for query parameters.
    """
    return f'SELECT {fields} FROM {table}'
fc3ae3a96e4c0456ba05d0b152fbe471157d917b
529,663
def c2f(c):
    """Celsius to Fahrenheit"""
    return 9 / 5 * c + 32
f1c9a382562ffe4edc9d06fdbca3337831c1af7b
454,824
from typing import Dict


def get_default_molecular_config() -> Dict:
    """Get a default molecular configuration (LiH)."""
    problem_config = {
        "ion_pos": ((0.0, 0.0, -1.5069621), (0.0, 0.0, 1.5069621)),
        "ion_charges": (1.0, 3.0),
        "nelec": (2, 2),
    }
    return problem_config
fe0302930cbffecb11573e00f4e434e737377d93
164,913
def add(v1, v2):
    """
    Returns the addition of two 2-D vectors.
    """
    return (v1[0] + v2[0], v1[1] + v2[1])
40ddea20e33efec8a3f1cdb41964a404247deed5
639,208
def board_full(board):
    """
    Utility function that returns True if the given board is full
    and False otherwise

    Arg board: board - the board you want to check
    """
    for row in board:
        for piece in row:
            if piece == '*':
                return False
    return True
a4c5b71cd9aa95f848acb96c62f850afbbf4805e
662,475
def link(rel, href):
    """Generate a link dict from a rel, href pair."""
    if href is not None:
        return {rel: href}
c1fd163315c315da99d1958027cb09c2f91be385
492,619
def flip_bin_string(bin_str: str):
    """
    This function will flip (invert) each of the bits in a given binary
    string without prefix, i.e. "0" becomes "1" and vice versa.

    :param bin_str: A binary string without prefix to flip
    :return: A binary data represented as string
    """
    flipped_bin_str = ""
    for bit in bin_str:
        flipped_bin_str += str(1 - int(bit))
    return flipped_bin_str
88ec62adb7ff641d17baa4d95c57dc2a3b5c2210
220,565
def _clip_pad(tensor, pad_shape):
    """
    Clip boxes of the pad area.

    :param tensor: [n, c, H, W]
    :param pad_shape: [h, w]
    :return: [n, c, h, w]
    """
    H, W = tensor.shape[2:]
    h, w = pad_shape
    if h < H or w < W:
        tensor = tensor[:, :, :h, :w].copy()
    return tensor
c32634c18629578e02ebc168f01a1fe3a9e2d633
231,502
def validate_boolean(option, value):
    """Validates that 'value' is True or False."""
    if isinstance(value, bool):
        return value
    raise TypeError("%s must be True or False" % (option,))
3a35a36fde0cc37069d276da26ef79beb1335e95
441,981
import yaml


def read_yaml(yaml_file):
    """Read a yaml file.

    Args:
        yaml_file (str): Full path of the yaml file.

    Returns:
        data (dict): Dictionary of yaml_file contents. None is returned if an
            error occurs while reading.
    """
    data = None
    with open(yaml_file) as f:
        # use safe_load instead of load
        data = yaml.safe_load(f)
    return data
b2a50ea3421489e327aa84c142dd54ace08f3811
676,784
from typing import Optional
import re


def _parse_url(url: str) -> Optional[str]:
    """Parse a pastecord url"""
    # the dot is escaped so it cannot match an arbitrary character
    match = re.search(r"pastecord\.com(?:/raw|/documents)?/(\w+)(?:\.\w+)?", url)
    if match is None:
        return None
    return match.group(1)
688b0f34485355b34855c8665c2c962564760fc6
266,618
def maximo(a, b):
    """
    Function that returns the maximum of two parameters.

    >>> maximo(3, 4)
    4
    >>> maximo(0, -1)
    0

    :param a: number
    :param b: number
    :return: number
    """
    if a > b:
        return a
    else:
        return b
8a9ca52588640551b83a4bc43ae8195b6dd5a696
433,098
def is_icmp_reply(pkt, ipformat):
    """Return True if pkt is echo reply, else return False. If exception
    occurs return False.

    :param pkt: Packet.
    :param ipformat: Dictionary of names to distinguish IPv4 and IPv6.
    :type pkt: dict
    :type ipformat: dict
    :rtype: bool
    """
    try:
        if pkt[ipformat['IPType']][ipformat['ICMP_rep']].type == \
                ipformat['Type']:
            return True
        else:
            return False
    except:  # pylint: disable=bare-except
        return False
21196c53c0e227602f8aaba984d56aeae3af2781
690,512
def nodes_from_path(G, path, key=''):
    """Helper to get the list of nodes from a path, optionally mapped
    through a node attribute key."""
    if not key:
        return path
    else:
        # G.nodes replaces the G.node accessor removed in networkx 2.x
        keys = [G.nodes[node][key] for node in path]
        return keys
0c256171edabed3f864537d08025f790286077b4
252,460
def partition(pred, iterable):
    """
    Partition a list in two lists where the first list holds all items for
    which the function pred returned True and the second list holds the rest.

    @param pred a function that returns True or False and has one parameter
    @param iterable a list of items which can be called with pred
    @returns two lists, one with all items for which pred returned True and
        a second list with all items for which pred returned False
    """
    trues = []
    falses = []
    for item in iterable:
        if pred(item):
            trues.append(item)
        else:
            falses.append(item)
    return trues, falses
5409660022ae531a45143a3f12a98ad931d06b49
417,255
def ToKeys(hotkey):
    """Converts the action value to shortcut keys used from JavaScript.

    Examples:
      'Ctrl - 9' => '9<>CTRL'
      'Ctrl - Shift - Tab' => 'tab<>CTRL<>SHIFT'
    """
    values = hotkey.split(' - ')
    modifiers = sorted(value.upper() for value in values
                       if value in ['Shift', 'Ctrl', 'Alt', 'Search'])
    keycode = [value.lower() for value in values
               if value not in ['Shift', 'Ctrl', 'Alt', 'Search']]
    # The keys which are highlighted even without modifier keys.
    base_keys = ['backspace', 'power']
    if not modifiers and (keycode and keycode[0] not in base_keys):
        return None
    return '<>'.join(keycode + modifiers)
52e44ae3a6d49bb22ca52b594b9e985a2bf11fb5
371,696
import re


def validate_group_name(group_name):
    """
    Check whether group name is valid.
    A valid group name only contains alphanumeric characters (plus
    whitespace, quotes, dots and dashes), and its length must not
    exceed 255.
    """
    if len(group_name) > 255:
        return False
    return re.match(r'^[\w\s\'\.-]+$', group_name, re.U)
2369ee4c89594d6256bb6829dd2eb4561026682d
453,419
def run_test(
    func,
    points,
    *args,
    correct_message="default correct",
    error_message="default error",
    **kwargs
):
    """Run a pre-defined test function and creates a dictionary containing
    the results of the test

    Parameters
    ----------
    func : function or method
        Pre-defined test function to run
    points : int or float
        Number of points assigned for passing test
    *args
        Variable arguments passed to test function
    correct_message : str
        Custom message returned with passing test
    error_message : str
        Custom message returned with failing test
    **kwargs
        Keyword arguments passed to test function

    Returns
    -------
    results : dict with the following keys:
        points : int or float : points assigned based on test results
        pass : bool : passing status of test function
        description : str : test function name that was run
        message : str : custom message returned based on passing status
        traceback : AssertionError : returned from test function when pass is False
    """
    results = {"points": 0, "pass": False}
    try:
        fname = func.__name__
        results["description"] = fname
        func(*args, **kwargs)
    except Exception as e:
        results["message"] = error_message
        results["traceback"] = e
    else:
        results["pass"] = True
        results["message"] = correct_message
        results["points"] = points
    return results
772b864033c3c5775cf14bdf5f5f1033e92457cd
205,722
from typing import Iterable
from typing import Mapping
from typing import Any
from typing import Dict


def combine_api_output_dictionaries(input_dicts: Iterable[Mapping[str, Any]],
                                    require_uniqueness: bool = True
                                    ) -> Dict[str, Any]:
    """Merges the list of API detection dictionaries *input_dicts*. See header
    comment for details on merge rules.

    Args:
        input_dicts: list of dicts, each dict is the JSON of the detections
            output file from the Batch Processing API
        require_uniqueness: bool, whether to require that the images in each
            input_dict be unique

    Returns:
        dict, represents the merged JSON
    """
    # Map image filenames to detections, we'll convert to a list later
    images = {}
    info: Dict[str, str] = {}
    detection_categories: Dict[str, str] = {}
    classification_categories: Dict[str, str] = {}
    n_redundant_images = 0
    n_images = 0
    known_fields = ['info', 'detection_categories',
                    'classification_categories', 'images']

    for input_dict in input_dicts:
        for k in input_dict:
            if k not in known_fields:
                raise ValueError(f'Unrecognized API output field: {k}')

        # Check compatibility of detection categories
        for cat_id in input_dict['detection_categories']:
            cat_name = input_dict['detection_categories'][cat_id]
            if cat_id in detection_categories:
                assert detection_categories[cat_id] == cat_name, (
                    'Detection category mismatch')
            else:
                detection_categories[cat_id] = cat_name

        # Check compatibility of classification categories
        if 'classification_categories' in input_dict:
            for cat_id in input_dict['classification_categories']:
                cat_name = input_dict['classification_categories'][cat_id]
                if cat_id in classification_categories:
                    assert classification_categories[cat_id] == cat_name, (
                        'Classification category mismatch')
                else:
                    classification_categories[cat_id] = cat_name

        # Merge image lists, checking uniqueness
        for im in input_dict['images']:
            im_file = im['file']
            if require_uniqueness:
                assert im_file not in images, f'Duplicate image: {im_file}'
                images[im_file] = im
                n_images += 1
            else:
                if im_file in images:
                    n_redundant_images += 1
                    previous_im = images[im_file]
                    # Replace a previous failure with a success
                    if ('detections' in im) and ('detections' not in previous_im):
                        images[im_file] = im
                        print(f'Replacing previous failure for image: {im_file}')
                else:
                    images[im_file] = im
                    n_images += 1

        # Merge info dicts, don't check completion time fields
        if len(info) == 0:
            info = input_dict['info']
        else:
            info_compare = input_dict['info']
            assert info_compare['detector'] == info['detector'], (
                'Incompatible detection versions in merging')
            assert info_compare['format_version'] == info['format_version'], (
                'Incompatible API output versions in merging')
            if 'classifier' in info_compare:
                if 'classifier' in info:
                    assert info['classifier'] == info_compare['classifier']
                else:
                    info['classifier'] = info_compare['classifier']

    # ...for each dictionary

    if n_redundant_images > 0:
        print(f'Warning: found {n_redundant_images} redundant images '
              f'(out of {n_images} total) during merge')

    # Convert merged image dictionaries to a sorted list
    sorted_images = sorted(images.values(), key=lambda im: im['file'])

    merged_dict = {'info': info,
                   'detection_categories': detection_categories,
                   'classification_categories': classification_categories,
                   'images': sorted_images}
    return merged_dict
9be4cea0574934b8552d716ff4048c8c8b827954
618,553
def quote_line(line: str) -> str:
    """Formats a text line for discord message as quote"""
    return f"> {line}"
1626b04327ffb2de76a9e3982fb090b78f3a19f5
406,984
import math


def norm_pdf(x, mu=0, sigma=1):
    # type: (float, float, float) -> float
    """Calculate the probability density of a normal distribution"""
    return math.exp(-(((x - mu) ** 2) / (2 * (sigma ** 2)))) / math.sqrt(2 * math.pi * (sigma ** 2))
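A quick sanity check at the mode of the standard normal, where the density is 1/sqrt(2*pi):

import math
assert abs(norm_pdf(0) - 1 / math.sqrt(2 * math.pi)) < 1e-12  # ~0.3989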
9db42b260ebd7135de172a5cc4501e8e6aba443c
345,217
def removeDuplicates(a_list: list) -> list:
    """
    Removes duplicates from a list and returns it. Elements need to be
    hashable (implement __hash__ and __eq__).
    """
    return list(dict.fromkeys(a_list))
d4099bad061ea8a08051dbd15ee07cd0efcdb056
611,543
import re


def _remove_qss_comment(stylesheet: str) -> str:
    """Remove qss comment from the stylesheet string."""
    stylesheet = re.sub(r" */\*[\s\S]*?\*/", "", stylesheet)
    # Change blank lines to one blank line
    return re.sub(r"\n\s*\n", "\n", stylesheet)
b435880dc4a7713d1e2fa689b145bb2635aa7a2d
223,072
import torch


def one_hot_encoding(input):
    """
    One-hot encoder

    Inputs:
    - input : 1D tensor containing labels (N,)
    Outputs:
    - output: one-hot encoded tensor (N, C)
    - classes: all unique class in order (C,)
    """
    if len(input.shape) > 1 and input.shape[1] > 1:
        raise ValueError("Tensor to be encoded should have only one dimension "
                         "or the second dimension should have size of one!")
    classes = input.unique()  # unique() returns sorted values
    N = input.shape[0]
    C = classes.shape[0]
    # Map raw labels to their positions in `classes`; indexing with the raw
    # labels directly only works when they already are 0..C-1.
    class_idx = torch.searchsorted(classes, input.flatten())
    output = torch.zeros(N, C).long()
    output[torch.arange(N), class_idx] = 1
    return output, classes
0b774ca9d5c310ca0c8b7595838937b476541bd5
564,343
def insert_nulls(df, fill_val=-99999999.99):
    """replaces fill_val with null in all columns of df.

    :df: Dataframe
    :fill_val: fill value to be replaced with nulls. default=-99999999.99
    :returns: Dataframe with fill_val replaced with nulls
    """
    for col in df.columns.tolist():
        # .loc replaces the .ix indexer removed in modern pandas
        df.loc[df[col] == fill_val, col] = None
    return df
71fdf29a16916ee1f119b5267043960c9a6d5227
56,120
def min_nondiag_triple(matrix):
    """Returns a triple consisting of a row #, col #, and value of a minimal
    non-diagonal entry of matrix."""
    if (not len(matrix) > 1) or (not len(matrix[0]) > 1):
        print("ERROR in min_nondiag_triple: matrix too small")
        return
    minval = matrix[0][1]
    mini = 0
    minj = 1
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            if i != j and matrix[i][j] < minval:
                mini, minj, minval = i, j, matrix[i][j]
    return mini, minj, minval
5d6e8dc82070c5b980bb2b63a2649c97327b9199
403,512
def load_data(point_cloud_batch, label_cloud_batch, NUM_SAMPLE_POINTS=1024,
              LABELS=['wing', 'body', 'tail', 'engine']):
    """
    load randomly selected points from point cloud batch

    Parameters:
        point_cloud_batch (np array): numpy array of each point cloud
            expressed in (x, y, z) format
        label_cloud_batch (int): label of point clouds to assign for
            augmented points
        NUM_SAMPLE_POINTS (int): sample size, 1024 by default

    Returns:
        sampled point_cloud_batch, sampled label_cloud_batch
    """
    point_cloud_batch.set_shape([NUM_SAMPLE_POINTS, 3])
    label_cloud_batch.set_shape([NUM_SAMPLE_POINTS, len(LABELS) + 1])
    return point_cloud_batch, label_cloud_batch
34bc208e94ccee31c4020d0effd16be8bcf12a6f
346,093
def get_dots_bounds(dots):
    """Returns the bounds of a list of dots."""
    return (
        min(x for x, y in dots),
        max(x for x, y in dots),
        min(y for x, y in dots),
        max(y for x, y in dots)
    )
219ad0838a1c67712189ff5b502a75e3825d040b
199,369
def crop_img(i, edge=0):
    """Crop the image edge % per side."""
    new_img = i.copy()
    height = new_img.shape[0]
    width = new_img.shape[1]
    sh = int(height / 100 * edge)
    sw = int(width / 100 * edge)
    return new_img[sh:height - sh, sw:width - sw]
73d81c2829056b288bf64ccfc74b840f63c3b51a
330,425
from typing import List


def list_to_string(list_to_convert: List) -> str:
    """
    Convert provided list to comma separated string

    :param list_to_convert: List to convert
    :return: Comma separated string with values of the provided list
    """
    return ",".join(map(str, list_to_convert))
373997fe7c6753d3e9fec4958076f5f2e25421f4
533,095
def parse_route_flow_counter_stats(dut):
    """Parse command output of "show flowcnt-route stats"

    Args:
        dut (object): DUT object

    Returns:
        dict: Parsed result. e.g. {'1.1.1.0/24': {'packets': '5', 'bytes': '4500'}}
    """
    stats_list = dut.show_and_parse('show flowcnt-route stats')
    parse_result = {}
    for stats in stats_list:
        if stats['vrf'] == 'default':
            key = stats['matched routes']
        else:
            key = '|'.join([stats['vrf'], stats['matched routes']])
        parse_result[key] = {
            'packets': stats['packets'],
            'bytes': stats['bytes']
        }
    return parse_result
19751b6f8f9b678b31cb0ada1780f87a3380d10f
216,927
import psycopg2
import psycopg2.extras


def get_data_from_postgres(conn, qry):
    """
    Gets a list of 'DictRows' from a PostgreSQL database. A DictRow can be
    indexed numerically or by column name. In other words it behaves like a
    list OR a dictionary object.

    :param conn: The database connection string
    :param qry: The SQL query which gets the data
    :return: The result set.
    """
    try:
        my_conn = psycopg2.connect(conn)
        cur = my_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        try:
            cur.execute(qry)
            c = cur.fetchall()
            cur.close()
            return c
        except psycopg2.Error as e:
            print(e)
            return False
    except psycopg2.OperationalError as e:
        print(e)
        return False
adb895da2e0139eed175f8ef7be4eb1f8db4388c
362,693
def _buses_with_gens(gens):
    """
    Return a list of buses with generators
    """
    buses_with_gens = list()
    for gen_name, gen in gens.items():
        if not gen['bus'] in buses_with_gens:
            buses_with_gens.append(gen['bus'])
    return buses_with_gens
5cf9e918a55e140053bceb538cd1f15b331c253d
16,229
def get_top_users_and_orgs(user_data_df, top_repos_df):
    """Gets the top 25 Github Users and top 25 Organizations.

    Note: Top 25 Users are determined based on total followers. Top 25
    Organizations are determined based on total stars for all of an
    organizations repos.

    Parameters
    ----------
    user_data_df : pandas DataFrame
        DataFrame that contains user data scraped with the scrape_github method.
    top_repos_df : pandas DataFrame
        DataFrame that contains repo data scraped with the scrape_github method.

    Returns
    -------
    tuple of sets
        The top 25 Github Users and top 25 Organizations
    """
    top_users = set(
        user_data_df.sort_values("followers", ascending=False)
        .head(25)
        .query("type == 'User'")["username"]
        .tolist()
    )
    top_organizations = set(
        top_repos_df.query("type == 'Organization'")
        .groupby("username")
        .sum()
        .reset_index()
        .sort_values("stars", ascending=False)["username"]
        .head(25)
        .tolist()
    )
    return top_users, top_organizations
285885028d35574475c9bbf0a0d2c6f98feadcb0
632,264
import pathlib
import re


def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> bool:
    """Recursively go through a dir and its children and find the regex."""
    pattern = re.compile(search_pattern)
    for fil in path.glob(glob_pattern):
        if not fil.is_file():
            continue
        if pattern.search(fil.read_text()):
            return True
    return False
e3006f0f9976cf0de75b9b5fd71553ed66c0a6e6
70,394
import six


def decode_str(string):
    """
    Converts byte strings to unicode strings.
    """
    if isinstance(string, six.text_type):
        return string
    return string.decode('utf-8')
b9eacec05d1c7b51a66f9870e9ad8ccaed71679b
360,732
def get_topics_by_datatype(bag):
    """
    Get all the message types in the bag and their associated topics.

    @param bag: bag file
    @type bag: rosbag.Bag
    @return: mapping from message typename to list of topics
    @rtype: dict of str to list of str
    """
    topics_by_datatype = {}
    for c in bag._get_connections():
        topics_by_datatype.setdefault(c.datatype, []).append(c.topic)
    return topics_by_datatype
c1c0b92408f22a96f2127a757126b4d1e8f3f344
620,890
def order_cat(col, ct, rev=False):
    """
    Small helper to convert column to categorical with option to revert order
    """
    col = col.astype(ct)
    if rev:
        # assign the result; the inplace= keyword of set_categories was
        # deprecated and then removed in recent pandas
        col = col.cat.set_categories(new_categories=ct.categories[::-1],
                                     ordered=True)
    return col
b4e3c5d99adff026be6a5b198ec9a4822f1dccf7
216,157
def format_timedelta(timedelta):
    """Formats a timedelta object to a string by throwing off the microsecond
    part from the standard timedelta string representation.
    """
    whole_repr = str(timedelta) + '.'
    return whole_repr[:whole_repr.find('.')]
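For example, appending the '.' makes the slice safe whether or not a fractional part is present:

from datetime import timedelta
assert format_timedelta(timedelta(hours=1, seconds=30, microseconds=123)) == '1:00:30'
assert format_timedelta(timedelta(hours=1, seconds=30)) == '1:00:30'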
8c71eb5dc7fbbed65eaece1e84351c805011e9d0
653,641
import re


def parse_duration(s):
    """
    Parse a `duration` string and convert it to numeric.

    Parameters
    ----------
    s : str or float
        `duration` value

    Returns
    -------
    Int
        Numeric `duration` value
    """
    # Check if value is null or already a float
    if isinstance(s, float):
        return s
    # Remove seconds, "m", and spaces
    s = re.sub(r'\:\s*\d{1,2}', '', s)
    s = re.sub(r'm|\s*', '', s, flags=re.IGNORECASE)
    # Convert to int
    match = re.search(r'(\d)(ho?u?r?s?)(\d\d?)?', s, flags=re.IGNORECASE)
    if match:  # if time is in hours
        i = int(match.group(1)) * 60  # hours to minutes
        if match.group(3):
            return i + int(match.group(3))  # add minutes
        return i  # hours only, no minutes portion
    else:  # if time is in minutes
        return int(s)
df92794b7b5007c71363b5dc4e264c34f7ea9325
164,268
def getValueFromObject(x, y):
    """
    Returns a value from a dictionary x if present. Otherwise returns an
    empty string
    """
    return x[y] if y in x else ''
15bf70aab5cd1265570cdc7a510f47b0efb25e65
589,314
def unfresh_token(user):
    """Return unfresh token from user fixture

    :param user: Pytest fixture
    :return: Unfresh JWT access token
    """
    return user.get_access_token()
490fd3788b5c4a2110501016469496a494a784f3
102,051
def _en_to_enth(energy, concs, A, B, C):
    """Converts an energy to an enthalpy.

    Converts energy to enthalpy using the following formula:
    Enthalpy = energy - (energy contribution from A)
                      - (energy contribution from B)
                      - (energy contribution from C)
    An absolute value is taken afterward for convenience.

    Parameters
    ----------
    energy : float
        The energy of the structure
    concs : list of floats
        The concentrations of each element
    A : float
        The energy of pure A
    B : float
        The energy of pure B
    C : float
        The energy of pure C

    Returns
    -------
    enth : float
        The enthalpy of formation.
    """
    enth = abs(energy - concs[0] * A - concs[1] * B - concs[2] * C)
    return enth
5b288cc00a12f0b4b841bb4e4c78f93b50f277c3
392,906
import torch


def calc_iou_tensor(box1, box2):
    """
    Calculation of IoU based on two boxes tensor,
    Reference to https://github.com/kuangliu/pytorch-src
    input:
        box1 (N, 4)
        box2 (M, 4)
    output:
        IoU (N, M)
    """
    N = box1.size(0)
    M = box2.size(0)
    be1 = box1.unsqueeze(1).expand(-1, M, -1)
    be2 = box2.unsqueeze(0).expand(N, -1, -1)

    # Left Top & Right Bottom of the intersection
    lt = torch.max(be1[:, :, :2], be2[:, :, :2])
    rb = torch.min(be1[:, :, 2:], be2[:, :, 2:])

    delta = rb - lt
    delta[delta < 0] = 0
    intersect = delta[:, :, 0] * delta[:, :, 1]

    delta1 = be1[:, :, 2:] - be1[:, :, :2]
    area1 = delta1[:, :, 0] * delta1[:, :, 1]
    delta2 = be2[:, :, 2:] - be2[:, :, :2]
    area2 = delta2[:, :, 0] * delta2[:, :, 1]

    iou = intersect / (area1 + area2 - intersect)
    return iou
1928858c1ba2cd8ebcfe06c3354d8ce9960dba25
137,217
from typing import List


def find_min(lst: List[int]) -> int:
    """
    Return the smallest value in <lst>.

    >>> find_min([3, 4, 5, 6, 7])
    3
    >>> find_min([5, 6, 7, 3, 4])
    3
    """
    smallest = lst[0]
    for i in range(len(lst)):
        if lst[i] < smallest:
            smallest = lst[i]
    return smallest
599c4695bd9d77383b90ec671b3beb4629cbf78d
239,378
def poly_scale(p, n):
    """Multiply polynomial ``p(x)`` with ``x^n``.

    If n is negative, poly ``p(x)`` is divided with ``x^n``, and remainder
    is discarded (truncated division).
    """
    if n >= 0:
        return list(p) + [0] * n
    else:
        return list(p)[:n]
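Illustrative calls, with coefficients ordered highest degree first (as the append/truncate logic implies):

# (x^2 + 2x + 3) * x^2 = x^4 + 2x^3 + 3x^2
assert poly_scale([1, 2, 3], 2) == [1, 2, 3, 0, 0]
# (x^2 + 2x + 3) // x = x + 2, remainder 3 dropped
assert poly_scale([1, 2, 3], -1) == [1, 2]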
4a994edcc7656189c7e15ca55d7e89315fdc784e
275,690
def top(df, n=5, column=None):
    """Returns the top n entries (5 by default) from a DataFrame,
    sorted by column.
    """
    return df.sort_values(by=column)[-n:]
d82cc4e488887fa5109a9ca130257e1e59b0fc90
117,404
def check_search_account_options(val, home):
    """Check if current account option is valid."""
    try:
        # Change option to integer.
        val = int(val)
        # Check if option is in range.
        if val <= 0 or val > 5:
            print('*********************************')
            print('Not an option, please try again.\n')
            return val, home
        home = False
        return val, home
    except ValueError:
        # Option is not an integer.
        home = True
        print('*********************************')
        print('Please enter an integer 1-5.\n\n\n')
        return val, home
aa79c264c6edd93abb82f48fe039c8a9ff7a756b
455,162
def isInPar(db, chrom, start, end):
    """ return None if not in PAR or "1" or "2" if genome is hg19 or hg38
    and chrom:start-end is in a PAR1/2 region """
    if db not in ("hg19", "hg38"):
        return None
    if not chrom in ("chrX", "chrY"):
        return None
    # all coordinates are from https://en.wikipedia.org/wiki/Pseudoautosomal_region
    # and look like they're 1-based
    if db == "hg38":
        if chrom == "chrX":
            if start >= 10001 and end < 2781479:
                return "1"
            if start >= 155701383 and end < 156030895:
                return "2"
        elif chrom == "chrY":
            if start >= 10001 and end < 2781479:
                return "1"
            if start >= 56887903 and end < 57217415:
                return "2"
    elif db == "hg19":
        if chrom == "chrX":
            if start >= 60001 and end < 2699520:
                return "1"
            if start >= 154931044 and end < 155260560:
                return "2"
        elif chrom == "chrY":
            if start >= 10001 and end < 2649520:
                return "1"
            if start >= 59034050 and end < 59363566:
                return "2"
    return None
b2728baa4b1659062276289b34ec1055708f7aaa
608,374
import math


def get_bands(d_r, number_of_bands, precision=2, nearest_integer=False):
    """
    Divide a range into bands
    :param: d_r - [min, max] the range that is to be covered by the bands.
    :param: number_of_bands - The number of bands, a positive integer.
    :param: precision - The decimal precision of the bounds.
    :param: nearest_integer - If True then [floor(min), ceil(max)] is used.
    :return: A dictionary consisting of the band number and [min, midpoint, max]
             for each band.
    """
    prec = abs(precision)
    if prec > 14:
        prec = 14
    bands = dict()
    if (d_r[1] < d_r[0]) or (number_of_bands <= 0):
        return bands
    x = list(d_r)
    if nearest_integer:
        x[0] = math.floor(x[0])
        x[1] = math.ceil(x[1])
    dx = (x[1] - x[0]) / float(number_of_bands)
    b = [x[0], x[0] + dx / 2.0, x[0] + dx]
    i = 0
    while i < number_of_bands:
        b = list(map(lambda ele_b: round(ele_b, prec), b))
        if i == 0:
            b[0] = x[0]
        bands[i] = b
        b = [b[0] + dx, b[1] + dx, b[2] + dx]
        i += 1
    return bands
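For instance, splitting the unit interval into two bands (output worked out from the code above):

bands = get_bands([0.0, 1.0], 2)
# {0: [0.0, 0.25, 0.5], 1: [0.5, 0.75, 1.0]}
print(bands)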
90acb105bd9301feaeccb588b94b22c67ef3e66e
412,759
def anagrams(word, words):
    """
    A function that will find all the anagrams of a word from a list. You
    will be given two inputs a word and an array with words. You should
    return an array of all the anagrams or an empty array if there are none.
    """
    template = sorted(word)
    results = list()
    for w in words:
        if template == sorted(w):
            results.append(w)
    return results
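A classic example (illustrative values):

assert anagrams('listen', ['enlist', 'google', 'inlets', 'banana']) == ['enlist', 'inlets']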
746f0dc06789a411ac408e281a7a299428cdaf49
398,326
def get_issue_info(payload):
    """Extract all information we need when handling webhooks for issues."""
    # Extract the title and the body
    title = payload.get('issue')['title']
    # Create the issue dictionary
    return {'action': payload.get('action'),
            'number': payload.get('issue')['number'],
            'domain': title.partition(' ')[0]}
fd3836a94dbd93a387cd9d38b81c37c2a73be4ba
665,942
def get_symbol_addr(sym):
    """Get the address of a symbol"""
    return sym["st_value"]
5c8bd6a64ad03213895c90dbe483739f92706cfa
350,069
def gen_refined_corr_dfs(df_corr, ang_corr_out_path=None, subj_corr_out_path=None):
    """Generate print-ready angle/subject correlation tables for publication.

    Args:
        df_corr (pandas.DataFrame): general correlation table of all data
            streams generated by dataobj.data_utils.build_corr_table()
        ang_corr_out_path (str): output path for angle correlation CSV, if desired
        subj_corr_out_path (str): output path for subject correlation CSV, if desired

    Returns:
        pandas.DataFrame angle correlation table
        pandas.DataFrame subject correlation table
    """
    # extract only desired table rows (polyfits are omitted/nonsensical)
    df_corr_ref = df_corr.loc[[
        'emg-abs-bic', 'emg-abs-brd', 'us-csa', 'us-csa-dt', 'us-t',
        'us-t-dt', 'us-tr', 'us-tr-dt'
    ]]

    # rename with print-ready labels
    df_corr_ref.rename(index={
        'emg-abs-bic': 'sEMG-BIC',
        'emg-abs-brd': 'sEMG-BRD',
        'us-csa': 'CSA',
        'us-csa-dt': 'CSA-DT',
        'us-t': 'T',
        'us-t-dt': 'T-DT',
        'us-tr': 'AR',
        'us-tr-dt': 'AR-DT'
    }, inplace=True)

    # aggregate angle correlation data
    df_corr_ang = df_corr_ref[[
        'sub1wp1', 'sub1wp2', 'sub1wp5', 'sub1wp8', 'sub1wp10'
    ]]
    df_corr_ang.columns = ['25', '44', '69', '82', '97']
    if ang_corr_out_path:
        df_corr_ang.to_csv(ang_corr_out_path)

    df_corr_subj = df_corr_ref[[
        'sub1wp5', 'sub2wp5', 'sub3wp5', 'sub4wp5', 'sub5wp5'
    ]]
    df_corr_subj = df_corr_subj.loc[[
        'CSA', 'CSA-DT', 'T', 'T-DT', 'AR', 'AR-DT'
    ]]
    df_corr_subj.columns = ['Sub1', 'Sub2', 'Sub3', 'Sub4', 'Sub5']
    if subj_corr_out_path:
        df_corr_subj.to_csv(subj_corr_out_path)

    return df_corr_ang, df_corr_subj
2a487cb52ad58f26f1dd899285cd5a937d49ff64
438,379
def numpixels(image):
    """The number of pixels in the image"""
    return image.size
2ffb8f4ce86b391e89d12ded7601fe4038fc6a4f
496,613
from datetime import datetime


def timestamp_to_seconds(timestamp):
    """Convert an ISO-ish timestamp to seconds-since-epoch."""
    # .timestamp() is portable and honors the parsed offset, unlike the
    # platform-specific strftime("%s") extension
    return int(datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S%z").timestamp())
40985cb21247f0d4b299eafe82a3c43b16fbe225
516,637