Columns: content (string, 39 to 9.28k chars); sha1 (string, 40 chars); id (int64, 8 to 710k)
import math


def top2daz(north, east, up):
    """Compute azimuth, zenith and distance from a topocentric vector.

    Given a topocentric vector (aka north, east and up components), compute
    the azimuth, zenith angle and distance between the two points.

    Args:
        north (float): the north component (in meters)
        east (float): the east component (in meters)
        up (float): the up component (in meters)

    Returns:
        tuple (floats): a tuple of three floats is returned, as:
        [distance, azimuth, zenith], where distance is in meters, and
        azimuth and zenith are in radians.
    """
    distance = math.sqrt(north*north + east*east + up*up)
    a = math.atan2(east, north) % (math.pi*2e0)  # normalized [0, 2pi]
    zenith = math.acos(up/distance)
    return distance, a, zenith
67a127957b0dc131a6fe5505de05b89871542009
5,116
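A quick usage sketch for top2daz, with illustrative (not source) values:

d, az, zen = top2daz(100.0, 100.0, 0.0)
# d ~= 141.421 m, az ~= 0.7854 rad (45 deg), zen ~= 1.5708 rad (90 deg)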
def calculate_row_format(columns, keys=None):
    """
    Calculate row format.

    Args:
        columns (dict): the keys are the column name and the value the max length.
        keys (list): optional list of keys to order columns as well as to filter for them.

    Returns:
        str: format for table row
    """
    row_format = ''
    if keys is None:
        keys = columns.keys()
    else:
        keys = [key for key in keys if key in columns]

    for key in keys:
        if len(row_format) > 0:
            row_format += "|"
        row_format += "%%(%s)-%ds" % (key, columns[key])
    return '|' + row_format + '|'
91f4ade1a4ce35d57e45136392393d78cca865b6
150,583
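An illustrative call to calculate_row_format, using a made-up column spec:

fmt = calculate_row_format({'name': 10, 'age': 3}, keys=['name', 'age'])
# fmt == '|%(name)-10s|%(age)-3s|'
print(fmt % {'name': 'alice', 'age': 30})
# prints: |alice     |30 |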
import math


def _calculate_target_load(num_reqs: int, rate: float = 100, baseline: int = 0) -> int:
    """Given the rate and number of URLs in data set, calculate
    how many URLs to send in this load"""
    if baseline:
        target_num_reqs = baseline * (rate / 100)
    else:
        target_num_reqs = num_reqs * (rate / 100)
    return math.ceil(target_num_reqs)
953cbdafa18899f59743932f7edad31d72080ea8
653,743
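A usage sketch for _calculate_target_load (values are hypothetical):

_calculate_target_load(1000, rate=50)                # -> 500
_calculate_target_load(1000, rate=50, baseline=200)  # -> 100 (baseline overrides num_reqs)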
from datetime import datetime

import pytz


def get_date(prompt: str, timezone: str) -> datetime:
    """ Obtains a date from user input. """
    date_str = input(f'Enter date of {prompt} (yy-mm-dd hh:mm): ')
    date = datetime.strptime(date_str, "%y-%m-%d %H:%M")
    print(f'The date you entered is: {date}')
    # localize() attaches the correct UTC offset; replace(tzinfo=...) with a
    # pytz zone would silently use the zone's LMT offset.
    return pytz.timezone(timezone).localize(date)
26dca58b6cb4edc3fd61032ed931aa3963efc63b
50,923
def _get_loss(loss_fn, model, objective, X, y, batch=False):
    """
    Return
        - 1d array of individual losses of shape=(X.shape[0],),
          unless batch=True, then return a single float.

    Note
        - Parallelizable method.
    """
    if objective == 'regression':
        y_pred = model.predict(X)  # shape=(X.shape[0],)
    elif objective == 'binary':
        y_pred = model.predict_proba(X)[:, 1]  # 1d array of pos. probabilities, shape=(X.shape[0],)
    else:
        assert objective == 'multiclass'
        y_pred = model.predict_proba(X)  # shape=(X.shape[0], no. class)

    result = loss_fn(y, y_pred, raw=False, batch=batch)  # shape=(X.shape[0],) or single float
    return result
3c8713832228b8439ed0026b5b79b876f9b68581
515,634
def create_colocation_group_to_ops_map(op_graph):
    """Generate a dict that maps a colocation group to its op id list."""
    retval = {}
    for op_id, op_data in op_graph.nodes().items():
        # assume there is only one group
        group = op_data['colocation_group']
        if group in retval:
            retval[group].append(op_id)
        else:
            retval[group] = [op_id]
    return retval
be8b8567d2ff6988c9f8bbdcd9b3753547b2eccc
631,686
from pathlib import Path


def _convert_pathlib_path(path):
    """Helper function used to convert an instance of pathlib.Path
    into a unicode string.
    """
    if Path is None:
        return path
    if isinstance(path, Path):
        return str(path)
    return path
c4d58a8d883526e02dcc5bcc1da0c21dbb327c6b
392,416
def get_nseg(srf):
    """
    Returns number of segments in SRF file.
    srf: filepath to srf
    """
    with open(srf, "r") as sf:
        sf.readline()
        return int(sf.readline().split()[1])
df08ffaea7a19bfcf36d3c2a8eed6a9e97efb975
514,431
def get_recommendation_table_args(ipa):
    """Creates a recommendation table from an Input Pipeline Analyzer proto.

    Args:
      ipa: An input_pipeline_pb2.InputPipelineAnalysisResult.

    Returns:
      Returns a gviz_api.DataTable
    """
    table_description = [("link", "string", "link")]
    data = [[detail] for detail in ipa.recommendation.details]
    return (table_description, data, None)
b0c972dedca49ba4999384b99a123b121bebcab7
252,399
def calculate_deviations(metric_means):
    """
    Calculate the total deviation for a particular distribution type
    :param metric_means: the calculated means of a particular metric
    :return: total positive and total negative deviation
    """
    overlap = [1, 0.8, 0.6, 0.4, 0.2, 0]
    deviation = metric_means[0] - overlap
    pos = [round(item, 4) for item in deviation if item >= 0]
    neg = [round(abs(item), 4) for item in deviation if item < 0]
    return sum(pos), sum(neg)
184003ea9eacf5574430b23550e0661f18f161a4
564,622
def get_filesize_est(n_regions):
    """
    Get a filesize estimate given the number of regions in grid encoding.

    Parameters
    ----------
    n_regions: int
        number of regions encoded by grid encoding

    Returns
    -------
    float
        Estimated filesize
    """
    return 0.00636654 * n_regions + 3.392864597
01a3806be9fa70998526d2636cd5800dd1e24b42
271,299
import torch


def smooth_dice_beta_loss(pred: torch.Tensor, target: torch.Tensor, beta: float = 1.,
                          smooth: float = 1., eps: float = 1e-6) -> torch.Tensor:
    """
    Smoothed dice beta loss. Computes
    1 - (((1 + beta**2) * tp + smooth) / ((1 + beta**2) * tp + beta**2 * fn + fp + smooth + eps))

    :param pred: (torch.Tensor) predictions, logits
    :param target: (torch.Tensor) target, logits or binary
    :param beta: (float) weight to emphasize recall
    :param smooth: (float) smoothing value
    :param eps: (eps) epsilon for numerical stability
    :returns dice_loss: (torch.Tensor) the dice loss
    """
    pred = torch.sigmoid(pred)
    target = (target > 0).float()

    tp = (pred.reshape(-1) * target.reshape(-1)).sum()
    fp = pred.reshape(-1).sum() - tp
    tn = ((1 - pred).reshape(-1) * (1 - target).reshape(-1)).sum()
    fn = (1 - pred).reshape(-1).sum() - tn

    return 1 - (((1 + beta ** 2) * tp + smooth) /
                ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth + eps))
7b9086d8f7d0c94d405f04f55df787fcf406e1b1
63,853
def list_to_str(list_name):
    """
    Formats a list into a string.
    :param list_name: (List of String) List to be formatted
    :return: (String) String representation of list
    """
    final_str = ''
    for string in list_name:
        final_str += string + '\n'
    return final_str
b36b8d2f7942a48abc7109879275be5c7e669644
191,246
def normalize(series):
    """Accepts a column (a pandas.Series object) and returns a normalized
    version. Operations are applied to all elements of the column.
    'Normalized' means centering its distribution."""
    mean = series.mean()
    sdev = series.std(ddof=0)  # population standard dev
    normalized = (series - mean) / sdev
    return normalized
5d400fbfe59f990071ccca0527c119dad1c543cd
506,653
import requests


def valid_url(url):
    """valid_url. Pings the passed URL to validate its existence.

    :param url: URL to ping

    >>> valid_url("https://en.wikipedia.org/wiki/Double_copy_theory")
    True
    >>> valid_url("fake")
    False
    """
    try:
        ping = requests.head(url)
        return ping.status_code < 400
    except Exception:
        return False
abd6e3266bffc093454b268e01dc945901e9a7ee
321,297
from typing import Callable


def transform_if_not_none(transform: Callable, value):
    """
    Apply a function on a given value if it's not None. Else, return the None value.

    >>> transform_if_not_none(
    ...     functional_generic.when(operator.is_instance, lambda x: x.casefold()),
    ...     "Some Title"
    ... )
    'some title'

    >>> transform_if_not_none(
    ...     functional_generic.when(operator.is_instance, lambda x: x.casefold()),
    ...     None
    ... )
    """
    if value is not None:
        return transform(value)
    return value
17eb2b18026dc593e59fd02a1af81c6d512c8d65
223,752
def sorted_with_prefix(prefix, it, drop_exact=True, drop_special=True):
    """
    >>> sorted_with_prefix("foo", ["fooZ", "fooAA", "fox"])
    ['fooAA', 'fooZ']
    >>> sorted_with_prefix("", ["fooZ", "fooAA", "_f", "__f", "fox"])
    ['fooAA', 'fooZ', 'fox', '_f']
    >>> sorted_with_prefix("", ["fooZ", "fooAA", "_f", "__f", "fox"], drop_special=False)
    ['fooAA', 'fooZ', 'fox', '_f', '__f']
    """
    key = lambda name: (name.startswith("__"), name.startswith("_"), name)
    return sorted([
        el for el in it
        if el.startswith(prefix)
        and (not drop_exact or el != prefix)
        and (not drop_special or not el.startswith("__"))
    ], key=key)
ba9fb7e56bbe90f7df225d501c8e9aa6e2969889
393,699
def checkstyle_source_to_error_type(source):
    """Convert a checkstyle error source to an error type
    """
    class_name = source.split('.')[-1]
    if class_name.endswith('Check'):
        error_type = class_name[:-len('Check')]
    else:
        error_type = class_name
    return error_type
8a9e5502613a665efa3cefa84c7fc5fd3342cb46
638,876
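An illustrative call to checkstyle_source_to_error_type with a typical checkstyle source string:

checkstyle_source_to_error_type(
    'com.puppycrawl.tools.checkstyle.checks.coding.EmptyStatementCheck')
# -> 'EmptyStatement'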
def _resolve(a, b):
    """Returns a if a is not None, else returns b."""
    if a is not None:
        return a
    else:
        return b
4174b56aa8d35a24190641b09814a792e973b9ea
428,896
def rgb_norm(val):
    """Pixel normalization

    Function equivalent to keras.application.inception_v3.preprocess_input

    Arguments:
        val {int} -- Pixel value (0:255 range)

    Returns:
        int -- Pixel normalized value (-1:1 range)
    """
    return 2/255*(val-255)+1
7bb11de5f69d741520d68dcfa9a01fb3cb7ea1e4
400,424
def prepare_chronotrack_result_json(text):
    """
    :param text: string from /load-model that contains embedded json - assumed
        the callback function is an empty string
    :return: the embedded json (str)
    """
    return text.lstrip("(").rstrip(");")
e085e87f6ccd2063012be6b571343a6a7221e120
638,957
def find_closest_stores(friends, stores):
    """
    Finds the closest store to each friend based
    on absolute distance from the store.

    Parameters:
        friends: Dictionary with friend names as keys
                 and point location as values.
        stores: Dictionary with store names as keys
                and point locations as values.

    Returns:
        Dictionary with friends names as keys and the
        store closest to them as values.

    >>> friends1 = {'rob': 10, 'bob': 12}
    >>> stores1 = {'walmart': 11, 'costco': 12}
    >>> find_closest_stores(friends1, stores1)
    {'rob': 'walmart', 'bob': 'costco'}

    >>> friends2 = {'bob': 12}
    >>> stores2 = {'target': 12, 'costco': 12}
    >>> find_closest_stores(friends2, stores2)
    {'bob': 'costco'}

    # My doctests
    >>> friends3 = {'joe': 10, 'jack': 20}
    >>> stores3 = {'target': 5, 'walmart': 16}
    >>> find_closest_stores(friends3, stores3)
    {'joe': 'target', 'jack': 'walmart'}

    >>> friends4 = {'bob': 12}
    >>> stores4 = {'target': 12, 'costco': 12, 'apple': 12}
    >>> find_closest_stores(friends4, stores4)
    {'bob': 'apple'}

    >>> friends5 = {'joe': 0, 'jack': 2.5}
    >>> stores5 = {'target': 1.25, 'walmart': 1}
    >>> find_closest_stores(friends5, stores5)
    {'joe': 'walmart', 'jack': 'target'}
    """
    return {names: min([(abs(distance - locations), store)
                        for store, distance in stores.items()])[1]
            for names, locations in friends.items()}
ca879f6f442a4d734bf9e3c7b0313cd31ea2a026
12,748
def fmt(x, pos):
    """
    A utility function to improve the formatting of plot labels
    """
    a, b = '{:.2e}'.format(x).split('e')
    b = int(b)
    return r'${} \times 10^{{{}}}$'.format(a, b)
3fbcc50194f2ac5f71ca11fb52ec4a1283c571ca
687,323
import requests


def fetch_idol(idx):
    """Fetch data for a single idol."""
    r = requests.post('https://www.produce101event.com/entry_info.php', data={'idx': idx})
    r.raise_for_status()
    idol_data = r.json()
    if idol_data['result'] == 'success' and idol_data['name']:
        return idol_data
    else:
        return None
7378ba567336df0240116c4355c2fd8cf56e52d7
31,653
def read_generic_file(filepath):
    """
    reads any generic text file into
    list containing one line as element
    """
    text = []
    with open(filepath, 'r') as f:
        for line in f.read().splitlines():
            text.append(line.strip())
    return text
236a142629a1ced710fdb7710e7b9fa84fd05a4b
574,576
def match(s1, s2):
    """
    :param s1: str, the long DNA sequence to be compared
    :param s2: str, the short DNA sequence to match
    :return: str, the most similar segment from the long DNA sequence
    """
    ans = ''
    maximum = 0  # The maximum of match rate
    for i in range(len(s1) - len(s2) + 1):  # (len(s1) - len(s2) + 1) is the times of matching
        match_rate = 0
        segment = s1[i: i+len(s2)]  # The DNA segment from the long sequence to be compared
        for j in range(len(s2)):
            ch1 = segment[j]
            ch2 = s2[j]
            if ch1 == ch2:
                match_rate += 1 / len(s2)
        if maximum < match_rate:
            maximum = match_rate
            ans = segment
    return ans
8b52d377831ffc94df4195430b2c251f7755f672
612,556
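A small worked example for match (sequences are made up):

match('ATCGGGCTA', 'GGT')
# -> 'GGG': among all length-3 windows, 'GGG' is the first to reach the
#    best match rate (2 of 3 positions agree with 'GGT')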
def inclusive_range(f: int, t: int) -> range:
    """Returns range including both ends"""
    return range(f, t + 1)
d3943e3d9783a702b6f298fe8f9a14c50f473f8a
287,991
def factorial(n):
    """Returns a factorial of an integer n"""
    if n == 0:
        return 1
    else:
        return n * factorial(n - 1)
eac399e59348b162aae78a80a7b1050e8a284c37
137,915
def get_offsprings(g, nodes):
    """
    Find the offspring nodes in a DAG

    :param g: a directed acyclic graph
    :param nodes: targeted nodes
    :return: a set of offspring nodes
    """
    return set.union(*[g.descendants(d) for d in nodes])
9da14e457ac402200aa18ebef90b18fba56379e6
666,766
def set_marridged_edge_free(dag, i, j, colliders):
    """Suppose the edge (i, j) is a married edge brought by colliders; set it
    free and configure the correct direction to the colliders.

    Args:
        dag: DAG adjacent matrix
        i: node i
        j: node j
        colliders: list of collider indices

    Returns:
        untangled adjacent matrix
    """
    dag[i, j] = dag[j, i] = 0
    for c in colliders:
        dag[i, c] = 1
        dag[c, i] = 0
        dag[j, c] = 1
        dag[c, j] = 0
    return dag
bd2598517fb91b3035cd00a144226f5db27bda73
283,092
import errno


def is_perm_error(e):
    """Return true if this exception is file permission related.

    :param EnvironmentError e: Exception to test for permission issues

    :return: True if the exception is permission related, false otherwise
    :rtype: bool
    """
    try:
        return e.errno in (errno.EACCES, errno.EPERM)
    except AttributeError:
        return False
29aaf6021a694e1967c0f00eee9e5fb0e1fdea69
111,659
import string


def invertcaps(text):
    """Return new string with the case of all letters switched.
    """
    return ''.join(
        c.upper() if c in string.ascii_lowercase else
        c.lower() if c in string.ascii_uppercase else c
        for c in text
    )
b9c0094be7695530c4a9d105a74fc75190c33ed7
512,978
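A usage sketch for invertcaps:

invertcaps('Hello, World!')  # -> 'hELLO, wORLD!'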
def _find_pos(obj) -> str:
    """
    Pass in a Dataset/DataArray to find the coordinate position (rho/u/v)
    of the data to be worked with. If obj is a Dataset, 'rho' will be
    searched first, and then u/v.

    pos = _find_pos(obj)
    """
    pos = None
    if 'eta_rho' in obj.dims or 'xi_rho' in obj.dims:
        pos = '_rho'
    elif 'eta_psi' in obj.dims or 'xi_psi' in obj.dims:
        pos = '_psi'
    elif 'eta_u' in obj.dims or 'xi_u' in obj.dims:
        pos = '_u'
    elif 'eta_v' in obj.dims or 'xi_v' in obj.dims:
        pos = '_v'
    if pos is None:
        raise ValueError('Unknown coordinate position (rho/psi/u/v).')
    return pos
eb3593be242122aefad0f37b619c343a26b6dca7
89,410
import torch


def seq_mask_from_lens(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.

    If `lengths` is of shape (...), the `mask` is of shape (..., max_len).
    The last dimension has shape (max_len) and consists of consecutive
    `True`s followed by `False`s; the number of `True`s is given by the
    corresponding entry in `lengths`. `True` means that the corresponding
    position is not a padding token, and `False` otherwise.

    lengths: tensor containing the lengths of sequences
    max_len: the max length of all the sequences
    """
    if max_len is None:
        max_len = lengths.max().item()
    mask = torch.arange(0, max_len, dtype=torch.long).type_as(lengths)
    mask = mask.unsqueeze(0)
    mask = mask.repeat(*lengths.size(), 1)
    mask = mask.lt(lengths.unsqueeze(-1))
    return mask
1f606b30d95ed6255249b63dc97a1ac2c2bc4346
73,909
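A usage sketch for seq_mask_from_lens (illustrative lengths):

lengths = torch.tensor([1, 3])
seq_mask_from_lens(lengths)
# tensor([[ True, False, False],
#         [ True,  True,  True]])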
def flatten_board(self):
    """Convert numpy array board to str

    2d board                 1d board
    np.array([[0, 0, 0],
              [0, 0, 0],  --> '000000000'
              [0, 0, 0]])
    """
    return ''.join(self.board.astype('str').flatten())
b25429655f561f195138ec77617d2c0cd656af5d
336,484
def flatten_list(list_of_lists):
    """
    Will convert a list of lists into a list with the items inside each sub-list.

    Parameters
    ----------
    list_of_lists: list[list[object]]

    Returns
    -------
    list
    """
    if not list_of_lists:
        return []
    if isinstance(list_of_lists[0], list):
        lst = []
        for l in list_of_lists:
            lst.extend(l)
        return lst
    return list_of_lists
1064eb59fc3e0154de9fbe7eea4b8f81830cf9af
524,695
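An illustrative call to flatten_list:

flatten_list([[1, 2], [3]])  # -> [1, 2, 3]
flatten_list([1, 2, 3])      # -> [1, 2, 3] (already flat, returned unchanged)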
def json_field_filter(json_obj, field_filter):
    """Accepts a json object and returns only the passed field

    Args:
        json_obj (obj): json object.
        field_filter (str): field to extract.

    Returns:
        list: A list of filtered values
    """
    result = [document[field_filter] for document in json_obj]
    return result
830351b9dca0a33d99d8ca48def9e4d1348851bf
164,524
def _get_subplot_val_prefix(subplot_type):
    """
    Get the subplot value prefix for a subplot type. For most subplot types
    this is equal to the subplot type string itself. For example, a
    `scatter3d.scene` value of `scene2` is used to associate the scatter3d
    trace with the `layout.scene2` subplot.

    However, the `xaxis`/`yaxis` subplot types are exceptions to this pattern.
    For example, a `scatter.xaxis` value of `x2` is used to associate the
    scatter trace with the `layout.xaxis2` subplot.

    Parameters
    ----------
    subplot_type: str
        Subplot string value (e.g. 'scene4')

    Returns
    -------
    str
    """
    if subplot_type == 'xaxis':
        subplot_val_prefix = 'x'
    elif subplot_type == 'yaxis':
        subplot_val_prefix = 'y'
    else:
        subplot_val_prefix = subplot_type
    return subplot_val_prefix
21bee094614563b4b35b88bc2cdc618b17d0887a
453,460
def squeeze(self, axis=None):
    """ Squeeze singleton axes

    Analogous to numpy, but also allows axis name

    Parameters
    ----------
    axis : int or str or None
        axis to squeeze
        default is None, to remove all singleton axes

    Returns
    -------
    squeezed_array : DimArray

    Examples
    --------
    >>> import dimarray as da
    >>> a = da.DimArray([[[1,2,3]]])
    >>> a
    dimarray: 3 non-null elements (0 null)
    0 / x0 (1): 0 to 0
    1 / x1 (1): 0 to 0
    2 / x2 (3): 0 to 2
    array([[[1, 2, 3]]])
    >>> a.squeeze()
    dimarray: 3 non-null elements (0 null)
    0 / x2 (3): 0 to 2
    array([1, 2, 3])
    >>> a.squeeze(axis='x1')
    dimarray: 3 non-null elements (0 null)
    0 / x0 (1): 0 to 0
    1 / x2 (3): 0 to 2
    array([[1, 2, 3]])
    """
    if axis is None:
        newaxes = [ax for ax in self.axes if ax.size != 1]
        res = self.values.squeeze()
    else:
        idx, name = self._get_axis_info(axis)
        res = self.values.squeeze(idx)
        newaxes = [ax for ax in self.axes if ax.name != name or ax.size != 1]
    return self._constructor(res, newaxes, **self.attrs)
1ea7063c8ab0b2afeec2ba2cddaf59fe71e4285b
157,561
def _prod(sizes):
    """ Product of tiny list of sizes. It is faster than numpy.prod and torch.prod.

    Parameter
    ---------
    sizes : list or tuple
        Size of inputs, output, or weights, usually 2/3/4 dimensions.

    Performance
    -----------
    profile    : 20 it/s
    torch.prod : 500 it/s
    numpy.prod : 700 it/s
    _prod      : 1500 it/s
    """
    ans = 1
    for s in sizes:
        ans *= s
    return ans
13320e5b4f82f640cc89b627401a2ebc2f1db633
541,092
import json


def load_results_from_json(filename: str) -> dict:
    """
    Parses a JSON file and returns results as a dictionary.

    Parameters
    ----------
    filename: str
        Input file name to load.
    """
    with open(filename, 'r') as in_file:
        data = json.load(in_file)
    return data
8c9581a225db2e756ad967fda56f3049befac9d2
234,403
import math


def _compute_dcg(s, k):
    """
    A function to compute dcg

    :param s: sequence of ground truth in the rank order to use for calculating dcg
    :param k: top k at which to evaluate ndcg
    :return: dcg for this ordering of ground truth
    :rtype: numeric
    """
    dcg = 0.0
    for i in range(min(k, len(s))):
        dcg += (math.pow(2, s[i]) - 1) / math.log(i + 2, 2)
    return dcg
6732c5ea7bbee643e9c20968c43232fc6bb695cd
81,022
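A worked example for _compute_dcg with made-up relevance grades:

_compute_dcg([3, 2, 0], k=3)
# = (2**3 - 1)/log2(2) + (2**2 - 1)/log2(3) + (2**0 - 1)/log2(4)
# = 7 + 3/1.585 + 0 ~= 8.893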
def user_prompt(prompt_string, default=None, inlist=None):
    """
    Takes a prompt string, and asks user for answer
    sets a default value if there is one
    keeps prompting if the value isn't in inlist
    splits a string list with commas into a list
    """
    prompt_string = '%s [%s]: ' % (prompt_string, default) if default else prompt_string
    output = input(prompt_string)
    output = default if output == '' else output
    if inlist:
        assert isinstance(inlist, list)
        while output not in inlist:
            output = input(prompt_string)
    output = [x.strip() for x in output.split(',')] if (
        isinstance(output, str) and ',' in output) else output
    return output
5879c8cd7853426d9c94b763292f6b04e9f26e78
17,388
import collections

import six


def normalize_param(key, value):
    """Convert a set of key, value parameters into a dictionary suitable for
    passing into requests. This will convert lists into the syntax required
    by SoundCloud. Heavily lifted from HTTParty.

    >>> normalize_param('playlist', {
    ...     'title': 'foo',
    ...     'sharing': 'private',
    ...     'tracks': [
    ...         {id: 1234}, {id: 4567}
    ...     ]}) == {
    ...     u'playlist[tracks][][<built-in function id>]': [1234, 4567],
    ...     u'playlist[sharing]': 'private',
    ...     u'playlist[title]': 'foo'}  # doctest:+ELLIPSIS
    True

    >>> normalize_param('oauth_token', 'foo')
    {'oauth_token': 'foo'}

    >>> normalize_param('playlist[tracks]', [1234, 4567]) == {
    ...     u'playlist[tracks][]': [1234, 4567]}
    True
    """
    params = {}
    stack = []
    if isinstance(value, list):
        normalized = [normalize_param(u"{0[key]}[]".format(dict(key=key)), e) for e in value]
        keys = [item for sublist in tuple(h.keys() for h in normalized) for item in sublist]

        lists = {}
        if len(keys) != len(set(keys)):
            duplicates = [x for x, y in collections.Counter(keys).items() if y > 1]
            for dup in duplicates:
                lists[dup] = [h[dup] for h in normalized]
                for h in normalized:
                    del h[dup]

        params.update(dict((k, v) for d in normalized for (k, v) in d.items()))
        params.update(lists)
    elif isinstance(value, dict):
        stack.append([key, value])
    else:
        params.update({key: value})

    for (parent, hash) in stack:
        for (key, value) in six.iteritems(hash):
            if isinstance(value, dict):
                stack.append([u"{0[parent]}[{0[key]}]".format(dict(parent=parent, key=key)), value])
            else:
                params.update(normalize_param(
                    u"{0[parent]}[{0[key]}]".format(dict(parent=parent, key=key)), value))
    return params
dfdb33d87ca422ac7de8a71c8985cddb157ccc0c
135,673
def get_difference_target_classification_proba(model, x, class_column, method='predict_proba'):
    """Calculates the objective 1 (f1), where it attempts to maximize the
    probability of the desired class. Valid only for classification problems
    with methods returning the probability estimates for each class.

    :param model: a machine learning model
    :param x: the individual (or individuals) to be evaluated
    :type x: numpy.array
    :param class_column: the column index of the prediction class targeted
    :type class_column: Integer
    :param method: the method responsible for determining the prediction
    :type method: string, defaults to `predict_proba`
    :return: two objects. The first are the objective 1 (f1) values and the
        second are the predicted values related to `x` and found by `model`
        using `method`
    :rtype: np.array (first object) and np.array (second object)
    """
    prediction = getattr(model, method)(x)
    return 1 - prediction[:, class_column], prediction
3603cac8c1dd3e9b79243e582178357ec1842155
600,350
def read_file(file_name):
    """Return a list of the lines of a file."""
    with open(file_name, 'r') as f:
        file_lines = [line.rstrip("\n") for line in f.readlines()]
    return file_lines
60a02759413c16ba5186719c77cb6d484fce0da8
376,452
import json


def GetErrorMessage(stdout):
    """Extract a message field from JSON output if present."""
    try:
        return json.loads(stdout)['message']
    except (ValueError, KeyError):
        return stdout
54656974a891e7c7eefc03ae8fb8b200907b123c
620,810
import threading


def background(fun):
    """
    Decorator to run a function in the background.
    Based on the implementation at
    https://amalgjose.com/2018/07/18/run-a-background-function-in-python/
    """
    def background_func(*args, **kwargs):
        threading.Thread(target=fun, args=args, kwargs=kwargs).start()
    return background_func
04f4673537b46c04b8ebba5e19d360594efd2142
96,061
import unicodedata


def remove_accents(text: str, *, fast: bool = False) -> str:
    """
    Remove accents from any accented unicode characters in ``text``, either by
    replacing them with ASCII equivalents or removing them entirely.

    Args:
        text
        fast: If False, accents are removed from any unicode symbol
            with a direct ASCII equivalent; if True, accented chars
            for all unicode symbols are removed, regardless.

            .. note:: ``fast=True`` can be significantly faster than
               ``fast=False``, but its transformation of ``text`` is less
               "safe" and more likely to result in changes of meaning,
               spelling errors, etc.

    Returns:
        str

    See Also:
        For a more powerful (but slower) alternative, check out ``unidecode``:
        https://github.com/avian2/unidecode
    """
    if fast is False:
        return "".join(
            char
            for char in unicodedata.normalize("NFKD", text)
            if not unicodedata.combining(char)
        )
    else:
        return (
            unicodedata.normalize("NFKD", text)
            .encode("ascii", errors="ignore")
            .decode("ascii")
        )
871ca064723e1f7cd16449d58f2c926f09e519d8
531,760
def group_timelogs(timelogs, logf):
    """
    Group multiple timelogs with same ticket, description and date
    """
    cache = {}
    for timelog in timelogs:
        key = "{}:{}:{}".format(timelog.ticket, timelog.description, timelog.date.date())
        logf(
            "Found worklog {}: {} ({}) ({})".format(
                timelog.ticket, timelog.description, timelog.date, timelog.time
            )
        )
        if key not in cache:
            cache[key] = timelog
        else:
            cache[key].time += timelog.time

    logf("\n---\n")
    for timelog in cache.values():
        logf(
            "Grouped worklog {}: {} ({}) ({})".format(
                timelog.ticket, timelog.description, timelog.date, timelog.time
            )
        )
    return cache.values()
e9182f7022955470e648f9d359abde86c3d86677
229,287
def filter_plot_size(layout, basesize):
    """Return the size (in inches) of the plot produced by `plot_filters`

    Args:

        * layout (list of tuples): The result of a call to
          :func:`nnhealpix.visual.filter_plot_layout`

        * basesize (float): Size (in inches) to be used to plot
          each of the filters

    Returns:
        A 2-element tuple containing the width and height in inches of the plot.
    """
    nrows, ncols = len(layout), max(layout)
    # Each square containing a filter will be placed in a square
    # whose side is "basesize" inches long
    width = min(ncols * basesize, 12)
    height = nrows * basesize
    return width, height
a06da5550e2dc7b020ae7b6493c3ec690aeb0e8b
439,923
def least_larger(arr: list, idx: int) -> int:
    """
    This function returns the index of the least number larger than the
    element at the given index, or -1 if there is no such index.
    """
    if len(arr) == 0:
        return -1
    my_element = [i for i in arr if i > arr[idx]]
    if len(my_element) > 0:
        return arr.index(min(my_element))
    return -1
e181cd9cbe8bf5f49c400125b04d1028ff79880e
57,217
def de9im_match(pattern: str, target_pattern: str) -> bool:
    """
    Check a DE-9IM pattern `pattern` against a target DE-9IM pattern.

    Note:
        To enable maximal speed, patterns are not parsed for correctness.
        For correct patterns consult https://en.wikipedia.org/wiki/DE-9IM.

    Args:
        pattern (str): DE-9IM pattern to check as string
        target_pattern (str): DE-9IM pattern against which to check as string

    Returns:
        bool: True, iff pattern matches with target_pattern
    """
    for char, target_char in zip(pattern, target_pattern):
        if target_char == "*":
            continue
        elif target_char == "T" and char in "012":
            continue
        elif char == target_char:
            continue
        else:
            return False
    return True
5a3f2cee981572b154e7b442f79388763a98dfe4
299,942
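A usage sketch for de9im_match (patterns are illustrative):

de9im_match('212101212', 'T*T***T**')  # -> True: '*' matches anything,
                                       #    'T' matches any of '0', '1', '2'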
import six


def encode_header(value, encoding='utf-8'):
    """Make sure the value is of type ``str`` in both PY2 and PY3."""
    value_type = type(value)
    if value_type != str:
        # Test for Python3
        if value_type == six.binary_type:  # pragma: no cover
            value = value.decode(encoding)
        # Test for Python2
        elif value_type == six.text_type:  # pragma: no cover
            value = value.encode(encoding)
    return value
9b75fce37044f4d9bc097be998fbb489b0d815be
498,336
def get_n50(lengths, ref=None):
    """
    Get (N50, L50, shortest contig, longest contig) statistics
    for a list of contig lengths.

    @param lengths: a list of contig lengths
    @return (N50, L50, shortest contig, longest contig)
    """
    if not lengths:
        return 0, 0, 0, 0
    lengths.sort(reverse=True)
    if ref:
        total = ref
    else:
        total = sum(lengths)
    n50 = 0
    l50 = 0
    shortest_seq = min(lengths)
    longest_seq = max(lengths)
    for x in lengths:
        l50 += 1
        n50 += x
        if n50 >= total/2:
            return x, l50, shortest_seq, longest_seq
6cf890062a207233005ee51674570dda08e74c28
143,580
def get_fancy_time(sec):
    """
    Convert a time measured in seconds to a fancy-printed time.
    :param sec: Float
    :return: String
    """
    h = int(sec) // 3600
    m = (int(sec) // 60) % 60
    s = sec % 60
    if h > 0:
        return '{h} hours, {m} minutes, and {s} seconds.'.format(h=h, m=m, s=round(s, 2))
    elif m > 0:
        return '{m} minutes, and {s} seconds.'.format(m=m, s=round(s, 2))
    else:
        return '{s} seconds.'.format(s=round(s, 2))
0a71d05bffd0d4324343b1d92f43338ef8182227
407,591
def generate_rendered_template(jinja_environment, template_filename, **kwargs):
    """Returns rendered template

    args:
        jinja_environment: Jinja2 Parser from get_jinja_parser
        template_filename: Path to template that will be used
        kwargs: Keyword arguments containing the fields on `template_filename`
            and the values to be processed
    """
    return jinja_environment.get_template(template_filename).render(kwargs)
c2e5002ac21c49e5e7093fe60f683397dba1c41b
567,452
import webbrowser


def open_in_browser(path):
    """
    Open directory in web browser.
    """
    return webbrowser.open(path)
41328b2b478f0bd69695da1868c412188e494d08
1,503
def append_dempster_attr(ds_list, dempster_label='belief'):
    """
    Helper function to append the dempster output type label
    to an existing dataset. Just an xarray update function.

    Parameters
    ----------
    ds_list: list
        A list of xarray datasets.
    dempster_label: str
        The dempster output type label to append (default: 'belief').

    Returns
    ----------
    out_list : list of xarray datasets with appended attributes.
    """
    # check if list
    if not isinstance(ds_list, list):
        ds_list = [ds_list]

    # loop xr datasets and append dempster label
    out_list = []
    for ds in ds_list:
        ds.attrs.update({'dempster_type': dempster_label})
        out_list.append(ds)

    # return
    return out_list
2172b1f8bd769dcb1e4dc440ec7a94f43ba594e5
124,693
def double_sort(pathways_dictionary):
    """
    Return the keys to a dictionary sorted with top values first
    then for duplicate values sorted alphabetically by key
    """
    sorted_keys = []
    prior_value = ""
    store = []
    for pathway in sorted(pathways_dictionary, key=pathways_dictionary.get, reverse=True):
        if prior_value == pathways_dictionary[pathway]:
            if not store:
                store.append(sorted_keys.pop())
            store.append(pathway)
        else:
            if store:
                sorted_keys += sorted(store)
                store = []
            prior_value = pathways_dictionary[pathway]
            sorted_keys.append(pathway)
    if store:
        sorted_keys += sorted(store)
    return sorted_keys
6aadb085add63dd1f684520a0a69b0a6c03cb82e
495,670
def train_vae(model, train_loader, optimizer):
    """
    Function for training a model on a dataset. Train the model for one epoch.

    Inputs:
        model - VAE model to train
        train_loader - Data Loader for the dataset you want to train on
        optimizer - The optimizer used to update the parameters

    Outputs:
        average_bpd - Average BPD
        average_rec_loss - Average reconstruction loss
        average_reg_loss - Average regularization loss
    """
    # training loop
    bpd_vals, rec_loss, reg_loss = 0, 0, 0
    total_steps = 0
    for step, (train_inputs, train_targets) in enumerate(train_loader):
        # move to device
        train_inputs = train_inputs.to(model.device)

        # make a forward pass through the model (encoder and decoder)
        L_rec, L_reg, bpd = model.forward(train_inputs)

        # track bpd, rec, and reg
        bpd_vals += bpd.item()
        rec_loss += L_rec.item()
        reg_loss += L_reg.item()

        # backpropagation
        optimizer.zero_grad()
        bpd.backward()
        optimizer.step()

        # keep track of steps
        total_steps += 1

    average_bpd = bpd_vals / total_steps
    average_rec_loss = rec_loss / total_steps
    average_reg_loss = reg_loss / total_steps
    return average_bpd, average_rec_loss, average_reg_loss
2426d58e168bdf7fd133b7290f7a1753b65281ac
519,458
def _row_partitions_identical(shape_a, shape_b):
    """Returns True iff all row_partitions in shapes are identical."""
    return ((shape_a.num_row_partitions == shape_b.num_row_partitions) and all(
        a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions)))
10ae027a451c43c36012c04010e182c8a5d4e5ee
436,184
def _h3_col(h3_lvl):
    """Make it easy and reproducible to create an h3 column"""
    return f'h3_{h3_lvl:02d}'
46463bc3ce0edac3e392dacba0cadfe940577ce3
578,714
def str_to_float(in_val):
    """Convert human-readable exponential form to float.

    :param in_val: (str) input string of the following formats:
        'float_number' --> float_number
        'float_number + white_space + exp_prefix + unit_string'
            --> float_number * 10**exp_value

        Supported exp prefixes: ['T', 'G', 'M', 'k', '', 'm', 'u', 'n', 'p']

        Warning: format 'just exp_prefix without unit_string' is not supported:
        if only one symbol is given after 'float_number', it will be interpreted
        as unit and exponent will be set to 10**0.

        Examples: '1.2 us'   --> 1.2e-6
                  '-4.5 mV'  --> -4.5e-3
                  '10.1 GHz' --> 1.01e10
                  '1.56 s'   --> 1.56
                  '1.56 m'   --> 1.56 [interpreted as 1.56 meters, not as 1.56e-3]

    :return: (float) extracted value without unit
    """
    if isinstance(in_val, (float, int)):
        return in_val

    # Split string into mantissa and exp_prefix + unit
    item_list = in_val.split()

    # Extract mantissa exp_prefix if included
    mantissa = float(item_list[0])

    # Extract exp_prefix (a single letter) if included
    try:
        exp_prefix_unit = item_list[1]
        if len(exp_prefix_unit) > 1:
            exp_prefix = item_list[1][0]
        else:
            exp_prefix = ''
    except IndexError:
        exp_prefix = ''

    # Convert exp_prefix into exp_value
    if exp_prefix == 'T':
        exp_value = 12
    elif exp_prefix == 'G':
        exp_value = 9
    elif exp_prefix == 'M':
        exp_value = 6
    elif exp_prefix == 'k':
        exp_value = 3
    elif exp_prefix == '':
        exp_value = 0
    elif exp_prefix == 'm':
        exp_value = -3
    elif exp_prefix == 'u':
        exp_value = -6
    elif exp_prefix == 'n':
        exp_value = -9
    elif exp_prefix == 'p':
        exp_value = -12
    else:
        # The case of multi-letter unit without prefix: '1.5 Hz'
        # the first letter 'H' is not an exp prefix
        exp_value = 0

    return mantissa * (10 ** exp_value)
7b0d99a6db2f26f13bf498a456f60d1054c3c0ce
558,089
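A few illustrative calls to str_to_float, mirroring the docstring examples:

str_to_float('1.2 us')   # -> 1.2e-06
str_to_float('-4.5 mV')  # -> -0.0045
str_to_float('1.56 m')   # -> 1.56 (a single letter is treated as a unit, not a prefix)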
def _parse_node_to_coords(element):
    """
    Parse coordinates from a node in the overpass response.

    The coords are only used to create LineStrings and Polygons.

    Parameters
    ----------
    element : dict
        element type "node" from overpass response JSON

    Returns
    -------
    coords : dict
        dict of latitude/longitude coordinates
    """
    # return the coordinate of a single node element
    coords = {"lat": element["lat"], "lon": element["lon"]}
    return coords
6ce67abb5b294ea8458ecdee64d2b49736348372
692,988
def torange(array, low, high):
    """
    Rescale an array to the value range (low, high)
    :param array: any array
    :param low, high: the range
    :return: new array
    """
    amin, amax = array.min(), array.max()
    # normalize to [0, 1]
    array = array - amin
    array = array / (amax - amin)
    # to (low, high)
    array = array * (high - low) + low
    return array
fc1574272cd2ac08279eddebd99dcf87e42da632
239,609
def teams_and_members(review_teams):
    """Fixture with a dictionary containing a few teams with member lists."""
    return {
        "one": ["first", "second"],
        "two": ["two"],
        "last_team": [str(i) for i in range(10)],
        **review_teams,
    }
a647daa64359933e1a1346046b4cfeaa31bd7248
533,190
def get_xpath1(xml_node, path):
    """Given an XML node and XPath expression (a string), returns the node
    pointed to by this XPath. It is an error if there is no such node, or if
    there are multiple such nodes.

    xml_node -- XML DOM node (as returned from lxml.etree)
    path -- XPath expression (as a string)
    """
    t = xml_node.xpath(path)
    u = len(t)
    if 0 == u:
        raise Exception("No node selected by XPath: {}".format(path))
    elif 2 <= u:
        raise Exception("Multiple nodes ({}) selected by XPath: {}".format(u, path))
    else:
        return t[0]  # return the only node in the list
ec7849d9e66b7eeed1806ae2681774bc56752496
379,748
import six


def convert_recursive_helper(converter, data):
    """
    Given JSON-like data (a nested collection of lists or arrays), which may
    include Action tuples, replaces all primitive values with converter(value).
    It should be used as follows:

        def my_convert(data):
            if data needs conversion:
                return converted_value
            return convert_recursive_helper(my_convert, data)
    """
    if isinstance(data, dict):
        return {converter(k): converter(v) for k, v in six.iteritems(data)}
    elif isinstance(data, list):
        return [converter(el) for el in data]
    elif isinstance(data, tuple):
        return type(data)(*[converter(el) for el in data])
    else:
        return data
eb8e759affa0125d8f0dde598992d6e0caf714d6
74,558
def _concat(slice: str) -> str:
    """helper to concatenate each template slice."""
    return "{}\n".format(slice)
c6de3de2c184d98b65e8a5b4829d9f16ce398bcf
477,948
import math
from typing import List, Tuple


def GenStats(corpus: List[float]) -> Tuple[float, float, float]:
    """Generates statistics from a list of values

    Args:
        corpus (List[float]): The set of data to generate statistics for.

    Returns:
        Tuple[float, float, float]: The mean, standard deviation, and
        coefficient of variation for the given sample data.
    """
    avg = sum(corpus) / len(corpus)
    adjusted_sum = 0.0
    for item in corpus:
        adjusted = item - avg
        adjusted_sum += adjusted * adjusted
    dev = math.sqrt(adjusted_sum / len(corpus))
    cv = dev / avg
    return avg, dev, cv
e619aa3d4c11758106a811cb86c3a61e3e9bdf97
431,159
def mifareclassic_IsFirstBlock(uiBlock: int):
    """ Indicates whether the specified block number is the first block in the
    sector (block 0 relative to the current sector) """
    # Test if we are in the small or big sectors
    if uiBlock < 128:
        return uiBlock % 4 == 0
    else:
        return uiBlock % 16 == 0
3efb1ecfe0165a7ac34f401ea1c6fdcc048d2145
535,290
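A usage sketch for mifareclassic_IsFirstBlock:

mifareclassic_IsFirstBlock(4)    # -> True  (small sectors: multiples of 4)
mifareclassic_IsFirstBlock(130)  # -> False
mifareclassic_IsFirstBlock(144)  # -> True  (big sectors: multiples of 16)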
def remove_decorator(srccode: str, decorator: str) -> str:
    """remove decorator from return value of `inspect.getsource`.

    :param srccode: return value of `inspect.getsource`
    :param decorator: remove target ex: '@snippet'
    :return srccode_without_decorator: srccode removed decorator
    """
    # no decorator remained
    if srccode.find(decorator) != 0:
        return srccode.strip()

    len_deco = len(decorator)
    # no option
    if srccode[len_deco] != '(':
        return srccode[len_deco:].strip()

    stack = []
    stack.append('(')
    i = len_deco + 1
    while stack:
        top = stack[-1]
        nchr = srccode[i]
        if top == '(':
            if nchr == ')':
                stack.pop()
            elif nchr == "'" or nchr == '"':
                stack.append(nchr)
        elif top == "'":
            if nchr == "'":
                stack.pop()
        elif top == '"':
            if nchr == '"':
                stack.pop()
        i += 1
    return srccode[i:].strip()
95565a2467f4e615bfe32c8e3060e5279ce3212f
82,922
import math


def cosd(thetaDeg):
    """
    Compute the cosine of the input given in degrees.

    Parameters
    ----------
    thetaDeg : float or numpy.ndarray
        Angle in degrees

    Returns
    -------
    float or numpy.ndarray
        cosine of the input value
    """
    return math.cos(math.radians(thetaDeg))
7ed77e3c92da3cc87afb7e6eceffe847dbc4b214
300,181
def get_video_id(video_name):
    """
    Returns video id from first 4 characters of video name and returns an int
    """
    # video id is first 4 characters of video name minus 1 because they have to be 0 indexed
    return int(video_name[0:4]) - 1
e57fb0f3016c4b08d1385d4aa213b8a5930f7a24
98,353
def clean_data(df):
    """
    Returns the cleaned data frame with no missing or duplicated values

    Parameters:
        df (pandas DataFrame): The returned data frame from load_data()

    Returns:
        df (pandas DataFrame): cleaned data
    """
    df.drop('original', axis=1, inplace=True)
    df.dropna(inplace=True)
    category_colnames = ['related', 'request', 'offer', 'aid_related', 'medical_help',
                         'medical_products', 'search_and_rescue', 'security', 'military',
                         'child_alone', 'water', 'food', 'shelter', 'clothing', 'money',
                         'missing_people', 'refugees', 'death', 'other_aid',
                         'infrastructure_related', 'transport', 'buildings', 'electricity',
                         'tools', 'hospitals', 'shops', 'aid_centers', 'other_infrastructure',
                         'weather_related', 'floods', 'storm', 'fire', 'earthquake', 'cold',
                         'other_weather', 'direct_report']
    df[category_colnames] = df[category_colnames].astype(int)

    # Remove duplicates
    df = df.drop_duplicates()
    return df
f3a13391730d6aed57e583df3eafda42b342eedf
458,582
def get_eckey_type(key_pem):
    """
    Get EC key type (public or private) from a PEM format key.
    Returns "public", "private", or "invalid".

    :param key_pem: EC key in PEM format
    """
    with open(key_pem, 'rt') as f:
        key = f.read()
    if 'PUBLIC' in key:
        return 'public'
    elif 'PRIVATE' in key:
        return 'private'
    else:
        return 'invalid'
1bf12a27945edbfd72d2eba06a74405550124069
420,513
from typing import Any, Dict


def _build_dummy_product(title: str = '') -> Dict[str, Any]:
    """Builds a dummy product data.

    Args:
        title: A dummy title.

    Returns:
        A dummy product data.
    """
    return {
        'title': title,
    }
d583bce1c152d4d6caeed2d407f9741994a70714
468,373
import re


def find_first_word(address):
    """
    Find first matched street word identifying possible roads/addresses

    Args:
        address: address to perform matching against known state roads

    Returns first word of the case of matched words in an address
    """
    major_highways = ["85", "77", "485"]
    hw_matches = re.findall(r"[0-9]+", address)
    matches = re.findall(r"[A-Za-z]+", address)
    words = [word for word in matches if len(word) > 3]
    hw_words = [word for word in hw_matches if word in major_highways]
    hw_word = hw_words[0] if hw_words else None
    first_word = words[0] if words else None
    if hw_word:
        return hw_word
    else:
        return first_word
243cb9b91712507ea20f6ed8fa89ae6c85570038
475,399
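Illustrative calls to find_first_word with made-up addresses:

find_first_word('1500 Main Street')  # -> 'Main' (first word longer than 3 letters)
find_first_word('I-485 Exit 10')     # -> '485'  (known highway number wins)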
def stationary(t):
    """Probe is stationary at location h = 2, v = 0"""
    return 0.*t, 2/16 + 0*t, 0*t
984734db7045bbc49850b0b641882b8812bcbe18
633,280
def get_repo_push_url(args):
    """Return the fully expanded push url for this repo or None.

    :args: the namespace returned from the argparse parser
    :returns: string url to push the repo, or None
    """
    url = None
    if args.repo_push_url_format:
        url = args.repo_push_url_format.format(args.repo_url)
    return url
7dffb88c96663b3ab575767892b5ae6cbc43aee9
93,892
import codecs
import re


def load_text_pairs(file_name):
    """ Load text pairs from the specified file.

    Each text pair corresponds to a single line in the text file. Both texts
    (left and right one) in such pair are separated by the tab character. It
    is assumed that the text file has the UTF-8 encoding.

    :param file_name: name of file containing required text pairs.

    :return a 2-element tuple: the 1st contains list of left texts, the 2nd
        contains corresponding list of right texts.
    """
    def prepare_text(src: str) -> str:
        search_res = re_for_unicode.search(src)
        if search_res is None:
            return src
        if (search_res.start() < 0) or (search_res.end() < 0):
            return src
        prep = src[:search_res.start()].strip() + ' ' + src[search_res.end():].strip()
        search_res = re_for_unicode.search(prep)
        while search_res is not None:
            if (search_res.start() < 0) or (search_res.end() < 0):
                search_res = None
            else:
                prep = prep[:search_res.start()].strip() + ' ' + prep[search_res.end():].strip()
                search_res = re_for_unicode.search(prep)
        return prep.strip()

    input_texts = list()
    target_texts = list()
    line_idx = 1
    re_for_unicode = re.compile(r'&#\d+;')
    special_unicode_characters = {'\u00A0', '\u2003', '\u2002', '\u2004', '\u2005', '\u2006',
                                  '\u2009', '\u200A', '\u0000', '\r', '\n', '\t'}
    re_for_space = re.compile('[' + ''.join(special_unicode_characters) + ']+', re.U)
    re_for_dash = re.compile('[' + ''.join(['\u2011', '\u2012', '\u2013', '\u2014', '\u2015']) + ']+',
                             re.U)
    with codecs.open(file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
        cur_line = fp.readline()
        while len(cur_line) > 0:
            prep_line = cur_line.strip().replace('&quot;', '"').replace('&apos;', "'").replace(
                '&gt;', '>').replace('&lt;', '<').replace('&amp;', '&')
            if len(prep_line) > 0:
                err_msg = 'File "{0}": line {1} is wrong!'.format(file_name, line_idx)
                line_parts = prep_line.split('\t')
                assert len(line_parts) == 2, err_msg
                new_input_text = line_parts[0].strip()
                new_input_text = prepare_text(
                    re_for_dash.sub('-', ' '.join(re_for_space.sub(' ', new_input_text).split()).strip())
                )
                new_input_text = re_for_dash.sub(
                    '-', ' '.join(re_for_space.sub(' ', new_input_text).split()).strip())
                new_target_text = line_parts[1].strip()
                new_target_text = prepare_text(
                    re_for_dash.sub('-', ' '.join(re_for_space.sub(' ', new_target_text).split()).strip())
                )
                new_target_text = re_for_dash.sub(
                    '-', ' '.join(re_for_space.sub(' ', new_target_text).split()).strip())
                assert (len(new_input_text) > 0) and (len(new_target_text) > 0), err_msg
                input_texts.append(new_input_text)
                target_texts.append(new_target_text)
            cur_line = fp.readline()
            line_idx += 1
    return input_texts, target_texts
e58634afd7e8ac79ac5c87e67f8b0cdc1807da0d
315,479
import torch


def create_S_batch(batch_size, device):
    """
    Creates a batch of diag([1, 1, 0]) matrices
    Input: int
    Output (batch, 3, 3)
    """
    return torch.stack([
        torch.stack([torch.ones(batch_size, device=device),
                     torch.zeros(batch_size, device=device),
                     torch.zeros(batch_size, device=device)], dim=1),
        torch.stack([torch.zeros(batch_size, device=device),
                     torch.ones(batch_size, device=device),
                     torch.zeros(batch_size, device=device)], dim=1),
        torch.stack([torch.zeros(batch_size, device=device),
                     torch.zeros(batch_size, device=device),
                     torch.zeros(batch_size, device=device)], dim=1)
    ], dim=2)
c17bf9c221e44ce24a21953f26660cd1880ba717
353,053
import math


def tamMuestraChebyshev(epsilon, delta):
    """
    Compute the sample size according to the Chebyshev criterion.

    epsilon: desired error
    delta: confidence interval (1-delta)
    """
    nc = 1.0 / (4.0 * delta * epsilon**2)
    return math.ceil(nc)
d9b311a185aafabc8cb512dba61a5788178ee215
268,473
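A worked example for tamMuestraChebyshev (values are illustrative):

tamMuestraChebyshev(epsilon=0.05, delta=0.05)
# = ceil(1 / (4 * 0.05 * 0.05**2)) = ceil(2000.0) = 2000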
def _convert_to_string(data):
    """Converts extracted string data from bytes to string, as strings are
    handled as bytes since h5py >= 3.0.

    The function has been introduced as part of an
    `issue <https://github.com/rundherum/pymia/issues/40>`_.

    Args:
        data: The data to be converted; either :obj:`bytes` or list of :obj:`bytes`.

    Returns:
        The converted data as :obj:`str` or list of :obj:`str`.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    elif isinstance(data, list):
        return [_convert_to_string(d) for d in data]
    else:
        return data
53a82fe27cb3e61e2c86ac6dd730940c400e44f7
231,187
import re
from typing import Tuple


def get_granularity(freq_str: str) -> Tuple[int, str]:
    """
    Splits a frequency string such as "7D" into the multiple 7 and the base
    granularity "D".

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H",
        "5min", "1D" etc.
    """
    freq_regex = r"\s*((\d+)?)\s*([^\d]\w*)"
    m = re.match(freq_regex, freq_str)
    assert m is not None, "Cannot parse frequency string: %s" % freq_str
    groups = m.groups()
    multiple = int(groups[1]) if groups[1] is not None else 1
    granularity = groups[2]
    return multiple, granularity
d5c1781cd075176a38dee03f64c5de16fb3f7a33
230,289
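A usage sketch for get_granularity:

get_granularity('12H')  # -> (12, 'H')
get_granularity('min')  # -> (1, 'min') (a missing multiple defaults to 1)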
def warning(cause):
    """ Display warning msg (Yellow) """
    return ("\033[1;33;40m [!] " + cause + " \033[0m")
af7bf9d5eaa1b094435aa6333faf23a147873b36
231,444
from typing import List


def dict_sort(data: dict) -> List[tuple]:
    """
    Convert a ``dict`` into a sorted ``List[tuple]`` for safely comparing
    multiple ``dict``'s

    **Basic Usage**:

        >>> x = dict(a=1, c=2, b=3)
        >>> y = dict(a=1, b=3, c=2)
        >>> dict_sort(x) == dict_sort(y)
        True
    """
    return sorted(tuple(dict(data).items()))
9c8b0e558d6ef382a3f619e3412b83a7b62eb174
154,447
def _parse_disallowed_patterns(disallowed_patterns):
    """
    Parse disallowed patterns from flake8 options.

    Parameters
    ----------
    disallowed_patterns : str
        Configuration that represents a pairing of filename pattern and
        regular expression for disallowed import. Multiple items should be
        separated by newlines.

    Returns
    -------
    results : list of tuple(str, str)
        The first item in the tuple is the pattern for matching files.
        The second item in the tuple is the pattern for matching disallowed imports.
    """
    results = []
    disallowed_patterns = disallowed_patterns.replace(" ", "")
    for rule in disallowed_patterns.splitlines():
        if not rule:
            continue
        file_pattern, disallowed = rule.split(":")
        results.append((file_pattern.strip(), disallowed.strip()))
    return results
b5fec00857db6f0eff397c5f1cda51c9150e81e3
251,671
from re import VERBOSE


def trp(pointer, main_stack, aux_stack):
    """
    Trp

    Take the top element of the main stack to the power of the top element of
    the auxiliary stack. If either stack is empty, treat it as zero.
    """
    if VERBOSE:
        print("Trp", main_stack, aux_stack)
    if main_stack:
        a = main_stack.pop()
    else:
        a = 0
    if aux_stack:
        b = aux_stack.pop()
    else:
        b = 0
    # If trying to raise 0 to a negative power, act as a no-op
    if (a != 0 or b >= 0):
        try:
            main_stack.append(a ** b)
        except OverflowError:
            pass
    return (pointer, main_stack, aux_stack)
65a64c4589a167a72bcd3a936fa4fc07a3753188
600,979
def _extrapolate_constant(to_time, from_sample):
    """Extrapolate a power rating to an earlier or later time.

    This models a constant power rating before (or after) the `from_sample`
    input to the `to_time`. It returns a power sample, of the same class as
    the `from_sample` but at the time specified in `to_time`.
    """
    sample_class = from_sample.__class__
    watts = from_sample.watts
    extrapolated_sample = sample_class(watts=watts, moment=to_time)
    return extrapolated_sample
e16b488d90e39534ae2647b27740c92928bc2a40
116,119
def _merge_dicts(*args):
    """
    Shallow copy and merge dicts together, giving precedence to last in.
    """
    ret = dict()
    for arg in args:
        ret.update(arg)
    return ret
63b3ff5786b7be098e0e56eb8feee3e94627243a
600,178
def GetBuildShortBaseName(target_platform):
    """Returns the build base directory.

    Args:
        target_platform: Target platform.

    Returns:
        Build base directory.

    Raises:
        RuntimeError: if target_platform is not supported.
    """
    platform_dict = {
        'Windows': 'out_win',
        'Mac': 'out_mac',
        'Linux': 'out_linux',
        'Android': 'out_android',
        'NaCl': 'out_nacl',
    }
    if target_platform not in platform_dict:
        raise RuntimeError('Unknown target_platform: ' + (target_platform or 'None'))
    return platform_dict[target_platform]
0bbbad4de3180c2ea51f5149cc3c2417a22b63e9
702,618
def get_file_content(filename):
    """Get the content of a file.

    :filename: str, path for the file
    :returns: str, the content of the file
    """
    with open(filename, 'r') as f:
        content = f.readlines()
    return ''.join(content)
608eeeecbc889e3a0f503848207c60bfb9b22eb9
213,076
import json


def parse_json(project_parameter_json):
    """Gets all relevant information from the parameter_json

    Parameters
    ----------
    project_parameter_json: str
        The path for the parameter_json

    Returns
    -------
    project_dict: dict
    """
    with open(project_parameter_json) as param_file:
        project_dict = json.load(param_file)
    return project_dict
44d7f90ec9edd3a48402337fdf52eae3e7658ee8
389,264
def safe_div(a, b):
    """
    returns a / b, unless b is zero, in which case returns 0

    this is primarily for usage in cases where b might be systematically
    zero, e.g. because comms are disabled or similar
    """
    return 0 if b == 0 else a / b
8c425ef9f3e1fa735a7664ef7bfc043dbd1f6349
578,160
def concatenate_qa(prev_qns_text_list, prev_ans_text_list):
    """
    Concatenates two lists of questions and answers.
    """
    qa = ""
    for q, a in zip(prev_qns_text_list, prev_ans_text_list):
        qa += q + " | " + a + " || "
    return qa
0dc6bca0cc84e5b6a06b67304142369c596624cc
75,809
import math
from typing import Tuple


def normal_approximation_to_binomial(n: int, p: float) -> Tuple[float, float]:
    """Returns mu and sigma corresponding to a Binomial(n, p)"""
    mu = p * n
    sigma = math.sqrt(p * (1 - p) * n)
    return mu, sigma
f7f5083a7e4cf54ac64f15283dc29593e212d127
565,800
def is_even_or_odd(n: int) -> bool:
    """
    Check if the integer is even or odd.

    >>> is_even_or_odd(0)
    True
    >>> is_even_or_odd(1)
    False
    >>> is_even_or_odd(2)
    True
    >>> is_even_or_odd(101)
    False
    """
    return n & 1 == 0
16ed8ce121e1ddc2bab29c524a711e4aa57ce922
216,923
def get_plot_dims(signal, ann_samp):
    """
    Figure out the number of plot channels.

    Parameters
    ----------
    signal : 1d or 2d numpy array, optional
        The uniformly sampled signal to be plotted. If signal.ndim is 1, it is
        assumed to be a one channel signal. If it is 2, axes 0 and 1, must
        represent time and channel number respectively.
    ann_samp : list, optional
        A list of annotation locations to plot, with each list item
        corresponding to a different channel. List items may be:

        - 1d numpy array, with values representing sample indices. Empty
          arrays are skipped.
        - list, with values representing sample indices. Empty lists are
          skipped.
        - None. For channels in which nothing is to be plotted.

        If `signal` is defined, the annotation locations will be overlaid on
        the signals, with the list index corresponding to the signal channel.
        The length of `annotation` does not have to match the number of
        channels of `signal`.

    Returns
    -------
    sig_len : int
        The signal length (per channel) of the dat file.
    n_sig : int
        The number of signals contained in the dat file.
    n_annot : int
        The number of annotations contained in the dat file.
    int
        The max between number of signals and annotations.
    """
    if signal is not None:
        if signal.ndim == 1:
            sig_len = len(signal)
            n_sig = 1
        else:
            sig_len = signal.shape[0]
            n_sig = signal.shape[1]
    else:
        sig_len = 0
        n_sig = 0

    if ann_samp is not None:
        n_annot = len(ann_samp)
    else:
        n_annot = 0

    return sig_len, n_sig, n_annot, max(n_sig, n_annot)
bcdb8abd09a421a47b6386af3aeaada37356acb1
292,205