Columns: content (string, length 39 to 9.28k), sha1 (string, length 40), id (int64, range 8 to 710k)
def make_italic(text: str) -> str:
    """Returns the text surrounded by *"""
    return "*" + text + "*"
d3b150bc9bc42f0dbcd6c61e492c837430217260
319,579
def interpret_line(line, splitter=','):
    """
    Split text into arguments and parse each of them to an appropriate
    format (int, float or string)

    Args:
        line: text line
        splitter: value to split by

    Returns:
        list of arguments
    """
    parsed = []
    elms = line.split(splitter)
    for elm in elms:
        try:
            # try int
            el = int(elm)
        except ValueError:
            try:
                # try float
                el = float(elm)
            except ValueError:
                # otherwise just leave it as string
                el = elm.strip()
        parsed.append(el)
    return parsed
0f51c08484dfd126b59d89231b072523fea4e72a
588,378
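A quick doctest-style sketch of interpret_line above (hypothetical input, not part of the dataset row):

    >>> interpret_line("1, 2.5, foo")
    [1, 2.5, 'foo']

Note that int() and float() already tolerate surrounding whitespace, so only the string fallback needs an explicit strip().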
def load_lines(text_file):
    """
    Reads the text file and returns a list of lines.
    """
    lines = []
    with open(text_file, encoding='utf-8') as text:
        for line in text:
            lines.append(line.strip())
    return lines
8de0a3b1cfd9a4c0b59c6bd5f7510ffc45de11f6
196,982
def blksLeftStakeWindow(height, netParams):
    """
    Return the number of blocks until the next stake difficulty change.

    Args:
        height (int): Block height to find remaining blocks from.
        netParams (module): The network parameters.

    Returns:
        int: The number of blocks left in the current window.
    """
    window = netParams.StakeDiffWindowSize
    # Add one to height, to account for the genesis block.
    return window - (height + 1) % window
b0dcfc6747aec4a7c9e580a24dda80a32b9e5f03
645,807
def parse_intf_status(lines):
    """
    @summary: Parse the output of command "show interface description".
    @param lines: The output lines of command "show interface description".
    @return: Return a dictionary like:
        {
            "Ethernet0": {
                "oper": "up",
                "admin": "up",
                "alias": "etp1",
                "desc": "ARISTA01T2:Ethernet1"
            },
            ...
        }
    """
    result = {}
    for line in lines:
        fields = line.split()
        intf = fields[0]
        oper, admin, alias, desc = None, None, None, None
        if len(fields) == 4:  # when port description is empty string ""
            oper, admin, alias, desc = fields[1], fields[2], fields[3], ''
        if len(fields) > 4:
            oper, admin, alias, desc = fields[1], fields[2], fields[3], ' '.join(fields[4:])
        if oper and admin and alias:
            result[intf] = {"oper": oper, "admin": admin, "alias": alias, "desc": desc}
    return result
dfc4590f0659fea16daac31e01c9beeae98d590f
672,017
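A minimal usage sketch for parse_intf_status with hypothetical CLI output lines:

    lines = [
        "Ethernet0  up  up  etp1  ARISTA01T2:Ethernet1",
        "Ethernet4  up  up  etp2",
    ]
    print(parse_intf_status(lines))
    # {'Ethernet0': {'oper': 'up', 'admin': 'up', 'alias': 'etp1', 'desc': 'ARISTA01T2:Ethernet1'},
    #  'Ethernet4': {'oper': 'up', 'admin': 'up', 'alias': 'etp2', 'desc': ''}}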
def adjustNumberInRange(x: float) -> float:
    """Adjust number into range (0,100)"""
    if x <= 0:
        return 1e-12
    if x >= 100:
        return 100 - 1e-12
    return x
3cb16d47a3b69972764473d05dd4963f47eb4090
585,429
def _generate_payload(sentence):
    """
    Helper function to prep the payload data structure.
    Also a good place to manipulate the payload (e.g., lower-casing if needed)

    Args:
        sentence: sentence to be used for the inference (str)

    Returns:
        dictionary of the prediction payload; the input data is nested below
        an "instances" key as a list of dicts with the different input tensors.
        More details:
        https://cloud.google.com/ml-engine/docs/v1/predict-request
    """
    return {"instances": [{"sentence": sentence}]}
9a85a111752c27570c130b78e1de2971448e2bcf
432,489
def is_odd(n):
    """Returns True if n is odd, and False if n is even.

    Assumes integer.
    """
    return bool(n & 1)
7cc975feb89fa6dcc847342ec1b4b50371a81446
516,082
def is_git_repo(template_repo):
    """
    Returns True if the template_repo looks like a git repository.
    """
    return template_repo.startswith("git@") or \
        template_repo.startswith("https://")
ef7454846afde986f635ed23b32dff561f2404df
324,900
def fromSecondsToHMS(seconds):
    """Convert seconds to an Hour:Minute:Second string."""
    seconds = int(seconds)
    seconds_per_hour = 60 * 60
    seconds_per_minute = 60
    # Integer division so the components come out as whole numbers.
    hour = seconds // seconds_per_hour
    minute = (seconds % seconds_per_hour) // seconds_per_minute
    second = seconds % seconds_per_minute
    return '%s:%s:%s' % (hour, minute, second)
1ac3568d1e20b5bff4458b9f42e3ebc61580fd51
196,395
def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
    """
    Determines whether one float value is approximately equal or "close" to
    another float value. Copied from PEP 485.

    Args:
        a (float): Specifies the first value to be tested for relative
            closeness.
        b (float): Specifies the second value to be tested for relative
            closeness.
        rel_tol (float): Specifies the relative tolerance -- it is the amount
            of error allowed, relative to the larger absolute value of a or b.
            For example, to set a tolerance of 5%, use rel_tol=0.05. The
            default tolerance is 1e-9, which assures that the two values are
            the same within about 9 decimal digits. rel_tol must be greater
            than 0.0.
        abs_tol (float): Specifies a minimum absolute tolerance level --
            useful for comparisons near zero.

    Returns:
        bool: Indicates whether the first float value is approximately equal
        or "close" to the second float value.
    """
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
eb0cf402504882ed90ea238b0a86a7d821cec258
142,254
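A few doctest-style checks for is_close (hand-picked values, not from the source):

    >>> is_close(0.1 + 0.2, 0.3)
    True
    >>> is_close(1.0, 1.1, rel_tol=0.05)    # |a-b| = 0.1 > 0.05 * 1.1
    False
    >>> is_close(0.0, 1e-10, abs_tol=1e-9)  # near zero, rel_tol alone would fail
    True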
def eval_overlap(n1, n2):
    """
    Return a tuple containing the number of matches (resp., mismatches)
    between a pair (n1, n2) of overlapping reads
    """
    hang1 = n2["begin"] - n1["begin"]
    overlap = zip(n1["alleles"][hang1:], n2["alleles"])
    match = mismatch = 0
    for (c1, c2) in overlap:
        if c1 == c2:
            match += 1
        else:
            mismatch += 1
    return match, mismatch
fa6a64897849f3d3671e854bd18bde25e6455905
416,392
import tempfile


def create_temporary_vocab_file(words, counts=None):
    """
    Creates a temporary vocabulary file.

    Args:
        words: List of words in the vocabulary
        counts: Optional list of per-word counts; if given, each line is
            written as word<TAB>count

    Returns:
        A temporary file object with one word per line
    """
    vocab_file = tempfile.NamedTemporaryFile()
    if counts is None:
        for token in words:
            vocab_file.write((token + "\n").encode("utf-8"))
    else:
        for token, count in zip(words, counts):
            vocab_file.write("{}\t{}\n".format(token, count).encode("utf-8"))
    vocab_file.flush()
    return vocab_file
fab87ac9153259e56ca2c0ecd6915ff0185c5845
192,620
def builddict(fin):
    """
    Build a dictionary mapping from username to country for all classes.

    Takes as input an open csv.reader on the edX supplied file that lists
    classname, country, and username and returns a dictionary that maps from
    username to country
    """
    retdict = {}
    for course, country, username in fin:
        if username not in retdict:
            retdict[username] = country
    return retdict
ddf9272e0da6616abd0495b7b159807a36a83dcc
702,628
import re


def parse_full_section(section_name, docs):
    """
    Find a named section in the documentation.

    Parameters
    ----------
    section_name : str
        Title of the section to extract.
    docs : str

    Returns
    -------
    str or None
        Returns the section text from the documentation or ``None`` if the
        function didn't find it.
    """
    parser = re.compile(r"{}\s+-+\s+(?P<section_text>(.*\n)+?)\s+"
                        # Here we try to find next section title or
                        # the end of the documentation
                        r"([\w\ ]+\n\s+-+\s+|$)"
                        r"".format(section_name))
    parsed_doc_parts = parser.findall(docs)

    if not parsed_doc_parts:
        return None

    section_text_block = parsed_doc_parts[0]
    full_section_text = section_text_block[0]

    # Regexp can catch multiple `\n` symbols at the end of
    # the section. For this reason we need to get rid of them.
    return full_section_text.rstrip()
5cf967b132ac1fb1412b658799a80b3f9659e60d
569,099
def is_pokemon_included(pokemon: str, team: str) -> int:
    """
    A helper function to check if a pokemon is in a certain team.
    Returns 1 if the pokemon is in the team, 0 otherwise
    """
    if pokemon.strip().lower() in team:
        return 1
    else:
        return 0
9e5a7bcdb09bd50be8198f542ad673ef8cfd60a3
524,440
def calc_scale1fb(xs, sum_weights):
    """
    Given xs (in pb) and sum of gen weights, calculate scale1fb.

    :param xs: cross section (in pb)
    :type xs: float
    :param sum_weights: sum of gen weights
    :type sum_weights: float
    :return: scale1fb
    :rtype: float
    """
    if xs <= 0:
        return -1
    else:
        return (xs * 1000.) / sum_weights
c0da32b1a706ff9d6a841935a625a535fd8b96c8
491,786
import re


def find_gaps(seq):
    """
    Accepts a string and returns the positions of all of the gaps in the
    sequence

    :param seq: str
    :return: list of [start, end) pairs for all of the gaps (end is exclusive,
        as returned by re.Match.end())
    """
    match = re.finditer(r"-+", seq)
    positions = []
    for m in match:
        positions.append([m.start(), m.end()])
    return positions
b57d28837dd81cf446eb7fae7a94b1594214187f
480,783
def refractivity_dry_continuum(ν, θ, pd, e):
    """Complex refractivity due to dry air continuum terms.

    ν   GHz  frequency at which refractivity is evaluated
    θ   -    reciprocal temperature
    pd  hPa  pressure of dry air
    e   hPa  pressure of water vapor

    Liebe et al. (1993).
    """
    S0 = 6.14e-11 * pd * θ*θ
    γ0 = 0.56e-3 * (pd+e) * θ**0.8
    F0 = -ν/(ν + 1j*γ0)
    Sn = 1.40e-18 * pd*pd * θ**3.5
    Fn = ν/(1 + 1.9e-5*ν**1.5)
    return S0*F0 + 1j*Sn*Fn
386c12ab130368761e88897a0d0d775b43c8a323
346,738
def remove_prior(dataframe, include_prior):
    """If include_prior is False, drops the prior features from dataframe."""
    # The prior is included in the data by default.
    if not include_prior:
        print('Removing prior speaking labels from training features.')
        return dataframe.drop(
            [c for c in dataframe.columns if 'prior' in c],
            axis=1,
        )
    return dataframe
8aa2409cf6996779455c332fc1d7c5e9f66fdfee
140,531
import functools


def sorted_data(func):
    """Decorator to sort data passed to stats functions."""
    @functools.wraps(func)
    def inner(data, *args, **kwargs):
        data = sorted(data)
        return func(data, *args, **kwargs)
    return inner
7b2ee9668a875e8716edb4e83fc8e59b7f287314
591,169
def indent(str, level):
    """
    Returns string where each line is indented by the given level in tabs.
    """
    if level == 0:
        return str
    return "\n".join("\t" * level + line for line in str.splitlines())
96ad7f3aed849a731e877641bf69ec3f8936e085
131,814
import math


def two_strong_shocks(left, right, g):
    """
    Pressure at the interface of the Riemann problem
    in the case of two strong shocks

    Input:
    left - Primitive variables on the left side
    right - Primitive variables on the right side
    g - Adiabatic index
    """
    dl = left.Density
    vl = left.Velocity
    dr = right.Density
    vr = right.Velocity
    return (dl*dr*(1 + g)*(vl - vr)**2)/(2.*(math.sqrt(dl) + math.sqrt(dr))**2)
597587dfdac77cef7856fec7d48038185c2c3f69
217,383
def excel_column_name(n):
    """Number to Excel-style column name, e.g., 1 = A, 26 = Z, 27 = AA, 703 = AAA."""
    name = ''
    while n > 0:
        n, r = divmod(n - 1, 26)
        name = chr(r + ord('A')) + name
    return name
46d97f35dd2bf053483550851fbd836d0325f118
427,421
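Doctest-style checks for excel_column_name, covering the boundary cases named in the docstring:

    >>> excel_column_name(1)
    'A'
    >>> excel_column_name(26)
    'Z'
    >>> excel_column_name(28)
    'AB'
    >>> excel_column_name(703)
    'AAA'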
def ea_from_rhmax(e_tmin, rh_max):
    """
    Calculates actual vapour pressure [kPa] from maximum relative humidity
    using FAO equation (18).

    Arguments:
    e_tmin - saturation vapour pressure at daily minimum temperature [kPa]
    rh_max - maximum relative humidity [%]
    """
    # Raise exceptions (Python 3 raise syntax):
    if rh_max < 0 or rh_max > 100:
        raise ValueError('RH_max=%g is not in range 0-100' % rh_max)
    return e_tmin * (rh_max / 100.0)
0679cf1de4b71e4b722be718c190ae416db49ea5
448,981
def ejer51a(polls):
    """
    Given a dataframe, computes the number of people per concern level
    (concern very, somewhat, ...) and categorizes the interviews into those
    conducted strictly before 2020-09-01 and those conducted after.

    :param polls: A dataframe of interviews joined with their interviewer
    :return: The input dataframe with the added columns `n_very`,
        `n_somewhat`, `n_not_very`, `n_not_at_all` and `date_group`
    """
    def set_date_group(row):
        row["date_group"] = "Before 2020-09-01" if row["end_date"] < "2020-09-01" else "After 2020-09-01"
        return row

    df51a = polls.apply(set_date_group, axis=1)  # Generate date_group column
    df51a["n_very"] = df51a["very"] / 100 * df51a["sample_size"]
    df51a["n_somewhat"] = df51a["somewhat"] / 100 * df51a["sample_size"]
    df51a["n_not_very"] = df51a["not_very"] / 100 * df51a["sample_size"]
    df51a["n_not_at_all"] = df51a["not_at_all"] / 100 * df51a["sample_size"]
    return df51a
5bd37bfd19a2cf91453fb54d5ede4ecc0a7db16b
311,713
def soft_capitalize(string: str):
    """Capitalizes string without affecting other chars"""
    if not string:  # guard against IndexError on empty input
        return string
    return f"{string[0].upper()}{string[1:]}"
9cfadc1b5fb51e88625eaad2f64fe3812d226a10
671,861
def s_polynomial(f, g, order):
    """
    Return S-polynomial of f and g with respect to the order.

    S(f, g) = (lc(g)*T/lb(f))*f - (lc(f)*T/lb(g))*g,
    where T = lcm(lb(f), lb(g)).
    """
    f_lb, f_lc = order.leading_term(f)  # term = (base, coeff)
    g_lb, g_lc = order.leading_term(g)
    t = f_lb.lcm(g_lb)
    return f.term_mul((t - f_lb, g_lc)) - g.term_mul((t - g_lb, f_lc))
ec58d4ea179a89b4cfdb2615d69f80fb59aa4e4b
139,932
def _grad_j(q_j, A_j, b_j, b_j_norm, a_1_j, a_2_j, m):
    """Compute the gradient with respect to one of the coefficients."""
    return (A_j.t() @ q_j / (-m)) + (b_j * (a_1_j / b_j_norm + a_2_j))
d051883d848386af9eeb40a10029351158b075c0
651,750
def Drop(x, **unused_kwargs):
    """Drops one element."""
    del x  # Just for the compiler.
    return ()
0dfb948270351f0f36d99c3748a0d64efd5845c5
581,261
def store_decorators(decorators):
    """Sets a list of decorators as the attribute `_decorators` on a function.

    This is for the purpose of applying decorators to a higher-level wrapping
    function.
    """
    def outer(fn):
        fn._decorators = decorators
        return fn
    return outer
a63df3dfec1cff24ee9e4fd7651284f86459bc1d
573,609
def idf(x):
    """
    The identity function: returns the element with which the function
    was applied.

    .. note:: Useful commonly as an identity element of a function
       composition.

    Parameters
    ----------
    x : any
        Element to apply the function with.

    Returns
    -------
    x : any
        The same element as `x`.

    Examples
    --------
    >>> idf(1)
    1
    >>> idf([])
    []
    """
    return x
940ea54cee5b79502b7f7c5476f40ea12b4ea8f9
370,991
def str_convert(val: str) -> str:
    """
    Converts the given value to a string type.
    """
    return str(val)
8f9bfca8e6296ef95d747dbcdb7d2bc854245d22
290,853
import html


def linebrk(s, n):
    """Break input string s with <br/> for every n characters."""
    result = ""
    j = 0
    for i, c in enumerate(s):
        if j == n and i != len(s) - 1:
            result = result + "\n"
            j = 0
        j = j + 1
        result = result + c
    result = html.escape(str(result), quote=True)
    result = result.replace("\n", "<br/>")
    return result
bfae6d96ab042e283ade4358df69e23bacac4b5f
585,836
import copy


def pair_from_inputs(inputs, b=0):
    """Return a pair without the batch dimension.

    Note that b specifies which batch index to use. Also note that inputs has
    a nested dictionary structure 2 layers deep.
    """
    pair = {}
    for key, value in inputs.items():
        if isinstance(value, dict):
            pair[key] = {}
            for key2, value2 in value.items():
                pair[key][key2] = copy.deepcopy(value2[b])
        else:
            pair[key] = copy.deepcopy(value[b])
    return pair
7684f8f80253d57546f4308d604df308fd43d9db
366,000
def convert_variants_trace_idx_to_trace_obj(log, variants_trace_idx):
    """
    Converts variants expressed as trace indexes to trace objects

    Parameters
    -----------
    log
        Trace log object
    variants_trace_idx
        Variants associated to a list of belonging indexes

    Returns
    -----------
    variants
        Variants associated to a list of belonging traces
    """
    variants = {}
    for key in variants_trace_idx:
        variants[key] = []
        for value in variants_trace_idx[key]:
            variants[key].append(log[value])
    return variants
a3ea3f2246af41241b5e32515747b54bc0e076c6
162,044
def DNA_to_mRNA_List(DNA_string):
    """Takes in DNA sequence string and converts it to an mRNA list.

    Parameters
    ----------
    DNA_string : string
        String that contains letters/characters of a DNA string, e.g., 'a,'
        't,' 'c,' and 'g.'

    Returns
    -------
    mRNA_List : list
        List that converts each and every value in input to corresponding
        mRNA values.
    """
    # Creates an empty list to which values will be appended
    mRNA_List = []
    # Loops through each character of DNA string input
    for char in DNA_string:
        if char == 'a' or char == 'A':
            # Characters are appended to mRNA_List
            mRNA_List.append('U')
        elif char == 't' or char == 'T':
            mRNA_List.append('A')
        elif char == 'c' or char == 'C':
            mRNA_List.append('G')
        elif char == 'g' or char == 'G':
            mRNA_List.append('C')
    # Output mRNA_List is returned by function
    return mRNA_List
bd44382d7178269341d24acb021d2e3eb82e0417
170,966
def switch_bl(key):
    """
    switch antenna ordering in (ant1, ant2, pol) key
    where ant1 and ant2 are ints and pol is a two-char str
    Ex. (1, 2, 'xx')
    """
    return (key[1], key[0], key[2][::-1])
2564eb844944fb91786b4c203df59ac031249391
381,215
def generate_evergreen_project_name(owner, project, branch):
    """Build an evergreen project name based on the project owner, name and branch."""
    return "{owner}-{project}-{branch}".format(owner=owner, project=project, branch=branch)
254a9ab85d4f1805bfef4c1750e714f983bd0a65
87,689
def upperhexstr(buff):
    """Buffer -> Upper Human Readable Hex String"""
    # Handle both text strings (chars) and byte strings (ints when iterated).
    return ' '.join('%02X' % (c if isinstance(c, int) else ord(c)) for c in buff)
e4e988b4f2422191d5fe16a5a9c8326594814cf0
99,482
def tweet_id_to_timestamp(theid):
    """
    Get the creation time (POSIX timestamp, in seconds) from twitter IDs

    :param theid: int id from tweet object
    :return: timestamp of object creation
    ---- !!Only works for tweets authored after 2010-06-01!! ----
    """
    # The top bits of a snowflake ID encode milliseconds since the Twitter
    # epoch (1288834974657); dividing by 1000 converts to seconds.
    return ((theid >> 22) + 1288834974657) / 1000
f583ea42848520dbee573b2b250ef9cbea72daa3
127,078
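A usage sketch for tweet_id_to_timestamp; the tweet ID below is made up, so only the conversion pattern matters:

    from datetime import datetime, timezone

    ts = tweet_id_to_timestamp(1585000000000000000)  # hypothetical snowflake ID
    created_at = datetime.fromtimestamp(ts, tz=timezone.utc)
    print(created_at)  # creation time of the (hypothetical) tweet, in UTC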
import logging


def validate_query_file(**kwargs):
    """
    Validates query file. Currently only checks the query file name

    Kwargs:
        query_filename(str): Name of query file

    Returns:
        True(bool): Query successfully validated
        False(bool): Query failed validation
    """
    if not kwargs["query_filename"].endswith(".sql"):
        logging.warning(
            "Query filename "
            + kwargs["query_filename"]
            + ' is invalid - does not end in ".sql". Skipping'
        )
        return False
    else:
        return True
84c4c630a3580f32ca2eb9b71b6a5ad05e5673d3
409,405
def compute_iou(boxA, boxB):
    """Computes the Intersection over Union (IoU) for two bounding boxes.

    Args:
        boxA, boxB (`numpy.ndarray`): Bounding boxes [xmin, ymin, width,
            height] as arrays with shape (4,) and dtype float.

    Returns:
        IoU (`float`): The IoU of the two boxes. It is within the range
            [0, 1], 0 meaning no overlap and 1 meaning full overlap of the
            two boxes.
    """
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
    yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])

    interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
    if interArea == 0:
        return 0

    boxAArea = abs(boxA[2] * boxA[3])
    boxBArea = abs(boxB[2] * boxB[3])

    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou
a2ed259794923c9874c59fde39877a108f27627f
412,609
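A worked example for compute_iou with two hand-made boxes (plain lists index the same way as the (4,) arrays the docstring asks for):

    boxA = [0.0, 0.0, 10.0, 10.0]
    boxB = [5.0, 5.0, 10.0, 10.0]
    # intersection: the 5x5 square from (5,5) to (10,10) -> area 25
    # union: 100 + 100 - 25 = 175, so IoU = 25/175
    print(compute_iou(boxA, boxB))  # 0.14285714285714285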
def int_safe(obj, default=0):
    """safely convert something to an integer"""
    try:
        obj_int = int(obj)
    except ValueError:
        obj_int = default
    return obj_int
7c916465e75a11444a55978fd698990b02022fbc
369,533
def process_schema_name(name):
    """
    Extract the name out of a schema composite name by removing unnecessary
    strings

    :param name: a schema name
    :return: a string representing the processed schema
    """
    new_raw_name = name.replace("_schema.json", '').replace('.json', '')
    name_array = new_raw_name.split('_')
    output_name = ""
    for name_part in name_array:
        output_name += name_part.capitalize()
    return output_name
416f3c521111f107b22da844b0726d867508fab4
542,491
def fail_safe(temperature, neutrons_produced_per_second, threshold):
    """Assess and return status code for the reactor.

    :param temperature: value of the temperature in kelvin (integer or float)
    :param neutrons_produced_per_second: neutron flux (integer or float)
    :param threshold: threshold (integer or float)
    :return: str one of: 'LOW', 'NORMAL', 'DANGER'

    - `temperature * neutrons per second` < 90% of `threshold` == 'LOW'
    - `temperature * neutrons per second` +/- 10% of `threshold` == 'NORMAL'
    - `temperature * neutrons per second` is not in the above-stated ranges == 'DANGER'
    """
    result = temperature * neutrons_produced_per_second
    if result < threshold * 0.9:
        return 'LOW'
    if 0.9 * threshold <= result <= 1.1 * threshold:
        return 'NORMAL'
    return 'DANGER'
bf3f27d5f66ec8d9cc3d76de245d5fc85eb292bc
347,790
def verify(parser, argv):
    """verify(parser, argv)

    Check for input errors

    Arguments:
        parser: OptionParser instance
        argv (list): Argument list

    Returns:
        An error message in the event of an input error, or None
    """
    opts, args = parser.parse_args(argv)
    err_msg = None
    if opts.script and len(args) != 1:
        err_msg = "Must provide a script\n"
    return err_msg
30d8649ad23309516259b188707bb90df9cbbd8a
536,062
def Jy2K(S, theta, lam):
    """
    Convert Jansky/beam to Kelvin, taken from
    https://science.nrao.edu/facilities/vla/proposing/TBconv

    S: Flux in Jy/beam
    theta: FWHM of the telescope in radians
    lam: Wavelength of the observation in m

    Returns: Brightness temperature in K
    """
    return 0.32e-3 * lam**2. / theta**2. * S
b28aae3fbad261ce83368a667997c31904a7a85d
255,714
import functools
import warnings


def deprecated(obj=None, suffix=""):
    """
    Decorator to mark a function or a class as deprecated
    """

    def decorator_deprecation_warning(obj):
        @functools.wraps(obj)
        def wrapped(*args, **kwargs):
            if isinstance(obj, type):
                msg = (
                    'Class "%s" is deprecated and will be removed in 6.0.'
                    % obj.__name__
                )
            else:
                msg = (
                    'Function "%s" is deprecated and will be removed in 6.0.'
                    % obj.__name__
                )
            if suffix:
                msg += "; %s" % suffix
            warnings.warn(msg, category=FutureWarning)
            return obj(*args, **kwargs)

        return wrapped

    if obj is None:
        return decorator_deprecation_warning
    return decorator_deprecation_warning(obj)
019813143c185c4d322d0a057210631481d9843b
629,078
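The decorator above supports both bare and parameterized use; a sketch with hypothetical helpers:

    @deprecated
    def old_helper():
        return 42

    @deprecated(suffix="use new_helper() instead.")
    def older_helper():
        return 0

    old_helper()    # emits FutureWarning: 'Function "old_helper" is deprecated ...'
    older_helper()  # same warning, with the suffix appended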
def call_one_by_one(objs, method_name: str, args, **kwargs):
    """
    Call specified method of given objects with given args in order.
    """
    for obj in objs:
        assert hasattr(obj, method_name), \
            "'{cmd}' has no method '{method}'".format(
                cmd=obj.__name__, method=method_name
            )
        args = getattr(obj, method_name)(args, **kwargs)
    return args
4a3eaf05a786df7978bd4168eb60c60a136f529a
597,909
from functools import reduce


def get_chunk_ranges(N, num_procs):
    """
    A helper that, given a number N representing the size of an iterable and
    the num_procs over which to divide the data, returns a list of
    (start_index, end_index) pairs that divide the data as evenly as possible
    into num_procs buckets.
    """
    per_thread = N // num_procs  # integer division; remainder handled below
    allocation = [per_thread] * num_procs
    allocation[0] += N - num_procs * per_thread
    data_ranges = [0] + reduce(
        lambda acc, num: acc + [num + (acc[-1] if len(acc) else 0)],
        allocation, [])
    data_ranges = [(data_ranges[i], data_ranges[i + 1])
                   for i in range(len(data_ranges) - 1)]
    return data_ranges
87405d16f4f89050d8f96baa1d1787998ac5cf06
540,578
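With the integer division above, any remainder goes to the first bucket; a doctest-style check:

    >>> get_chunk_ranges(10, 3)
    [(0, 4), (4, 7), (7, 10)]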
def degree_to_compass(degree):
    """
    Converts the wind direction from degrees to a compass bearing.

    Shamelessly copied from @steve-gregory
    https://stackoverflow.com/questions/7490660/converting-wind-direction-in-angles-to-text-words
    """
    val = int((degree / 22.5) + .5)
    bearings = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE",
                "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
    return bearings[val % 16]
b7419fc433317b02fe0be58703bb58ff81c928ea
210,082
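Doctest-style checks for degree_to_compass, including the wrap-around near 360 degrees:

    >>> degree_to_compass(0)
    'N'
    >>> degree_to_compass(90)
    'E'
    >>> degree_to_compass(225)
    'SW'
    >>> degree_to_compass(350)
    'N'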
def process_base64(img_string: str):
    """
    Converts a base64 image string to byte array.

    Example:
        "data:image/png;base64,iVBORw0KGgoAAAANSUhE..."
        becomes
        b'iVBORw0KGgoAAAANSUhE...'
    """
    # If base64 has metadata attached, get only data after comma
    if img_string.startswith("data"):
        img_string = img_string.split(",")[-1]
    return bytes(img_string, 'utf-8')
46f1d46fcb887515ca31c3671b08e3573cd9da41
196,121
import torch


def jitter_soma_depth(feats, scale=10):
    """Apply jitter to soma depth.

    Args:
        feats: features per node
        scale: scale factor of jittering
    """
    new_feats = feats.copy()
    new_feats[:, 1] += torch.randn(1).numpy() * scale
    return new_feats
188cb75555c330402c2b703ef9038de40eebd15c
629,797
import torch


def _get_anchor_positive_mask(labels):
    """Return a 2D mask where mask[a, p] is True if a and p are distinct and
    have same label."""
    indices_equal = torch.eye(labels.size(0)).bool().cuda()
    indices_not_equal = ~indices_equal
    labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
    return labels_equal & indices_not_equal
0c77776f6ab1a68966455e8e1ad32719d7512cc3
285,812
def continuous_magnitude_peak(disc_peak_index, disc_magnitude, exponent=0.2308):
    """Estimate the value and location of a continuous spectrum peak.

    Applies quadratic interpolation to the discrete peak and its two
    neighbors to estimate the location and value of the continuous peak.
    Weighs the peaks using an exponential function to increase the accuracy
    of the estimate. Based on the paper "The XQIFFT: Increasing the Accuracy
    of Quadratic Interpolation of Spectral Peaks via Exponential Magnitude
    Spectrum Weighting" by Kurt James Werner.

    Parameters
    ----------
    disc_peak_index : integer
        Index in the `disc_magnitude` array where the magnitude peak is
        located.
    disc_magnitude : array_like
        Magnitude of the discrete spectrum with the peak of interest.
    exponent : float
        Exponent of the weighting function.

    Returns
    -------
    float
        A value between `0` and `len(disc_magnitude)-1` indicating the
        location of the peak in the continuous spectrum.
    float
        Estimated peak magnitude in the continuous spectrum.
    """
    # Weighing function and its inverse
    omega = lambda x: x**exponent
    inv_omega = lambda x: x**(1/exponent)

    # Weigh alpha, beta and gamma
    alpha = omega(disc_magnitude[disc_peak_index-1])
    beta = omega(disc_magnitude[disc_peak_index])
    gamma = omega(disc_magnitude[disc_peak_index+1])

    # Estimate continuous peak index (peak location)
    cont_peak_index = (disc_peak_index
                       + (1/2)*(alpha - gamma)/(alpha - 2*beta + gamma))
    # Estimate continuous peak magnitude
    cont_peak = beta - (1/8)*(alpha - gamma)**2/(alpha - 2*beta + gamma)
    # Unweigh peak magnitude
    cont_peak = inv_omega(cont_peak)

    return cont_peak_index, cont_peak
ca507d5b2a0f323a2781e9300f5c0b2ffa6de8e1
217,897
import re
from datetime import datetime


def clean_title_input(title, draft=False):
    """Convert a string into a valid Jekyll filename.

    Remove non-word characters, replace spaces and underscores with dashes,
    and add a date stamp if the file is marked as a Post, not a Draft.

    Args:
        title (string): A string based title
        draft (bool): A boolean indicating that the file is a draft

    Returns:
        string: a cleaned title for saving a new Jekyll post file
    """
    title_clean = title.lower()
    title_clean = re.sub(r'[^\w -]', '', title_clean)
    title_clean = re.sub(r' |_', '-', title_clean)
    today = datetime.today()
    title_date = today.strftime('%Y-%m-%d')
    return title_date + '-' + title_clean if not draft else title_clean
80958f893641bd420f487077d009be4c3efec1dc
633,633
def preprocess_baseline2(segment_df, rush_hour):
    """
    Preprocess the segment data considering the weather and the rush hour

    Algorithm:
    Preprocess segment_df to add a new column of rush hour
    split the dataframe with groupby(segment_start, segment_end, weather, rush_hour)
    Define the new dataframe
    For name, item in grouped:
        calculate the average travel duration
        save the record into the new dataframe

    :param segment_df: dataframe after adding the rush hour from final_segment.csv file
    :param rush_hour: tuple to express which is the rush hour, example: ('17:00:00', '20:00:00')
    :return: dataframe for the baseline2
    """
    # Preprocess segment_df to add a new column of rush hour
    rush_hour_column = segment_df['timestamp'].apply(
        lambda x: x[11:19] < rush_hour[1] and x[11:19] > rush_hour[0])
    new_segment_df = segment_df
    new_segment_df['rush_hour'] = rush_hour_column

    grouped = new_segment_df.groupby(
        ['segment_start', 'segment_end', 'weather', 'rush_hour'])
    result = grouped['travel_duration'].mean()
    result = result.reset_index()
    return result
b3a3a4ed4096b6c2424023d2adad2fb2a176d71a
673,131
def _tf(word_occured_in_doc: int, total_words_in_doc: int) -> float:
    """Term frequency of a word in a certain document.

    See: https://bit.ly/3zEDkMn
    """
    assert word_occured_in_doc <= total_words_in_doc
    return word_occured_in_doc / total_words_in_doc
32bd03d0b068ad229b7d9871bf3665643d35021e
19,948
import hashlib


def getmd5(filename):
    """
    Get the MD5 digest of a file.

    :param filename: file path
    :return: hex MD5 digest of the file contents
    """
    with open(filename, 'rb') as f:
        file_txt = f.read()
    m = hashlib.md5(file_txt)
    # hexdigest() returns the digest (the hashing result) as a hex string
    return m.hexdigest()
d258ca212d94ad89b97ce2b9c0016256378aec6d
317,035
def get_objects(resource, bucket_name, prefix=""):
    """List objects inside a bucket"""
    client = resource.meta.client
    next_token = ""
    content = []
    directory = []
    add_list = lambda keys, values: keys.extend(values) if values is not None else None
    while True:
        response = client.list_objects_v2(
            Bucket=bucket_name,
            Prefix=prefix,
            Delimiter="/",
            ContinuationToken=next_token,
        )
        add_list(content, response.get("Contents"))
        add_list(directory, response.get("CommonPrefixes"))
        next_token = response.get("NextContinuationToken")
        if next_token is None:
            return {"Contents": content, "CommonPrefixes": directory}
6cdf8fa079417320379f21e94224821ee4b9f225
618,076
import re


def splitTypeName(name):
    """
    Split the vendor from the name.

    splitTypeName('FooTypeEXT') => ('FooType', 'EXT').
    """
    suffixMatch = re.search(r'[A-Z][A-Z]+$', name)
    prefix = name
    suffix = ''
    if suffixMatch:
        suffix = suffixMatch.group()
        prefix = name[:-len(suffix)]
    return (prefix, suffix)
1a7013b82e554fe6ff6ebff1b84d7d96ae3481db
686,328
def get_slices(data, slice_size):
    """Slices up and returns the data in slices of slice_size.

    :param data: list to divide in one or several slices of size slice_size
    :param slice_size: integer designating the size of a slice from data
    :return: list of len(data) / slice_size slices of data of size slice_size
        if the number of items in data is a multiple of slice_size, or list
        of len(data) / slice_size + 1 slices of data of size slice_size
        except for the last slice, of size
        len(data) - slice_size * (len(data) / slice_size)
    """
    slices = list()
    indexes = [i for i in range(0, len(data), slice_size)]
    for i in range(0, len(indexes) - 1):
        slices.append(data[indexes[i]:indexes[i + 1]])
    # Guard against empty data, where indexes[-1] would raise IndexError.
    if indexes and len(data) > indexes[-1]:  # is there a last slice?
        slices.append(data[indexes[-1]:])
    return slices
7832ba5f0995e2d9aee959e517376fe8afc2602e
308,240
def reset_line_breaks(curr_boundary={}):
    """
    Builds a fresh line breaks dictionary while keeping any information
    provided concerning line boundaries.

    Parameters
    ----------
    curr_boundary: dict
        Line boundaries to be preserved

    Returns
    -------
    dict
        The newly initialized line breaks dictionary
    """
    start = []
    end = []
    tokens = []
    if "end" in curr_boundary:
        end = curr_boundary["end"]
    if "start" in curr_boundary:
        start = curr_boundary["start"]
    if "tokens" in curr_boundary:
        tokens = curr_boundary["tokens"]
    line_breaks = {
        "end": end,
        "pageBoundaries": {},
        "start": start,
        "tokens": tokens
    }
    return line_breaks
da7f1fc0f206e8a39f0a0d42f1652a3c4bb23200
702,221
def get_positions_at_time(positions, t):
    """
    Return a list of positions (dicts) closest to, but before time t.
    """
    # Assume positions list is already sorted.
    # frame is a list of positions (dicts) that have the same timestamp.
    frame = []
    frame_time = 0.0
    for pos in positions:
        # If we passed the target time t, return the frame we have
        if pos["time"] > t:
            break
        # If this position is part of the current frame, add it
        if pos["time"] == frame_time:
            frame.append(pos)
        # If current frame is over, make a new frame and add this position to it
        else:
            frame = []
            frame.append(pos)
            frame_time = pos["time"]
    return frame
905f40f8226a0aca96cff13faf725c55794c3caf
597,423
def _format_case_params(case_params):
    """
    Format a set of case parameters for inclusion in an error message.

    Account for the fact that while the case parameters are supposed to be a
    dictionary, they could be anything.
    """
    try:
        return "\n".join(
            f'{k!r}: {v!r}'
            for k, v in case_params.items()
        )
    except Exception:
        return repr(case_params)
ce3347dee1cf24c3b37fadfb31cd1a254a12edd3
212,989
def format_value(value, fmt):
    """
    Convert numerical value to string with a specific format

    Parameters
    ----------
    value : int or float
        Numerical variable to convert.
    fmt : str
        String format used to apply the conversion

    Returns
    -------
    string_value : str
        String containing a formatted version of the value

    Examples
    --------
    >>> format_value(30.5, ".3f")
    '30.500'
    >>> format_value(30.5, "5g")
    '30.5'
    >>> format_value(123, "d")
    '123'
    >>> format_value(123, ".2f")
    '123.00'
    """
    return "{value:>{fmt}}".format(value=value, fmt=fmt).strip()
3a402d8a640ead35bea8e2fe3e602a2d700102d4
321,534
def utterance_from_line(line):
    """Converts a line of text, read from an input file, into a list of words.

    Start-of-sentence and end-of-sentence tokens (``<s>`` and ``</s>``) will
    be inserted at the beginning and the end of the list, if they're missing.
    If the line is empty, returns an empty list (instead of an empty sentence
    ``['<s>', '</s>']``).

    :type line: str or bytes
    :param line: a line of text (read from an input file)

    :rtype: list of strs
    :returns: list of words / tokens
    """
    if isinstance(line, bytes):
        line = line.decode('utf-8')
    line = line.rstrip()
    if not line:
        # empty line
        return []

    result = line.split()
    if result[0] != '<s>':
        result.insert(0, '<s>')
    if result[-1] != '</s>':
        result.append('</s>')
    return result
cbf32b595ed354e44644a7fed9215e24feb93073
522,894
def GET_DATA(tag: str) -> dict:
    """GET_DATA: generate APDU for GET DATA command"""
    return {'header': '80CA' + tag, 'Le': '00'}
92f406b125d137a90613fa5760a4f45a5e521c34
107,729
def pad(bytestring, k=16):
    """
    Pad an input bytestring according to PKCS#7
    """
    l = len(bytestring)
    val = k - (l % k)
    return bytestring + bytearray([val] * val)
5c4917619edb5203402b9370cd1a45a370ad3dc3
611,065
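A doctest-style check of the PKCS#7 padding above (bytes + bytearray yields bytes):

    >>> pad(b'YELLOW SUBMARINE', 20)
    b'YELLOW SUBMARINE\x04\x04\x04\x04'

An input whose length is already a multiple of k gets a full extra block of k pad bytes, as PKCS#7 requires.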
def chunks(items, size):
    """
    Split list into chunks of the given size.
    Original order is preserved.

    Example:
        > chunks([1,2,3,4,5,6,7,8,9], 2)
        [[1, 2], [3, 4], [5, 6], [7, 8], [9]]
    """
    return [items[i:i+size] for i in range(0, len(items), size)]
960bc6ba7775ce66c3deb6c07966a9157b3e3e2c
634,069
def process_ubls(ubls):
    """
    Return list of tuples of unique-baseline pairs from command line argument.

    Input: comma-separated value of baseline pairs (formatted as "b1_b2")
    Output: list of tuples containing unique baselines
    """
    # test that there are ubls to process
    if ubls == '':
        return []
    else:
        ubaselines = []
        for bl in ubls.split(','):
            try:
                i, j = bl.split('_')
                ubaselines.append((int(i), int(j)))
            except ValueError:
                raise AssertionError(
                    "ubls must be a comma-separated list of baselines (formatted as b1_b2)")
        return ubaselines
91a434d776d7fe5d98f7f2f31a4b570c6100ff18
380,441
from typing import Dict
from typing import Any
from typing import Optional


def __get(data: Dict[str, Any], key: str, src: Optional[str] = None) -> Any:
    """
    Get a value from a dictionary; if the key does not exist, raise an
    exception that identifies the missing key and the configuration section
    in which it was expected.

    :param data: The dictionary from which to get the value.
    :param key: The key name.
    :param src: The configuration section associated with the dictionary.
    """
    try:
        return data[key]
    except KeyError:
        if src is None:
            src = 'Configuration'
        if src:
            raise ValueError('{}: "{}" is missing'.format(src, key))
        else:
            raise ValueError('"{}" is missing'.format(key))
263f8e13e28b304cdf50546e0df8c7ed5ae8589e
40,551
def _nice_cls_repr(cls):
    """Nice repr of classes, e.g. 'module.submod.Class'

    Also accepts tuples of classes
    """
    return f"{cls.__module__}.{cls.__name__}"
a291bac7b3347beb5c9baca1776484d07493ec51
500,430
def get_reading_level_from_flesch(flesch_score):
    """
    Thresholds taken from
    https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests

    :param flesch_score:
    :return: A reading level and difficulty for a given flesch score
    """
    if flesch_score < 30:
        return "Very difficult to read"
    elif flesch_score < 50:
        return "Difficult to read"
    elif flesch_score < 60:
        return "Fairly difficult to read"
    elif flesch_score < 70:
        return "Plain English"
    elif flesch_score < 80:
        return "Fairly easy to read"
    elif flesch_score < 90:
        return "Easy to read"
    else:
        return "Very easy to read"
54903df2bc4114de663fb85af8500fe1cb26ddc5
37,752
import requests


def get_asn(ip_address):
    """Get the ASN details of a given IP address."""
    try:
        data = requests.get(
            "https://api.iptoasn.com/v1/as/ip/{}".format(ip_address),
            timeout=5
        ).text
        return data
    except requests.exceptions.ConnectionError:
        return None
624847f27990d0da89aaab44aee44b3911e4b94a
366,321
from typing import Union
from pathlib import Path


def remove_suffix(path: Union[str, Path], suf: str) -> str:
    """Remove a suffix from a string, if it exists."""
    # modified from https://stackoverflow.com/a/18723694
    if isinstance(path, Path):
        path = str(path)
    if suf and path.endswith(suf):
        return path[: -len(suf)]
    return path
8e48152e4add2a8f3c4497ab0f3a083776799e5c
614,740
import torch


def bellman(qf, targ_qf, targ_pol, batch, gamma, continuous=True,
            deterministic=True, sampling=1, reduction='elementwise_mean'):
    """
    Bellman loss: Mean Squared Error between the left hand side and the
    right hand side of the Bellman Equation.

    Parameters
    ----------
    qf : SAVfunction
    targ_qf : SAVfunction
    targ_pol : Pol
    batch : dict of torch.Tensor
    gamma : float
    continuous : bool
        action space is continuous or not
    sampling : int
        Number of sampling in calculating expectation.
    reduction : str
        This argument takes only elementwise_mean, sum, and none.
        Loss shape is pytorch's manner.

    Returns
    -------
    bellman_loss : torch.Tensor
    """
    if continuous:
        obs = batch['obs']
        acs = batch['acs']
        rews = batch['rews']
        next_obs = batch['next_obs']
        dones = batch['dones']

        targ_pol.reset()
        _, _, pd_params = targ_pol(next_obs)
        pd = targ_pol.pd

        next_acs = pd.sample(pd_params, torch.Size([sampling]))
        next_obs = next_obs.expand([sampling] + list(next_obs.size()))
        targ_q, _ = targ_qf(next_obs, next_acs)
        next_q = torch.mean(targ_q, dim=0)

        targ = rews + gamma * next_q * (1 - dones)
        targ = targ.detach()
        q, _ = qf(obs, acs)

        ret = 0.5 * (q - targ)**2
        if reduction != 'none':
            ret = torch.mean(
                ret) if reduction == 'elementwise_mean' else torch.sum(ret)
        return ret
    else:
        raise NotImplementedError(
            "Only Q function with continuous action space is supported now.")
8f57db995c092c9ec81aa321c347439788ab06f3
100,239
def harmonic_mean(frequencies1, frequencies2):
    """Finds the harmonic mean of the absolute differences between two
    frequency profiles, expressed as dictionaries.
    Assumes every key in frequencies1 is also in frequencies2

    >>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
    1.0
    >>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':5, 'c':1}) # doctest: +ELLIPSIS
    1.285714285...
    >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
        normalise({'a':1, 'b':5, 'c':1})) # doctest: +ELLIPSIS
    0.228571428571...
    >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
        normalise({'a':1, 'b':1, 'c':1})) # doctest: +ELLIPSIS
    0.0
    >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
        normalise({'a':1, 'b':1, 'c':0})) # doctest: +ELLIPSIS
    0.2
    """
    total = 0.0
    for k in frequencies1:
        if abs(frequencies1[k] - frequencies2[k]) == 0:
            return 0.0
        total += 1.0 / abs(frequencies1[k] - frequencies2[k])
    return len(frequencies1) / total
c8701a5df020bd8f4d1655f406a13ffdf92cf362
678,613
def lldp_caps_to_bits(caps, caps_map):
    """
    Convert list of LLDP capabilities names to integer,
    suitable to IGetLLDPNeighbors remote_capabilities

    :param caps: List of LLDP capabilities names
    :param caps_map: name -> LLDP_CAP_* mapping. Name in lowercase
    :return: IGetLLDPNeighbors.remote_capabilities
    """
    r = 0
    for cap in caps:
        cv = caps_map.get(cap.lower())
        if cv is not None:
            r += cv
    return r
4b26d2319d74c1d1beb566e7484861157ef08727
513,538
def __avg__(list_):
    """Return average of all elements in the list."""
    return sum(list_) / len(list_)
3204d823e83bd43efccf9886acd3ae8b01e1d7a0
14,022
import json


async def _get_kubeconfig(model):
    """Get kubeconfig from kubernetes-master."""
    unit = model.applications["kubernetes-master"].units[0]
    action = await unit.run_action("get-kubeconfig")
    output = await action.wait()  # wait for result
    return json.loads(output.data.get("results", {}).get("kubeconfig", "{}"))
c48371cb4ac96f9821e4dd44d60d2bbed4e46df5
349,377
def pos_in_rect(rect, pos):
    """Return True if pos is in the rectangle"""
    pos_x, pos_y = pos
    x, y, width, height = rect
    return (x <= pos_x <= x + width
            and y <= pos_y <= y + height)
108d214965f3a4172bd5bc4608ec9b2c48908c10
71,400
def total_fluorescence_from_monomer(m, b, c_b_1):
    """
    Calculate fluorescence from monomer fraction, brightness relation and
    monomer brightness.
    """
    return c_b_1*(m+(1-m)*b)
275cbe906f18452fa83708062bf40896ed25ece5
459,173
def repr_type(obj):
    """Return a string representation of a value and its type for readable
    error messages.
    """
    the_type = type(obj)
    msg = '{!r} {!r}'.format(obj, the_type)
    return msg
4bc3dea793ed1a6c1b15107986847ce17321bb93
268,230
def news_url(newsitem):
    """
    Returns the URL of a newsitem depending on if it is based on a twitter
    post or not.
    """
    if newsitem.twitter_id:
        return newsitem.get_twitter_url()
    return newsitem.get_absolute_url()
1fbcb8719fb8a01d1c6e8be44e1baa31f3ea4290
242,285
def leftPadItems(alist):
    """Add a space to the beginning of each string in a given list."""
    return [' ' + item for item in alist]
8cd74bdf74c021a81532c8209774975fa5b6f9b4
65,399
import base64


def decrypt_password(encoded):
    """
    Decode a base64-encoded password (base64 is an encoding, not encryption).
    """
    password_decrypt_bytes = base64.b64decode(encoded)
    return password_decrypt_bytes.decode('utf-8')
4bdff76964226e39350c2dcac8dc03c34d5bcdb9
180,100
def factor_first_event(match_info, event_list, team_key):
    """
    Creates factor for an event in event_list

    Arguments:
        match_info: dict of match data containing the 'teams' list
        event_list: list of 'Event' objects
        team_key: string of the event type in the 'Team' object,
            e.g. 'firstTower'

    Returns:
        -1 if the event has not happened yet
        0 if red team did event
        1 if blue team did event
    """
    if len(event_list) > 0:
        first_event_team = match_info['teams'][0][team_key]
        return int(first_event_team)
    else:
        return -1
0691915ddc4fd81775068fa6a1fcda341cbedc3d
691,753
def median_val(vals):
    """
    :param vals: an iterable such as list
    :return: the median of the values from the iterable
    """
    n = len(vals)
    sorted_vals = sorted(vals)
    if n % 2 == 0:
        return (sorted_vals[n // 2] + sorted_vals[n // 2 - 1]) / 2
    else:
        return sorted_vals[n // 2]
b0255bce064d72c7031f81ed6b7f0b5f7c2c4ca0
649,971
def lowercase(lista):
    """Function to lowercase list of texts

    Args:
        lista ([list]): list of texts

    Returns:
        [list]: List of texts lowercased
    """
    return [text.lower() for text in lista]
2be877aa3b80c5e01eb4237625b426123d5b9976
697,099
def MSE_loss_grad_cupy(outi, out0):
    """
    Computes mean squared error gradient between targets and predictions.

    Input:
        outi: predictions (N, k) ndarray (N: no. of samples, k: no. of output nodes)
        out0: targets (N, k) ndarray (N: no. of samples, k: no. of output nodes)

    Returns: (N, k) ndarray

    Note: The averaging is only done over the output nodes and not over the
    samples in a batch. Therefore, to get an answer similar to PyTorch, one
    must divide the result by the batch size.
    """
    return 2*(outi-out0)/outi.shape[1]
53915983ce290baa1ebe03641e055bf66ba16906
491,859
def naive_string_matcher(string, target):
    """Returns all indices where substring target occurs in string."""
    snum = len(string)
    tnum = len(target)
    matchlist = []
    for index in range(snum - tnum + 1):
        if string[index:index + tnum] == target:
            matchlist.append(index)
    return matchlist
cdb42e37836b57952cb237a8de8ed6de4933dca7
205,481
from typing import Optional


def extract_filename_from_object_info(object_info: dict) -> Optional[str]:
    """Extracts the filename from the object_info.

    If a filename is in object_info, use that; otherwise try to extract it
    from one of the access methods. Returns the filename if found, else
    returns None.

    Args:
        object_info (dict): DRS object dictionary
    """
    if "name" in object_info and object_info["name"]:
        return object_info["name"]

    for access_method in object_info["access_methods"]:
        url = access_method["access_url"]["url"]
        parts = url.split("/")
        if parts:
            return parts[-1]

    return None
2e2c6e37147f8689703a38de200953648c066710
207,481
def calc_jaccard_score(span_1: dict, span_2: dict) -> float:
    """Calculate the jaccard score of the provided spans of tags.

    Jaccard index = Intersection of argument list / Union of argument list
    Ref: https://en.wikipedia.org/wiki/Jaccard_index

    Args:
        span_1 (dict): first span of tags.
            Format: {IndexOfTokenInSentence: "tag", ...}
        span_2 (dict): second span of tags.
            Format: {IndexOfTokenInSentence: "tag", ...}

    Returns:
        float: jaccard score for the given spans.
    """
    s1 = set(span_1)
    s2 = set(span_2)
    j_union = s1.union(s2)
    j_intersection = 0
    for sent_index in j_union:
        if sent_index in span_1 and sent_index in span_2:
            if span_1[sent_index] == span_2[sent_index]:
                # both spans have the same tag at the same position.
                j_intersection += 1
            elif span_1[sent_index] in ["B", "I"] and span_2[sent_index] in ["B", "I"]:
                # both spans have either a 'B' or an 'I' tag at the same
                # position. Thus, both spans of 'BI' tags intersect here.
                j_intersection += 1
            else:
                pass
    js = float(j_intersection / len(j_union))
    return js
53d1189bb20a4b2cbd535d1f852e2c7a7a79597e
229,840
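A worked example for calc_jaccard_score with hand-made spans:

    span_1 = {0: "B", 1: "I", 2: "O"}
    span_2 = {0: "B", 1: "B", 3: "O"}
    # union of indices: {0, 1, 2, 3} -> 4
    # index 0: identical tags ("B") -> +1
    # index 1: "I" vs "B", both in ["B", "I"] -> +1
    # indices 2 and 3 appear in only one span -> no contribution
    print(calc_jaccard_score(span_1, span_2))  # 0.5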
def find_first_peak(corr):
    """
    Find row and column indices of the first correlation peak.

    Parameters
    ----------
    corr : np.ndarray
        the correlation map

    Returns
    -------
    i : int
        the row index of the correlation peak
    j : int
        the column index of the correlation peak
    corr_max1 : float
        the value of the correlation peak

    Original code from openPIV pyprocess
    """
    ind = corr.argmax()
    s = corr.shape[1]

    i = ind // s
    j = ind % s

    return i, j, corr.max()
d17959bb446cdc2c0bc19dfb00c6adf4a0d4b1fb
146,405
def sum_fuel_across_sectors(fuels):
    """Sum fuel across sectors of an enduse if multiple sectors.
    Otherwise return unchanged `fuels`

    Arguments
    ---------
    fuels : dict or np.array
        Fuels of an enduse either for sectors or already aggregated

    Returns
    -------
    sum_array : np.array
        Sum of fuels of all sectors
    """
    if isinstance(fuels, dict):
        sum_array = sum(fuels.values())
        return sum_array
    else:
        return fuels
6a9dea9be2899bd9884106333c550e1ccf619883
675,125
import re


def counts(result):
    """
    Extract integer values from command's return value.
    """
    return [int(s) for s in re.findall(r"\d+", result)]
20b372d2a9e98aa03959d704b641e0ec87fe718c
167,399
def _singularity_image_name_on_disk(name: str) -> str:
    """Convert a singularity URI to an on disk sif name

    :param str name: Singularity image name
    :rtype: str
    :return: singularity image name on disk
    """
    docker = False
    if name.startswith('shub://'):
        name = name[7:]
    elif name.startswith('library://'):
        name = name[10:]
    elif name.startswith('oras://'):
        name = name[7:]
    elif name.startswith('docker://'):
        docker = True
        name = name[9:]
    # singularity only uses the final portion
    name = name.split('/')[-1]
    name = name.replace('/', '-')
    if docker:
        name = name.replace(':', '-')
        name = '{}.sif'.format(name)
    else:
        tmp = name.split(':')
        if len(tmp) > 1:
            name = '{}_{}.sif'.format(tmp[0], tmp[1])
        else:
            name = '{}_latest.sif'.format(name)
    return name
c4cb55cf0be9c0497a6df79051aef5855f45dd35
378,454
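Doctest-style checks for _singularity_image_name_on_disk, one per URI scheme branch (image names are hypothetical):

    >>> _singularity_image_name_on_disk('docker://ubuntu:20.04')
    'ubuntu-20.04.sif'
    >>> _singularity_image_name_on_disk('library://user/collection/image:1.0')
    'image_1.0.sif'
    >>> _singularity_image_name_on_disk('shub://vsoch/hello-world')
    'hello-world_latest.sif'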
import requests
from bs4 import BeautifulSoup


def scrape_hyperlinks(url):
    """Parses a web page and returns a list of all hyperlinks on it.

    Parameters
    ----------
    url : str
        URL of web page to scrape links from

    Returns
    -------
    links : list
        list of hyperlinks found
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")
    links = [x.get('href') for x in soup.findAll('a')]
    return links
182de26a1fee152b2d8e9bd5af46766b0fa22bb5
200,590