Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def subfolders_in(whole_path): """ Returns all subfolders in a path, in order >>> subfolders_in('/') ['/'] >>> subfolders_in('/this/is/a/path') ['/this', '/this/is', '/this/is/a', '/this/is/a/path'] >>> subfolders_in('this/is/a/path') ['this', 'this/is', 'this/is/a', 'this/is/a/path'] """ path_fragments = whole_path.lstrip('/').split('/') if whole_path.startswith('/'): path_fragments[0] = '/' + path_fragments[0] path = path_fragments[0] subfolders = [path] for fragment in path_fragments[1:]: path += '/' + fragment subfolders.append(path) return subfolders
a7389811a8acacea87abd55ba47892203e0b95e5
7,112
import re


def natural_keys(text):
    """Sort a list of strings containing numbers in human (natural) order.

    Examples
    ----------
    >>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']
    >>> l.sort(key=tl.files.natural_keys)
    ... ['im1.jpg', 'im03.jpg', 'im05.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']
    >>> l.sort()  # that is what we don't want
    ... ['im03.jpg', 'im05.jpg', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']

    References
    ----------
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    def atoi(text):
        return int(text) if text.isdigit() else text

    return [atoi(c) for c in re.split(r'(\d+)', text)]
92b63b281099c90fa08edcca29c01e83fc13b766
617,056
import time def is_time(value=''): """ Check if the value is time. Parameters ---------- value : str Value to check. Returns ------- boolean Whether the value is time. """ try: time.strptime(str(value), '%H:%M') except ValueError: return False return True
d9021143fc0a71f218c992f496021a19e74b4ede
357,622
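A quick usage sketch for is_time above, assuming the function is available as defined; the '%H:%M' format accepts hours 00-23 only.

print(is_time('23:59'))  # True
print(is_time('24:00'))  # False: hour 24 is rejected by '%H'
print(is_time('9:30'))   # True: '%H' also accepts a single-digit hour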
import json


def load_from_json_file(filename):
    """Creates an object from a JSON file.

    Arguments:
        filename (str) -- file name

    Returns:
        obj -- Python object.
    """
    with open(filename, 'r', encoding='utf-8') as file:
        return json.load(file)
5483027cd38de1ae49f2c7912bfc2bc3969eb567
199,560
def lower_text(text: str) -> str: """Transform all the text to lowercase. Args: text : Input text Returns: Output text """ return text.lower()
2a657464a014703464ca47eeb77ed6a630535819
5,112
from pathlib import Path
import string


def kernelspec_dir(kernelspec_store: Path, kernel_id: str) -> Path:
    """Return path to the kernelspec directory for a kernel with a given ID

    Parameters
    ----------
    kernelspec_store
        Path to the kernelspec store where the kernelspec directory should be placed.
    kernel_id
        Internal identifier of the kernel to install. Should be short and
        URL-friendly. Must contain only ASCII numbers, ASCII letters, hyphen,
        period, underscore.

    Raises
    ------
    ValueError
        If `kernel_id` contains improper characters.

    Returns
    -------
    Path
        Path object to the directory where the kernelspec should be installed.
    """
    allowed_characters = set(string.ascii_letters + string.digits + '_.-')
    if not set(kernel_id) <= allowed_characters:
        raise ValueError("kernel_id contains forbidden characters")
    return kernelspec_store / kernel_id
0fb4f40dd6f54d144f314e64873e1f5dca27731b
656,283
import random def generate_random(residual): """ Helper function for max_min to generate random numbers """ if residual < 0: return 0, 0 first = random.randint(1, 20) sec = random.randint(1, 20) tot = float(first + sec) return (first * residual / tot, sec * residual / tot)
66a23cc177086f58fc8f1d7e5db947537c615ac8
468,920
def make_flight_url(date_str, flight_num, route_num): """ Takes date string, flight number, and route number, returns URL to search for a particular flight/route/day. --- IN: date string YYYYMMDD (str), flight_num (str), route_num (str) OUT: search URL (str) """ # base URL constructors base_url1 = 'http://flightaware.com/live/flight/' base_url2 = '/history/' base_url3 = '/KLGA/KORD' # merge vars with URL bases search_url = (base_url1 + flight_num + base_url2 + date_str + '/' + route_num + base_url3) return search_url
10129ee217583d87e488613e33a5b6ec3318852b
232,417
def _parse_post(post: dict) -> dict: """Helper method - parse news response object to dictionary with target structure. [Source: https://cryptopanic.com/] Parameters ---------- post: dict Response object from cryptopanic api. Returns ------- dict Parsed dictionary with target data structure. """ return { "published_at": post.get("published_at"), "domain": post.get("domain"), "title": post.get("title"), "negative_votes": post["votes"].get("negative"), "positive_votes": post["votes"].get("positive"), "link": post["url"], }
0191f4fb46fe2f00330e175602645fe6ef5120da
382,158
def is_valid_total_length(index, date_to_sent_mapping, all_sent_dates, timeline_properties):
    """
    Checks whether adding the sentence in focus would not violate the constraint
    of limiting the total number of sentences in a timeline. Corresponds to the
    AsMDS constraint described in Martschat and Markert (2018).

    Params:
        index (int): Index of the sentence in focus.
        date_to_sent_mapping (dict(datetime.datetime, list(tilse.data.sentences.Sentence))):
            Mapping of dates in a timeline to the sentences in the summary for this
            date (describes the partial timeline constructed so far).
        all_sent_dates (list(datetime.datetime)): Dates for all sentences. In
            particular, `all_sent_dates[index]` is the date of the sentence in focus.
        timeline_properties (tilse.models.timeline_properties.TimelineProperties):
            Properties of the timeline to predict.

    Returns:
        False if (i) the date of the sentence in focus is before the start or after
        the end date of the timeline as defined in `timeline_properties`, or (ii)
        adding the sentence in focus would lead to a timeline with more sentences
        than `timeline_properties.num_sentences`; True otherwise.
    """
    selected_date = all_sent_dates[index]

    if selected_date < timeline_properties.start \
            or selected_date > timeline_properties.end:
        return False

    return sum([len(sents) for sents in date_to_sent_mapping.values()]) \
        < timeline_properties.num_sentences
75dda3386a5f5899e2e8c9b418357e4c69c5817f
579,127
import pickle def load_dictionary(file_path): """ Loads a categorical variable dictionary that was saved in pickle format. """ with open(file_path, "rb") as dictionary_file: return pickle.load(dictionary_file)
ea500d9739d725f2889f83a3d935f708600eb52e
34,658
def calc_bolometric_luminosity(cont_lwav, cont_wav, reference='Shen2011'): """Calculate the bolometric luminosity from the monochromatic continuum luminosity (erg/s/A) using bolometric correction factors from the literature. The following bolometric corrections are available cont_wav = 1350, reference = Shen2011 cont_wav = 3000, reference = Shen2011 cont_wav = 5100, reference = Shen2011 The Shen et al. 2011 (ApJ, 194, 45) bolometric corrections are based on the composite spectral energy distribution (SED) in Richards et al. 2006 ( ApJ, 166,470). :param cont_lwav: Monochromatic continuum luminosity in erg/s/A. :type cont_lwav: astropy.units.Quantity :param cont_wav: Wavelength of the monochromatic continuum luminosity in A. :type cont_wav: astropy.units.Quantity :param reference: A reference string to select from the available \ bolometric corrections. :type reference: string :return: Returns a tuple of the bolometric luminosity in erg/s and a \ reference string indicating the publication and continuum wavelength \ of the bolometric correction. :rtype: astropy.units.Quantity, string """ if cont_wav.value == 1350 and reference == 'Shen2011': reference = 'Shen2011_1350' return cont_lwav * cont_wav * 3.81, reference if cont_wav.value == 3000 and reference == 'Shen2011': reference = 'Shen2011_3000' return cont_lwav * cont_wav * 5.15, reference if cont_wav.value == 5100 and reference == 'Shen2011': reference = 'Shen2011_5100' return cont_lwav * cont_wav * 9.26, reference else: raise ValueError('[ERROR] No bolometric correction available for the ' 'supplied combination of continuum wavelength and ' 'reference.')
e5ecc7b602a8467961cdde7040ae2e45bd0cfe3a
188,573
import ast def convert_source_to_tree(fpath: str) -> ast.Module: """Convert source code into abstract syntax tree. Args: fpath: Path to the Python file of interest Returns: AST representation of the source code """ with open(fpath, 'r') as f: tree = ast.parse(f.read()) return tree
b7fe1d10036a032afead4ce8c94b6bc98814034a
288,141
def tabulate(data, *, as_list=False, codeblock=False, language="prolog", middle_block=" :: "): """Create a pretty codeblock table Uses hljs's prolog syntax highlighting Recommended hljs languages (for codeblocks): - prolog (default) - asciidoc Parameters ----------- data: :class:`List[List[name, value]] The data to turn into a table as_list: Optional[:class:`bool`] Whether to return a list of strings. Overrides codeblock codeblock: Optional[:class:`bool`] Whether to return the table in a codeblock language: Optional[:class:`str`] The hljs language to use for syntax highlighting """ # Go though the data and find the longest name longest_name = 0 for name, value in data: name_len = len(name) if name_len > longest_name: longest_name = name_len # Format the data, using the longest name as a reference # for adding on spaces to other names table = [] for name, value in data: # Add on extra spaces if needed to_add = "".join(" " for i in range(longest_name - len(name))) table.append(f"{name}{to_add}{middle_block}{value}") if as_list: return table final_table = "\n".join(table) # Append a codeblock if specified if codeblock: final_table = f"```{language}\n{final_table}\n```" return final_table
a913dc3c579cd66af66f48886e9914be0c73f1f1
134,243
def k_func(h, s_H, s_K, g, n, alpha, phi, delta, k):
    """Args:
        h     (float): Human capital
        s_H   (float): Investments in human capital
        s_K   (float): Investments in physical capital
        g     (float): Growth in technology
        n     (float): Growth rate of population/labour force
        delta (float): Depreciation rate
        alpha (float): Return on physical capital
        phi   (float): Return on human capital
        k     (float): Physical capital

    Returns:
        The transition equation for physical capital per capita
    """
    return 1 / ((1 + n) * (1 + g)) * (s_K * k**alpha * h**phi + (1 - delta) * k) - k
732fbea0c3c7ef2e710593f6825574a0577f1313
634,823
def cote(v):
    """
    Return the cote (z-coordinate) of the vector ``v``.

    Arguments:
        v (array): A vector in 3D space
    """
    return v[2]
8f182d44847094547b9a4d0d81213e62cf1ed8f1
394,981
def secret_errors(vol_desc): """Sanity check a Secret volume description and return a string for the first problem found. Return None if no problem is found. """ required_fields = ['secret_name'] accepted_fields = ['secret_name', 'items', 'default_mode', 'optional'] secret = vol_desc.get('secret', None) if secret is None: return "No secret specification in secret volume description" for field in required_fields: if field not in secret.keys(): return "Secret specification missing required '%s'" % field for field in secret.keys(): if field not in accepted_fields: return "Secret specification has unrecognized '%s'" % field return None
291fe1a1a0231c4fcba6eb7417a22e5c2f654088
479,379
from typing import Tuple import math def quaternion_to_euler(x: float, y: float, z: float, w: float) -> Tuple[float, float, float]: """ Convert quaternion x, y, z, w to euler angle roll, pitch, yaw Args: x (float): quaternion x y (float): quaternion y z (float): quaternion z w (float): quaternion w Returns: Tuple: (roll, pitch, yaw) in radian """ # roll (x-axis rotation) sinr_cosp = 2.0 * (w * x + y * z) cosr_cosp = 1.0 - 2.0 * (x * x + y * y) roll = math.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (w * y - z * x) if abs(sinp) >= 1.0: pitch = math.copysign(math.pi / 2.0, sinp) # use 90 degrees if out of range else: pitch = math.asin(sinp) # yaw (z-axis rotation) siny_cosp = 2.0 * (w * z + x * y) cosy_cosp = 1.0 - 2.0 * (y * y + z * z) yaw = math.atan2(siny_cosp, cosy_cosp) return roll, pitch, yaw
ebf5f107bf39aaae8a4738d9dd0ab0a4c737aca3
520,652
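A small sanity check for quaternion_to_euler above; the quaternion (0, 0, sin(pi/4), cos(pi/4)) represents a 90-degree rotation about the z-axis, so the expected result is roll 0, pitch 0, yaw pi/2.

import math

roll, pitch, yaw = quaternion_to_euler(0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4))
print(round(roll, 6), round(pitch, 6), round(yaw, 6))  # 0.0 0.0 1.570796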
def daterange_to_str(daterange): """ Takes a pandas DatetimeIndex created by pandas date_range converts it to a string of the form 2019-01-01-00:00:00_2019-03-16-00:00:00 Args: daterange (pandas.DatetimeIndex) Returns: str: Daterange in string format """ start = str(daterange[0]).replace(" ", "-") end = str(daterange[-1]).replace(" ", "-") return "_".join([start, end])
a57be32e5adf96ec44cfb80d0d3830d645c9817b
82,827
def _has_valid_shape(table): """Returns true if table has a rectangular shape.""" if not table.columns: return False if not table.rows: return False num_columns = len(table.columns) for row in table.rows: if len(row.cells) != num_columns: return False return True
8b51f96b46ae0d8b586df68ebe192410b390273d
49,724
def _bin_to_int(b_list): """ Convert b_list, a list of {0,1}, to an integer """ out = 0 for b in b_list: out = (out << 1) | b return out
a82e123348e892e95d5ca476005c34a4d9ead7eb
62,746
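A short usage sketch of _bin_to_int above: bits are consumed most-significant first.

print(_bin_to_int([1, 0, 1, 1]))  # 11  (0b1011)
print(_bin_to_int([]))            # 0   (an empty list maps to zero)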
import random def random_range(value, variance=0.0): """Return a random value within range with variance """ value_range = [value * (1.0 - variance), value] return random.uniform(*value_range)
36cd95b83e09ce2a2b0767b3f0348ac8d8724a07
108,338
import torch def sinc3(t): """ sinc3: t -> (t - sin(t)) / (t**3) """ e = 0.01 r = torch.zeros_like(t) a = torch.abs(t) s = a < e c = (s == 0) t2 = t[s] ** 2 r[s] = 1/6*(1-t2/20*(1-t2/42*(1-t2/72))) # Taylor series O(t^8) r[c] = (t[c]-torch.sin(t[c]))/(t[c]**3) return r
211f44157edc90ef6e6974d85a02c6bcf2837777
643,441
def spltime(tseconds): """ This gets the time in hours, mins and seconds """ hours = tseconds // 3600 minutes = int(tseconds / 60) % 60 seconds = tseconds % 60 return hours, minutes, seconds
a8ba14879da51ebbeac2ba201fc562a22fe13364
707,771
from typing import Optional
from typing import List
import random


def generate_random_number(
    length: int = 6, forbidden_first_digit: Optional[List[int]] = None
) -> str:
    """
    Generate a random number with the provided length (number of digits),
    ensuring that two neighboring digits are always different and that each
    digit has a value between 0 - 9.
    """
    result = ""

    while len(result) < length:
        random_value = random.randint(0, 9)

        if len(result) == 0:
            # First digit
            if forbidden_first_digit and random_value in forbidden_first_digit:
                continue
            result += str(random_value)
        else:
            # Make sure it's different than the previous digit
            if random_value == int(result[-1]):
                continue
            result += str(random_value)

    return result
effbec2feeebd09c541e183bac1c82fe167f3e47
62,708
import math


def latlon_distance_conversion(lat):
    """
    Given a latitude (in degrees), returns (mperlat, mperlon) where
    mperlat = meters per degree latitude, mperlon = meters per degree longitude.
    These calculations are based on a spherical earth, and were taken from
    http://www.nga.mil/MSISiteContent/StaticFiles/Calculators/degree.html
    """
    # Convert degrees to radians before feeding the latitude to the trig terms.
    lat_rad = lat * 2.0 * math.pi / 360.0
    m1 = 111132.92
    m2 = -559.82
    m3 = 1.175
    m4 = -0.0023
    p1 = 111412.84
    p2 = -93.5
    p3 = 0.118
    latlen = m1 + (m2 * math.cos(2 * lat_rad)) + (m3 * math.cos(4 * lat_rad)) + \
        (m4 * math.cos(6 * lat_rad))
    lonlen = (p1 * math.cos(lat_rad)) + (p2 * math.cos(3 * lat_rad)) + \
        (p3 * math.cos(5 * lat_rad))
    return (latlen, lonlen)
4dc18463188776dab12682d6ceaf56516686bacd
441,565
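A rough usage sketch for latlon_distance_conversion above; at the equator one degree of latitude is roughly 110.6 km and one degree of longitude roughly 111.3 km, which the series reproduces.

mperlat, mperlon = latlon_distance_conversion(0.0)
print(round(mperlat, 1), round(mperlon, 1))  # approximately 110574.3 111319.5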
import torch def inference_collate_batch(batch): """Collate a batch of data.""" feat_paths, mels = zip(*batch) return feat_paths, torch.stack(mels)
a2ecd6ef4ea634ac453fa7e5cafce97c0dddcb9f
693,174
import pickle def load_egg(filepath): """ Loads pickled egg Parameters ---------- filepath : str Location of pickled egg Returns ---------- egg : Egg data object A loaded unpickled egg """ with open(filepath, 'rb') as f: egg = pickle.load(f) return egg
f2d5190bd09c80c58418e71371643c1f4649293d
193,735
def create_row(size): """Returns a single, empty row with the given size. Each empty spot is represented by the string '-'. >>> create_row(5) ['-', '-', '-', '-', '-'] """ return ['-' for i in range(size)]
6a9a01efab2bae00f016b58324e187f734f904cd
636,370
import collections def count_packages(budgets, all_manifests): """Returns packages that are missing, or present in multiple budgets.""" package_count = collections.Counter( package for budget in budgets for package in budget["packages"]) more_than_once = [ package for package, count in package_count.most_common() if count > 1 ] zero = [package for package in all_manifests if package_count[package] == 0] return more_than_once, zero
124b034a007a3048233f27ed09a76d5e684b2688
120,938
def make_poly(bit_length, msb=False):
    """Make `int` "degree polynomial" in which each bit represents a degree
    whose coefficient is 1

    :param int bit_length: The amount of bits to play with
    :param bool msb: `True` makes only the MSBit 1 and the rest 0.
        `False` makes all bits 1.
    """
    if msb:
        return 1 << ((8 * int(bit_length / 8)) - 1)
    result = 0
    for x in range(int(bit_length / 8)):
        result += 0xff << int(x * 8)
    return result
f700339a0fc90a6110306329b844eec116a26b7f
365,518
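A brief illustration of make_poly above, assuming it is called with bit lengths that are multiples of 8.

print(hex(make_poly(16)))            # 0xffff  (all bits set)
print(hex(make_poly(16, msb=True)))  # 0x8000  (only the most significant bit)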
def is_correct_score(score): """ Check whether score is in [0.0, 10.0]. :param score: base score :return: True if valid """ return 0.0 <= score <= 10.0
b3be2464a720d2eddcdc6878f725946412b60606
94,374
import torch def depth_to_world(projection, depth): """ backprojects depth maps to point clouds Args: projection: 3x4 projection matrix depth: hxw depth map Returns: tensor of 3d points 3x(h*w) """ # add row to projection 3x4 -> 4x4 eye_row = torch.tensor([[0,0,0,1]]).type_as(depth) projection = torch.cat((projection, eye_row)) # pixel grid py, px = torch.meshgrid(torch.arange(depth.size(-2)).type_as(depth), torch.arange(depth.size(-1)).type_as(depth)) pz = torch.ones_like(px) p = torch.cat((px.unsqueeze(0), py.unsqueeze(0), pz.unsqueeze(0), 1/depth.unsqueeze(0))) # backproject P = (projection.inverse() @ p.view(4,-1)).view(p.size()) P = P[:3]/P[3:] return P
bd4938732ac679a32b41a7b28e42b16312835170
609,639
def compare_dict_keys(dict1, dict2): """ Compare dict1 keys with dict2 keys and see if dict1 has extra keys compared to dict2 Parameters: dict1 (dict): response dict from API dict2 (dict): mock dict Returns: Set of keys """ return dict1.keys() - dict2.keys()
647159dd5eb7dbb8129c65ed2b03e31c073af41e
133,846
def build_clf(X, y, clf_class, **kwargs): """build any classifier that implements a fit method with given parameters""" clf = clf_class(**kwargs) clf_fit = clf.fit(X, y) return clf_fit
79b8ae1979a4363ca49f32b4c07a0be74e9a00bb
546,686
from pathlib import Path def default_raw_csv_filename(job_id): """ Returns the filename for the raw CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller. """ assert isinstance(job_id, str) return Path(f'original_{job_id}.csv')
4c3a53a4e9c0b1940de2d73fe4db70a77669704a
385,678
def sanitize_indexes(indexes: list, user_input: str) -> list:
    """
    Given a list of valid indexes and a comma-separated string of (supposedly)
    user-entered indexes, return the list of indexes that are valid.

    :param list indexes: list of permitted indexes
    :param str user_input: comma-separated list of indexes
    :return list: list of valid indexes
    """
    res = set()
    max_val = len(indexes)
    for index in user_input.split(","):
        index = index.strip()
        if index.isdigit() and int(index) - 1 < max_val:
            res.add(int(index) - 1)
    return list(res)
f9738cf3401bfc14e44b0c3d4ae0693706a06c05
147,562
def get_wanted_statistics(features, statistics): """ Select the wanted statistics from the whole description Parameters ---------- features : list of source features All source feature which you are interested in statistics : list of string The statistics you want to select Returns ------- list of tuple Tuples in the form (feature, statistic) for all statistics and parameters passed as arguments """ result = [[(param, stat) for stat in statistics] for param in features] result = [item for sublist in result for item in sublist] return result
56f0389ead768822dbf15ff22b213712aa4c1010
212,612
import time


def datetime_to_integer(datetime):
    """Convert datetime object to integer value.

    'datetime' is the datetime object
    """
    # time.mktime returns a float, so cast to int to match the stated return type.
    return int(time.mktime(datetime.timetuple()))
8d4d94fac947c3dd9e82ee3d60a1a57a6440457d
699,886
def preamble_for_label(label): """ Return the preamble for the documentation block for the given label. :param label: The label to use as the paragraph title. :return: The string that should be preamble of the new section. """ preamble = str("\n" + " "*4 + "@par " + label + ":\n\n") preamble += str(" "*4 + "@htmlonly\n\n") return preamble
876a242a1a3807a1298753389d8619115d498ed8
45,046
def has_errors(result): """This function checks if a GqlResponse has any errors. Args: result (GqlResponse): [data, errors] Returns: (boolean): Returns `True` if a transaction has at least one error. """ _, errors = result return len(errors) > 0
15fddcf9b2231c946fabb6603edc2635c8b9478f
16,518
import collections def _counts(data): """Return a count collection with the highest frequency. >>> _counts([1, 1, 2, 3, 3, 3, 3, 4]) [(3, 4)] >>> _counts([2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]) [(1.25, 2)] """ table = collections.Counter(iter(data)).most_common() if not table: return table maxfreq = table[0][1] for i in range(1, len(table)): if table[i][1] != maxfreq: table = table[:i] break return table
6d37d78e16d1b00a36120714aae68182ce225930
128,633
def pythagorean_triples(n): """ Returns list of all unique pythagorean triples (a, b, c) where a < b < c <= n and a*a + b*b == c*c. """ l = [] # loop over all a < b < c <= n for c in range(1, n + 1): for b in range(1, c): for a in range(1, b): if a*a + b*b == c*c: l.append((a, b, c)) return l
a85d3dfb1d732797c95ef6eeca2af04a9231f0f0
418,856
def load_config(path): """ Load device configuration from file path and return list with parsed lines. :param path: Location of configuration file. :type path: str :rtype: list """ args = [] with open(path, 'r') as fp: for line in fp.readlines(): if line.strip() and not line.startswith("#"): args.append(line.replace("\n", "")) return args
0413ccc633134d1279299d3154d88b8f866950a9
503,546
def get_subsecond_component(frac_seconds, frac_seconds_exponent, subsec_component_exponent, upper_exponent_limit): """Return the number of subseconds from frac_seconds * (10**frac_seconds_exponent) corresponding to subsec_component_exponent that does not exceed upper_exponent_limit. For example: If frac_seconds*(10**frac_seconds_exponent) is 0.1234567, upper_exponent_limit is SITimeUnit.SECONDS, and subsec_component_exponent is SITimeUnit.MICROSECONDS, 123456 would be returned. If frac_seconds*(10**frac_seconds_exponent) is 0.123456789, upper_exponent_limit is SITimeUnit.MICROSECONDS, and subsec_component_exponent is SITimeUnit.NANOSECONDS, 789 would be returned. Same example as above, but with upper_exponent_limit = SITimeUnit.SECONDS, 123456789 would be returned. """ total_subsecs = int(frac_seconds * (10 ** (frac_seconds_exponent - subsec_component_exponent))) return total_subsecs % (10 ** abs(subsec_component_exponent - upper_exponent_limit))
5922cb41a6a9dcf0874ff353f657de062b8763f8
632,675
from typing import List def moving_avg(inputs: list, w: int = 10) -> List[float]: """Computes the moving average of a list over a sliding window.""" cumsum, moving_avgs = [0], [] for i, x in enumerate(inputs, 1): cumsum.append(cumsum[i-1] + x) if i >= w: moving_avg = (cumsum[i] - cumsum[i-w]) / w moving_avgs.append(moving_avg) return moving_avgs
130bdcb17424a71d9d4dafc53ea875ec4ec6de7f
419,689
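A short usage sketch of moving_avg above; with a window of 2 the first average appears once two values have been seen.

print(moving_avg([1, 2, 3, 4, 5], w=2))  # [1.5, 2.5, 3.5, 4.5]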
def filter_dictionary_by_resolution(raw_data, threshold=False): """Filter SidechainNet data by removing poor-resolution training entries. Args: raw_data (dict): SidechainNet dictionary. threshold (float, bool): Entries with resolution values greater than this value are discarded. Test set entries have no measured resolution and are not excluded. Default is 3 Angstroms. If False, nothing is filtered. Returns: Filtered dictionary. """ if not threshold: return raw_data if isinstance(threshold, bool) and threshold is True: threshold = 3 new_data = { "seq": [], "ang": [], "ids": [], "evo": [], "msk": [], "crd": [], "sec": [], "res": [] } train = raw_data["train"] n_filtered_entries = 0 total_entires = 0. for seq, ang, crd, msk, evo, _id, res, sec in zip(train['seq'], train['ang'], train['crd'], train['msk'], train['evo'], train['ids'], train['res'], train['sec']): total_entires += 1 if not res or res > threshold: n_filtered_entries += 1 continue else: new_data["seq"].append(seq) new_data["ang"].append(ang) new_data["ids"].append(_id) new_data["evo"].append(evo) new_data["msk"].append(msk) new_data["crd"].append(crd) new_data["sec"].append(sec) new_data["res"].append(res) if n_filtered_entries: print(f"{n_filtered_entries} ({n_filtered_entries/total_entires:.1%})" " training set entries were excluded based on resolution.") raw_data["train"] = new_data return raw_data
eca9edd4c79dfd06006339c488ace2d21b6e621f
120,486
import base64


def decode_base64(b):
    """Decode a base64 string to bytes."""
    return base64.b64decode(b)
d340c0e7212887738283f2c77db60ac5016414da
424,837
def _clip(n: float) -> float:
    """
    Helper function to emulate numpy.clip for the specific use case of
    preventing math domain errors on the acos function by "clipping" values
    that are > abs(1).

    e.g.
    _clip(1.001) == 1
    _clip(-1.5) == -1
    _clip(0.80) == 0.80
    """
    if abs(n) > 1:
        # Only compute the sign when clipping is needed; this also avoids a
        # ZeroDivisionError for n == 0.
        sign = n / abs(n)
        return 1 * sign
    return n
78308e70a405b3b3d94827c00705e8842b242cde
699,291
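A minimal sketch of the intended use of _clip above: guarding math.acos against floating-point values that drift just outside [-1, 1].

import math

print(math.acos(_clip(1.0000001)))   # 0.0 instead of a math domain error
print(math.acos(_clip(-1.0000001)))  # 3.141592653589793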
import importlib


def load_class(fully_qualified_class_name):
    """
    Dynamically loads/imports a class by its fully qualified name.

    Note - It returns the class **type**, NOT an instance of the class.

    Usage -
        `my_class = load_class('my_package.my_module.MyClass')`
        `my_class_obj = my_class()`
    """
    class_data = fully_qualified_class_name.split(".")
    module_path = ".".join(class_data[:-1])
    class_str = class_data[-1]
    module = importlib.import_module(module_path)
    return getattr(module, class_str)
686d2fc6c0826c738ee96729ae044319c96fcd3b
426,269
def process_weather_station_data(df, station_name): """ Rename weather station data so each station's weather data can be distinguished. Selects only weather features to include in the dataframe. """ df = df.iloc[:, 3:].copy() new_names = [(i,f"{station_name}_{i}") for i in df.iloc[:, 1:].columns.values] return df.rename(columns = dict(new_names))
0dd86a888d251d231c9f9b394614d5081dbc7bbc
87,170
import json def get_rig_width(parent, rig_fn): """Finds the camera image width. Args: parent: class instance rig_fn (str): Path to the rig Returns: int: Camera image width. """ with open(rig_fn, "r") as f: rig = json.load(f) return int(rig["cameras"][0]["resolution"][0])
fcd37c8f5ad04e932b427906b6b3b1f7cc032f1e
180,219
def _find_matching_brackets(args):
    """
    Given a string starting with an open bracket, return:
    - the substring from the opening bracket to the matching closing bracket
    - the remainder
    """
    open_count = 0
    # Scan the whole string, including the final character, so a closing
    # bracket at the very end is still matched.
    for index in range(len(args)):
        if args[index] == '(':
            open_count += 1
        elif args[index] == ')':
            open_count -= 1
            if open_count == 0:
                return args[0:index + 1], args[index + 1:]
    if open_count > 0:
        raise Exception("Unmatched brackets in: %s" % repr(args))
    return args
8bdc604cea7652af80304288855c4f1b2d0e178c
388,366
def _df_elements(df): """Yields all the values in the data frame serially.""" return (x for row in df.itertuples() for x in row)
6b9ae811693539f374e3125f870788c574a7eb79
665,708
def standardize_data(data, mu=0.0, sigma=1.0): """ Rescales the input subtracting mu and dividing by sigma. Args: data (numpy.ndarray): data to be standardized. mu (float) sigma (float) Returns: numpy.ndarray: standardized data """ data -= mu data /= sigma return data
e1b8584a88bfc0e3d9b150b6811ae95d4a39d8c1
483,050
def list_check(lst): """Are all items in lst a list? >>> list_check([[1], [2, 3]]) True >>> list_check([[1], "nope"]) False """ t = [1 if isinstance(x, list) else 0 for x in lst] return len(lst) == sum(t)
9e2c55cb6e15f89ff2b73a78d5f15310d3cac672
706,304
def noNewLine(line): """ Delete all '\n' and '\r' characters in a string. """ return line.replace('\n', '').replace('\r', '')
364e322e7fc6cab22ab16230c6b4988b0523d43e
201,714
def WritePacketRaw(header, data): """ Given two raw bytearrays, return a bytearray representing the entire packet """ return header + data
476749834ac680505863c2876f991f6f0e46385b
677,666
import re def remove_chars(s): """ Remove characters from a string that have unintended effects on file paths. :param s: str :return: str """ return ''.join(re.split('[$/\\\\]', s))
fdc911b13f7bf082ad0eb835d41eef0182f38fd3
108,150
def projxmlpath(request): """Get the filepath to the XML file under test""" return request.config.getoption("--projxmlfile")
e4bbd281e36af661a6989769f76b8a39b242638a
514,367
def invert_dict(front_dict): """ Take a dict of key->values and return values->[keys] """ back_dict = { value : [] for value in front_dict.values() } for key, value in front_dict.items(): back_dict[value].append(key) return back_dict
d936ab299f20b98272534097887070df5d08a931
610,697
def mk_parser(header, null_values=('',)): """ Return a function that will parse a record according to the given header. header: Sequence<(str, function<T>(str)->T)> Indexable collection of (name, func) pairs where the function parses a string into a value of the desired type. null_values: Collection<str> Set of unparsed values to replace with `None` instead of parsing. """ hdr_len = len(header) def parse_record(record): return [(parse(text) if text not in null_values else None) for ((_, parse), text) in zip(header, record)] return parse_record
37f4187032e866b43011af4f2a8cb1efc67757a7
68,124
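A usage sketch of mk_parser above, assuming a two-column header of an integer id and a free-text name; empty strings fall back to None.

parse = mk_parser([("id", int), ("name", str)])
print(parse(["42", "alice"]))  # [42, 'alice']
print(parse(["7", ""]))        # [7, None]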
import importlib def build_augmentations(source): """ Creates list of augmentations classes. :param source: name of augmentation :return: list of augs """ aug_names = source['augmentations'] augs = [] for aug in aug_names: AugClass = getattr(importlib.import_module(f'augmentations.aug_{aug}'), 'Augmentations') augs.append(AugClass()) return augs
02289a9c155f4025eea71da96e72c922f01fb452
371,105
def build_tags(item): """ Build the row of tags for a CSV file :param item: A test item, normally the first test item in responses of a test :return: CSV format string """ if item['type'] == 1: # Question return '' elif item['type'] == 2 or item['type'] == 3: # Example or training also will get tags fields if 'example' in item and 'tags' in item['example']: return (item['example']['tags'] or '').replace(',', '|') else: return '' else: return None
86333b26b655776e9909b26f3051b632834d61f5
439,705
def coins(p = 0, moeda = 'R$'): """ => Formatting function :param p: Original price :param moeda: currency :return: returns the formatted price """ return f'{moeda}{p:>.2f}'.replace('.',',')
336c9fbca306251a9c22b009c53e35ef7c3c3164
267,409
def segment_spectrogram(spectrogram, duration_s, fft_rate): """ Segment the spectrogram into successive subspectrograms of given length and returns the list of all subspectrograms """ spectrograms = [] sub_len = int(duration_s*fft_rate) n_subspectros = int(spectrogram.shape[1]/sub_len) for i in range(n_subspectros): spectrograms.append(spectrogram[:,sub_len*i:sub_len*(i+1)]) return spectrograms
ea19419a0b0bf2dce4dbf2fafaad5f01b43550c5
441,126
def checkfile(findstring, fname):
    """ Checks whether the string can be found in a file """
    # Use a context manager so the file handle is closed promptly.
    with open(fname) as f:
        if findstring in f.read():
            # print(findstring, "found in", fname)
            return True
    return False
6f9afa50cf776c977010bbc14f95bd107f8a62d3
686,219
def calc_absent(marks): """ Function which returns the count of absent students. """ absent_count = 0 for mark in marks: if mark == 0: absent_count += 1 return absent_count
444c56dcabe4824c76f44bf07119fb14eedec15f
38,514
import hashlib def make_part_address(namespace_prefix, part_id): """ Creates a part address which will be used to recover the associated UUID if the part already exists in the state storage; or, used as a key to store the new data into the state storage. Args: namespace_prefix (str): The prefix associating with the transaction family part_id (str): The uuid of the part Returns: type: str The address-to-be, which associates the uuid and the namespace prefix. """ return namespace_prefix + \ hashlib.sha512(part_id.encode("utf-8")).hexdigest()[:64]
871f208de9292b898853e4b3a172065bcc8c6330
70,032
import requests def getVideoStats(self, id): """Returns stats of a video.""" res = requests.get(self.baseUrl + "api/v1/stats", params={"id": id}) return res.json()
d5b5af7548e20e90f2cf65292c6dfee81f14b45d
120,979
def _class_required(type_, class_, params):
    """Return True if the method requires a `cls` argument (i.e. it is a classmethod)."""
    if not params or class_ is None:
        return False
    return type_ == 'classmethod'
8e4f92074e06e3bc22a1bea435534c062e72761b
650,360
import re


def get_numbers_from_file(path, skip_lines=2):
    """
    Function to read a file line-wise and extract numbers.

    Parameters
    ----------
    path: str
        Path to the file including the filename.
    skip_lines: int
        Number of lines to skip at the beginning of the file.

    Returns
    -------
    lst: list
        A list with separate entries for the numbers found.
    """
    with open(path, "r") as data_file:
        lst = []
        for string in data_file:
            line = re.findall(
                r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", string
            )
            lst.append(line)
        del lst[0:skip_lines]
    return lst
64026a6c5cf9aa16076a3c8872663ab7996c1add
12,445
def get_cluster_from_pattern(pattern, clusters): """ Helper function to return the cluster a pattern is in Args: pattern: A tuple of three values clusters: A list of lists of patterns Returns: The list containing the pattern """ for cluster in clusters: if pattern in cluster: return cluster else: raise RuntimeError("Pattern not found in cluster")
ba976292b2c50b208fd36aae23fad02bc7a6b47d
421,614
import requests import io def fetch(url_or_path): """Fetches a file from an HTTP or HTTPS url, or opens the local file.""" if str(url_or_path).startswith("http://") or str(url_or_path).startswith("https://"): r = requests.get(url_or_path) r.raise_for_status() fd = io.BytesIO() fd.write(r.content) fd.seek(0) return fd return open(url_or_path, "rb")
368f8ee43e61e7714c12fa5194283af731d02c68
92,193
def check_entity_schema( sg, logger, entity_type, field_name, field_types, required_values ): """ Verifies that field_name of field_type exists in entity_type's schema. :param sg: An authenticated Shotgun Python API instance. :param entity_type: str, a Shotgun entity type. :param field_name: str, the name of a field on entity_type. :param field_types: list, the Shotgun field types that field_name should be. :param required_values: list, values that must exist if the field Shotgun type is a list or status list. """ # Make sure we can read the schema. try: entity_schema = sg.schema_field_read(entity_type) except Exception as e: logger.warning('Can\'t read SG schema for entity "%s": %s' % (entity_type, e)) return # Grab the Shotgun field data type, if the field exists. sg_type = entity_schema.get(field_name, {}).get("data_type", {}).get("value") # Assume the entity doesn't exist in Shotgun and bail if no data_type value # was found. if not sg_type: logger.warning( '%s entity %s field "%s" does not exist in Shotgun, please fix.' % ( entity_type, field_types, field_name, ) ) return # Make sure the field is the correct Shotgun type. if sg_type not in field_types: logger.warning( 'SG field "%s" is type "%s" but should be of type(s) "%s," please fix.' % ( field_name, sg_type, field_types, ) ) return # If we have a list or status_list Shotgun field, make sure any required # values exist. if sg_type == "list" or sg_type == "status_list": schema_list_values = ( entity_schema.get(field_name, {}) .get("properties", {}) .get("valid_values", {}) .get("value", []) ) missing_values = [] for value in required_values: if value not in schema_list_values: missing_values.append(value) if missing_values: logger.warning( 'SG field "%s" does not contain required value(s) "%s", please fix.' % ( field_name, missing_values, ) ) return return True
5ae2ed8a36199d7b570a6934fc54926856f056d6
664,083
import requests def download_png(map_id): """Download map's png file to map_id.png.""" png_link = f"http://unfortunate-maps.jukejuice.com/download?mapname={map_id}&type=png&mapid={map_id}" with open(f"{map_id}.png", "wb") as f: response = requests.get(png_link) f.write(response.content) return f"{map_id}.png"
cab55ee277523bd9f1d55d74a345c11644992e99
374,739
def char_to_string(ll): """Convert 2-D list of chars to 1-D list of strings.""" # https://stackoverflow.com/questions/23618218/numpy-bytes-to-plain-string # bytes_string.decode('UTF-8') # We might be able to do this a bit more shorthand as we did in Python2.x # i.e return [''.join(x).strip() for x in ll] # But this works for now. result = [] for i in range(len(ll)): x = ll[i] string = '' for j in range(len(x)): c = x[j] if type(c) == str: string += c else: string += c.decode() result.append(string.strip()) return result
09d31eee4217ae04fc85485163c5b2528b340874
75,072
def type_to_str(t, separator=":"): """convert a type, e.g. a class to its unique module name. Constructed by default as: PATH_TO_MODULE:CLASS_NAME """ return f"{t.__module__}{separator}{t.__name__}"
e742965ba18e0eae4d5d4ce6aecfc9400893077d
558,937
import functools def partition_dict(d, pred, dict_class=dict): """ Split a dict in two based on a predicate. ``d`` is a dict (or dict-like) object. ``pred(key, value)`` is a function that returns a bool, which will determine which dict the key, value pair will be sent to. Returns two dicts, ``(false, true)``, where ``false`` is a dict with all pairs where ``pred`` returned ``False``, and ``true`` is a dict with all pairs where ``pred`` returned ``True``. """ def iterator(acc, pair): f, t = acc key, val = pair if pred(key, val): t[key] = val else: f[key] = val return f, t return functools.reduce(iterator, d.items(), (dict_class(), dict_class()))
c1d9cd938eb791dc4bfc766b68dec62d40a23cab
312,487
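A small usage sketch of partition_dict above: the first dict collects pairs where the predicate is False, the second where it is True.

small, large = partition_dict({"a": 1, "b": 2, "c": 3}, lambda k, v: v > 1)
print(small)  # {'a': 1}
print(large)  # {'b': 2, 'c': 3}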
def chrom_sort_key(x): """Return an integer for sorting from a chromosome.""" if x.startswith('chr'): x = x[3:] if x.upper() == 'X': return 100 elif x.upper() == 'Y': return 101 elif x.upper().startswith('M'): return 150 elif x.isdigit(): return int(x) return x
beb7c4c62d04ea2aa5800cfdb956b50a4c720ebd
500,194
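A usage sketch of chrom_sort_key above, assuming the usual human chromosome names; numeric chromosomes sort numerically while X, Y and M sort to the end.

chroms = ["chr10", "chrX", "chr2", "chrM", "chr1"]
print(sorted(chroms, key=chrom_sort_key))  # ['chr1', 'chr2', 'chr10', 'chrX', 'chrM']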
def load_samples(sample_file_paths): """Read in processed sample files, returning flat list.""" samples = [] for sample_file_path in sample_file_paths: with open(sample_file_path, "r") as sample_file: for sample in sample_file: sample = sample.strip() samples.append(sample) return samples
a0ad967a26f758ec3fb1b05c769547486325841c
307,846
def get_chat_id(update): """ Get chat ID from update. Args: update (instance): Incoming update. Returns: (int, None): Chat ID. """ # Simple messages if update.message: return update.message.chat_id # Menu callbacks if update.callback_query: return update.callback_query.message.chat_id return None
1669382fd430b445ea9e3a1306c1e68bf2ec0013
1,830
from typing import List def get_header(sentences: List[str]) -> List[str]: """ Header is defined as anything that comes before the abstract """ header = [] for s in sentences: s = s.strip() if s.lower() == 'abstract': break header.append(s) return header
7137cba7a7d9d9f12a8b7885b0d685bc06af7120
154,785
import string import random def random_string(length): """Generate random string of the given length.""" pool = string.ascii_letters + string.digits return "".join(random.choice(pool) for _ in range(length))
f5e83dd2215d708b0ce5f8bd3e344b8fed12277d
695,746
def compare(x, y): """Write a compare function that returns 1 if x > y, 0 if x == y, and -1 if x < y""" if x > y: return 1 elif x < y: return -1 else: return 0
d7bc3853dd1edd3c1f3d0d114beb9390ced9125c
486,721
from pathlib import Path


def not_max_depth(path: Path, root: Path, depth: int):
    """Return True if the depth of the current path does not exceed the maximum depth specified"""
    return len(path.relative_to(root).parents) <= depth
9d443b0b9c5104eff63b40fd453fdb64516180ba
82,472
def module_level_function(param1, param2=None, *args, **kwargs): """Evaluate to true if any paramaters are greater than 100. This is an example of a module level function. Function parameters should be documented in the ``Parameters`` section. The name of each parameter is required. The type and description of each parameter is optional, but should be included if not obvious. This example function calculates if any of the params are greater than a target value of 100, and if so returns True If *args or **kwargs are accepted, they should be listed as ``*args`` and ``**kwargs``. The format for a parameter is:: name : type description The description may span multiple lines. Following lines should be indented to match the first line of the description. The ": type" is optional. Multiple paragraphs are supported in parameter descriptions. Parameters ---------- param1 : int The first parameter. param2 : :obj:`str`, optional The second parameter. *args Variable length argument list. **kwargs Arbitrary keyword arguments. Returns ------- bool True if successful, False otherwise. The return type is not optional. The ``Returns`` section may span multiple lines and paragraphs. Following lines should be indented to match the first line of the description. The ``Returns`` section supports any reStructuredText formatting, including literal blocks:: { 'param1': param1, 'param2': param2 } Raises ------ AttributeError The ``Raises`` section is a list of all exceptions that are relevant to the interface. ValueError If `param2` is equal to `param1`. ValueError If `param2` is not a string """ if param1 == param2: print(f"param1: {param1}, param2: {param2}") error_message = "param1 may not be equal to param2" print(error_message) raise ValueError(error_message) # Collect the params and find the max value value_list = [] value_list.append(param1) if param2: if not isinstance(param2, str): error_message = "param2 must be a string" print(error_message) raise ValueError(error_message) else: converted_param2 = int(param2) value_list.append(converted_param2) if args: for x in args: if not isinstance(x, int): error_message = "args values must be integers" print(error_message) raise ValueError(error_message) value_list.append(x) if kwargs: print("Metadata content") for key, value in kwargs.items(): print(f"{key}: {value}") if key == "verbose" and value is True: print("Additional verbose output: ......................") # Find max value from the compiled list max_value = max(value_list) print( f"param1: {param1}, param2: {param2}, args: {args}, " f"kwargs: {kwargs}. Max value: {max_value}" ) # Function returns True if any of the params are greater than 100 target_value = 100 if max_value > target_value: return True else: return False
b3a6ca94904bf1f1fe4d0b69db03df9f87bb1e26
654,314
def local_memory_access_granularity(dev): """Return the number of bytes per bank in local memory.""" return 4
ec4e6fe5b8ab096c4d69d273469725e984fbba96
301,421
def issue_url(server: str, key: str) -> str: """Generate URL for a given issue.""" return f'{server}/browse/{key}'
db6c7526ee2c35c0ec4903bdee17fdde2f1a0aaf
209,592
def arg(*args, **kwargs): """Utility function used in defining command args.""" return ([*args], kwargs)
7b3643b9136215321b63392fa018b52181161b00
570,742
def to_camel_case(string: str) -> str: """Quick function to return camelCase from snake_case.""" first, *rest = string.split("_") chunks = [first.lower(), *map(str.capitalize, rest)] return "".join(chunks)
00afb059a4c97cf55b1a2abf5ae468e191616b21
466,646
async def audio_playing(ctx): """Checks that audio is currently playing before continuing.""" client = ctx.guild.voice_client if client and client.channel and client.source: return True else: return False # raise commands.CommandError("Not currently playing any audio.")
e5f73bf062c03cfcf81baa54d32b07de51d880f5
577,122
from typing import Any def empty_list() -> list[Any]: """ Returns an empty list Returns: list[Any]: An empty list """ return []
42007230de7b306769160c466dfff4d4673de34f
361,473
def mesg_index(old, last, new, max_index): """ Reference: http://faculty.salina.k-state.edu/tim/NPstudy_guide/servers/Project5_chat_Server.html#project5-chat-server :param old: integer index of oldest (first) message in queue :param last: integer index of last message read by the client thread :param new: integer index of newest (last) message in the queue :param max_index: maximum value that any index can hold [modification] This computes the index value of the message queue from where the reader should return messages. It accounts for having a cyclic counter. This code is a little tricky because it has to catch all possible combinations. """ # print(f'old={old}, new={new}, last={last}, max={max_index}') if new >= old: # normal case if last >= old and last < new: return (last - old + 1) else: return 0 else: # cyclic roll over (new < old) if last >= old: return (last - old + 1) elif last < new: return (max_index - old + last) else: return 0
ec10b161d22842c04ebb8d7b2eaff99aa08a3acf
165,078
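Two worked cases for mesg_index above, assuming a queue whose indices wrap at max_index: in the normal case the reader resumes right after the last message it has read, and the same holds once the counter has rolled over.

# Normal case: oldest=5, last read=7, newest=9 -> skip the 3 already-read messages.
print(mesg_index(5, 7, 9, 100))    # 3
# Roll-over case: oldest=98, last read=99, newest=2 (indices wrapped at 100).
print(mesg_index(98, 99, 2, 100))  # 2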
def _image2pixelarray(imgObj): """ Return image object as a pixel array list. :param imgObj: PIL Image Object :return: List, pixels """ pixels = list(imgObj.getdata()) return pixels
6491d5ddccab0aa6fe0fa84ef823af15966ecf95
397,242
def compPubKey(keyObj): """ get public key from python-bitcoin key object :param keyObj: python-bitcoin key object :return: public bytes """ keyObj._cec_key.set_compressed(True) pubbits = keyObj._cec_key.get_pubkey() return pubbits
12eb5b1b59f76f80621067275bf93c73fccc4472
661,890
def docker_image_exists(docker_client, image_name): """ Test if a Docker image already exists """ return True if docker_client.images.list(name=image_name) else False
bc4b4b965e562b664d3ff0ed3e63dbe4c71cda99
454,805
def _indent(level): """Returns leading whitespace corresponding to the given indentation `level`. """ indent_per_level = 4 return ' ' * (indent_per_level * level)
4391c308b59db321ef3f810c73b66e35d44566fa
11,771
def setup_config(quiz_name):
    """Updates the config.toml index and dataset fields with the formatted
    quiz_name. This directs metapy to use the correct files.

    Keyword arguments:
    quiz_name -- the name of the quiz

    Returns:
    True on success, False if it fails to open the file
    """
    try:
        conf_file = open("config.toml", 'r')
        lines = conf_file.readlines()
        conf_file.close()
        for i in range(len(lines)):
            if lines[i].startswith("index"):
                lines[i] = "index = 'idx-{0}'\n".format(quiz_name.replace(" ", "_"))
            if lines[i].startswith("dataset"):
                lines[i] = "dataset = '{0}'\n".format(quiz_name.replace(" ", "_"))
        conf_file = open("config.toml", 'w')
        with conf_file:
            conf_file.writelines(lines)
    except Exception as e:
        print(e)
        return False
    return True
28aba9399926f27da89953c8b0c6b41d95a12d96
6,003
from typing import Sequence import shlex def _flatten_shlexed_list(shlexed_args: Sequence[str]) -> list[str]: """Convert a list of shlexed args into a flattened list of individual args. For example, ['arg1 arg2=foo', '--arg3'] would be converted to ['arg1', 'arg2=foo', '--arg3']. """ return [arg for shlexed_arg in shlexed_args for arg in shlex.split(shlexed_arg)]
36ce76ae64beb95630df85696597ddb152d0d2d6
248,586