Columns: content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
def get_all_subclasses(klass): """Return all subclasses of a class Use Python introspection to list all subclasses of the given class. This function does not stop at the direct children, but walks the whole inheritance tree. :param klass: the root class to use for introspection :type klass: T :rtype: tuple(T) """ subclasses = set() queue = [klass] while queue: parent = queue.pop() for child in parent.__subclasses__(): if child not in subclasses: subclasses.add(child) queue.append(child) return tuple(subclasses)
056f1cd12255bfb6fff38d25a29923c6be43cc0f
615,063
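A minimal usage sketch for get_all_subclasses above; the class names Base, Child and GrandChild are illustrative assumptions, not part of the original snippet.

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

# Both direct and indirect subclasses are discovered (set order is arbitrary).
print(get_all_subclasses(Base))  # e.g. (<class 'Child'>, <class 'GrandChild'>)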
def num_words(tokens): """Given list of words, return no. of words (int)""" return len(tokens)
89388c467380803e834d2ef287d33d17b882d666
19,208
import base64 def convert_to_base64(image_file): """Open image and convert it to base64""" print('Converting Image to Base64') with open(image_file, 'rb') as f: return base64.b64encode(f.read()).decode('utf-8')
8013b297db36f71eadf9b6cdec685f984abf2a1e
610,897
def cc(g): """ >>> graph = [[1, 4], [0], [3, 6, 7], [2, 7], [0, 8, 9], [], [2, 10], \ [2, 3, 10, 11], [4, 9], [4, 8], [6, 7, 11], [10, 7]] >>> cc(graph) 3 """ def dfs(g, t, seen): for v in g[t]: if v not in seen: seen.add(v) dfs(g, v, seen) seen = set() cnt = 0 for v in range(len(g)): if v in seen: continue dfs(g, v, seen) cnt += 1 return cnt
07ba602a45635df2d6eab79398c9408ce6b0e736
369,309
def project_value(value, src_min, src_max, dst_min, dst_max): """project value from source interval to destination interval""" scaled = float(value - src_min) / float(src_max - src_min) return dst_min + (scaled * (dst_max - dst_min))
29356e3df08986eb21c7cc44f4039d72d1c5558b
138,375
import yaml def load_config(path="config/default.yaml") -> dict: """ Loads and parses a YAML configuration file :param path: path to YAML configuration file :return: configuration dictionary """ with open(path, 'r') as ymlfile: cfg = yaml.safe_load(ymlfile) return cfg
59417b3e6f727244cee51558f064d371dd86d911
599,910
def _trim_ds(ds, epochs): """Trim a Dataset to account for rejected epochs. If no epochs were rejected, the original ds is returned. Parameters ---------- ds : Dataset Dataset that was used to construct epochs. epochs : Epochs Epochs loaded with mne_epochs() """ if len(epochs) < ds.n_cases: ds = ds.sub(epochs.selection) return ds
0e08273d86188b8572d0b26feea992771694feb8
668,317
def str_manipulation(s): """ This function converts all alphabetic characters in the string to lower case. ---------------------------------------------------------------------------- :param s: (str) the word that the user input. :return: ans (str), the word in lower case. """ ans = '' for ch in s: if ch.isupper(): ans += ch.lower() else: ans += ch return ans
61a91a4bbcafe2ffd913bad9055cbba115bd8311
117,966
def string_to_elements(string): """ :string: elements separated by colon as in s1:s2:s3 Return list of elements """ ss = string.split(':') elements = [] for s in ss: if s: elements.append(bytes.fromhex(s)) return elements
e1fe186d0e4b8f4b5ebc7fcc548a24be734189d9
53,663
def bytes_to_string(bytes): """ It generates a string with a proper format to represent bytes. Parameters ---------- bytes : int A quantity of bytes Returns ------- size_str : str The string representing the number of bytes with a proper format """ kilobytes = bytes / 1000 if kilobytes < 1.0: return '{:.2f} B'.format(bytes) megabytes = kilobytes / 1000.0 if megabytes < 1.0: return '{:.2f} kB'.format(kilobytes) gigabytes = megabytes / 1000 if gigabytes < 1.0: return '{:.2f} MB'.format(megabytes) terabytes = gigabytes / 1000 if terabytes < 1.0: return '{:.2f} GB'.format(gigabytes) return '{:.2f} TB'.format(terabytes)
3e3a2946e02d0c7f5ed347da265ee329bab0f577
592,481
def convert_qty2gram(qty = {'val': '', 'unit': ''}): """ Convert OFF quantity to a standard quantity (in grams) Args: qty (dict): OFF quantity value and unit Returns: dict with value converted to grams (if possible) and two new keys: std: True if value could be converted using a standard conversion factor approx: True if the original units were not in 'g', 'kg', 'mg' """ init_val = qty['val'] init_unit = qty['unit'] convert_matrix = {'g':1.0, 'kg':1000, 'mg':0.001, 'gal':3785.41, 'egg':50.0, # TO BE VALIDATED 'portion':100.0, # TO BE VALIDATED 'l':1000.0, 'ml':1.0, 'cl':10.0, 'dl':100.0, 'oz':28.3495, 'lb':453.592} if (init_val!='') & (init_unit in convert_matrix.keys()): conv_val = convert_matrix[init_unit]*init_val conv_unit = 'g' conv_std = True else: conv_val = init_val conv_unit = init_unit conv_std = False # all conversions not from g, kg or mg are approximate conversions approx = True if init_unit in ['g', 'kg', 'mg']: approx = False return {'val': conv_val, 'unit': conv_unit, 'std': conv_std, 'approx': approx}
c7f6a5cacf6ee7a799e06dfee9d2e14706acb9da
195,222
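An illustrative call of convert_qty2gram above; the input quantities are made-up values, not data from the original source.

print(convert_qty2gram({'val': 2, 'unit': 'kg'}))
# -> {'val': 2000, 'unit': 'g', 'std': True, 'approx': False}
print(convert_qty2gram({'val': 1, 'unit': 'oz'}))
# -> {'val': 28.3495, 'unit': 'g', 'std': True, 'approx': True}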
from typing import Dict def market_is_active(market: Dict) -> bool: """ Return True if the market is active. """ # "It's active, if the active flag isn't explicitly set to false. If it's missing or # true then it's true. If it's undefined, then it's most likely true, but not 100% )" # See https://github.com/ccxt/ccxt/issues/4874, # https://github.com/ccxt/ccxt/issues/4075#issuecomment-434760520 return market.get('active', True) is not False
f8c113e2e3cf08906aa38cddbf89cb555096012e
623,993
def file_length(file_obj): """ Returns the length in bytes of a given file object. Necessary because os.fstat only works on real files and not file-like objects. This works on more types of streams, primarily StringIO. """ file_obj.seek(0, 2) length = file_obj.tell() file_obj.seek(0) return length
052254469e4a20e4b510cec52434ec88614d92c1
605,480
import inspect def get_func_name() -> str: """Return calling function name.""" func_name = inspect.currentframe().f_back.f_code.co_name # type: ignore return func_name
a48175ee7919e8744e39ebb196bce9827d8814ad
238,415
def subset_BEA_Use(df, attr): """ Function to modify loaded BEA table based on data in the FBA method yaml :param df: df, flowbyactivity format :param attr: dictionary, attribute data from method yaml for activity set :return: modified BEA dataframe """ commodity = attr['clean_parameter'] df = df.loc[df['ActivityProducedBy'] == commodity] # set column to None to enable generalizing activity column later df.loc[:, 'ActivityProducedBy'] = None return df
c3fbb7fc07e0bac81501f57c4f557b360601e3fb
620,564
def getShapes(node): """Returns the shape of the dagNode Arguments: node (dagNode): The input node to search the shape Returns: list: The shapes of the node """ return node.listRelatives(shapes=True)
290710ebfbc43830e847a441cb59beedf4aa7989
593,119
def cycle_length(k: int) -> int: """ Computes the repeated cycle length of the decimal expansion of 1/k. e.g. 1/6 = 0.1(6) -> 1 1/7 = 0.(142857) -> 6 For k not a multiple of 2 or 5, 1/k has a cycle of d digits, where d is the smallest positive integer such that 10^d == 1 (mod k) """ while k % 2 == 0: k //= 2 # remove factors of 2 while k % 5 == 0: k //= 5 # remove factors of 5 if k == 1: return 0 # this is not a repeating decimal d = 1 x = 10 % k while x != 1: x = (x*10) % k d += 1 return d
f4516683928174fa1e730074c40892e7a56ac0e4
57,694
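A few illustrative checks of cycle_length above, mirroring the docstring examples; the extra 1/8 case is an assumption added to show the terminating-decimal branch.

assert cycle_length(6) == 1   # 1/6 = 0.1(6), one repeating digit
assert cycle_length(7) == 6   # 1/7 = 0.(142857), six repeating digits
assert cycle_length(8) == 0   # 1/8 = 0.125 terminates, so no cycle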
def to_time_weighted(G, speed): """ Convert from distance weighted network to time weighted network. Parameters ---------- :param G: networkx.MultiDiGraph transport network with length attribute. :param speed: float speed in km/h. Returns ------- :return: networkx.MultiDiGraph time-weighted network """ for u, v, key, data in G.edges(data='length', keys=True): ujt = 1 / (speed * 16.666666667) # convert speed in km/h to unit journey time in minutes per meter G[u][v][key]['weight'] = data * ujt return G
f72f5284e528f1d103adf008c3c202029bc0d0ff
150,173
def subtree_has_tag(block, tagname): """ Check if the doctree node contains a particular tag in its subtree. Args: block: A block that has to be checked tagname: The searched tag Returns: True if the subtree contains a node with the tagname or False otherwise. """ for node in block.traverse(include_self=False): if node.tagname.strip() == tagname: return True return False
d6fcf7c9315bfa88ed8d81afd4b0d48506a9816c
448,506
def in_or_none(x, L): """Check if item is in list or list is None.""" return (L is None) or (x in L)
bc3e4ef5a8daf7669e7430940e361d4c7ec1a240
11,825
def remove_index(a, index): """Remove element at index of a sequence and return it as a list""" a = list(a) a.pop(index) return a
a8987c36b18592852f9ec8223bedd293a265e95b
234,039
def question2(a): """ Given a string a, find the longest palindromic substring contained in a. :param a: initial string :return: longest palindromic substring in a """ if a is None: return None elif len(a) < 2: return a biggest = (0, "") for num in range(len(a)): for end in range(num + 1, len(a) + 1): check = a[num:end] if len(check) > biggest[0] and check == check[::-1]: biggest = (len(check), check) return biggest[1]
f8ff81316f5249733592cd03291828998a1ae6b0
438,297
def _SplitLineIntoRegions(line, uncovered_blocks): """Returns a list of regions for a line of code. The structure of the output is as follows: [ { 'is_covered': True/False # Whether this region is actually covered. 'text': string # The source text for this region. } ] The regions in the output list are in the order they appear in the line. For example, the following loop reconstructs the entire line: text = '' for region in _SplitLineIntoRegions(line, uncovered_blocks): text += region['text'] assert text == line """ if not uncovered_blocks: return [{'is_covered': True, 'text': line}] regions = [] region_start = 0 for block in uncovered_blocks: # Change from 1-indexing to 0-indexing first = block['first'] - 1 last = block['last'] if last < 0: last = len(line) else: last -= 1 # Generate the covered region that precedes this uncovered region. preceding_text = line[region_start:first] if preceding_text: regions.append({'is_covered': True, 'text': preceding_text}) regions.append({ 'is_covered': False, # `last` is inclusive 'text': line[first:last + 1] }) region_start = last + 1 # If there is any text left on the line, it must be covered. If it were # uncovered, it would have been part of the final entry in uncovered_blocks. remaining_text = line[region_start:] if remaining_text: regions.append({'is_covered': True, 'text': remaining_text}) return regions
03cd4443595af38d102b5cb015dbde7251059d5e
299,181
import re def camelcase_to_snakecase(value: str) -> str: """ Convert a string from camelCase to snake_case. >>> camelcase_to_snakecase('') '' >>> camelcase_to_snakecase('foo') 'foo' >>> camelcase_to_snakecase('fooBarBaz') 'foo_bar_baz' >>> camelcase_to_snakecase('foo_bar_baz') 'foo_bar_baz' >>> camelcase_to_snakecase('_fooBarBaz') '_foo_bar_baz' >>> camelcase_to_snakecase('__fooBarBaz_') '__foo_bar_baz_' """ value = re.sub(r"[\-\.\s]", "_", value) if not value: return value return value[0].lower() + re.sub( r"[A-Z]", lambda matched: "_" + matched.group(0).lower(), value[1:] )
05fe02739e8152bc64ab35bd842162b5d7c3ab4c
704,075
def make_row_dict(row): """ Takes in a DataFrame row (Series), and return a dictionary with the row's index as key, and the row's values as values. {col1_name: col1_value, col2_name: col2_value} """ ind = row[row.notnull()].index values = row[row.notnull()].values # to transformation with extract_col_name here??? return dict(list(zip(ind, values)))
e5eb63e5c56466cfc27756782bf9842a30801110
469,337
import requests def get_content_from_url(url): """Get the content of the page from the URL as a string""" r = requests.get(url) return r.text
c8f3f65490d4ae32b03b0567882f792a18db51dd
596,265
def _clean_values(values): """ Clean values to the state that can be used in Sheets API :type values: list :param values: Row values to clean :rtype: list :return: Cleaned values, in the same order as given in function argument """ return [value if value is not None else '' for value in values]
64556e79cfe019189b4df303bc26cd25a8980b14
94,633
def bindVarCompare(a, b): """ _bindVarCompare_ Bind variables are represented as a tuple with the first element being the variable name and the second being its position in the query. We sort on the position in the query. """ if a[1] > b[1]: return 1 elif a[1] == b[1]: return 0 else: return -1
3a5534115c4501e532bf5df3f93d2bc18ab6958d
149,129
import torch def pdist(sample_1, sample_2, eps=1e-5): """Compute the matrix of all pairwise distances. Code adapted from the torch-two-sample library (added batching). You can find the original implementation of this function here: https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py Arguments --------- sample_1 : torch.Tensor or Variable The first sample, should be of shape ``(batch_size, n_1, d)``. sample_2 : torch.Tensor or Variable The second sample, should be of shape ``(batch_size, n_2, d)``. eps : float Small constant added before the square root for numerical stability. Returns ------- torch.Tensor or Variable Matrix of shape (batch_size, n_1, n_2). The [i, j]-th entry is equal to ``|| sample_1[:, i, :] - sample_2[:, j, :] ||_2``.""" if len(sample_1.shape) == 2: sample_1, sample_2 = sample_1.unsqueeze(0), sample_2.unsqueeze(0) B, n_1, n_2 = sample_1.size(0), sample_1.size(1), sample_2.size(1) norms_1 = torch.sum(sample_1 ** 2, dim=-1, keepdim=True) norms_2 = torch.sum(sample_2 ** 2, dim=-1, keepdim=True) norms = (norms_1.expand(B, n_1, n_2) + norms_2.transpose(1, 2).expand(B, n_1, n_2)) distances_squared = norms - 2 * sample_1.matmul(sample_2.transpose(1, 2)) return torch.sqrt(eps + torch.abs(distances_squared)).squeeze()
0e4eba793d19963e3afab3452c7ce981f8eccf57
359,602
def HeadLossCurcuit(List_Head): """ Head Loss Circuit is a function which calculates the head loss around a closed ventilation circuit. Accepts a list of head losses (junctions from-to), in water. The closed-circuit head loss error is calculated and returned as a percentage (%). Returns a Tuple of (Head Loss Error, Error Percentage) """ HeadLossVal = 0 #Set Initial Head Loss to 0 TotalHeadLoss = min(List_Head) #Total Head Loss Determined by Lowest Press. Measurement, Error Percentage (%) for HeadLoss in List_Head: HeadLossVal += HeadLoss #All Values are summed to determine closure error of circuit #print(TotalHeadLoss) percentage_error = round(abs(HeadLossVal)/abs(TotalHeadLoss)*100, 2) print("Error Percentage of Head Loss Circuit:", percentage_error) return (round(HeadLossVal, 3), percentage_error)
47d0ab51ab1204579985dfe5fb2f1010ee6e6111
132,931
def get_freejoint(element): """Retrieves the free joint of a body. Returns `None` if there isn't one.""" if element.tag != 'body': return None elif hasattr(element, 'freejoint') and element.freejoint is not None: return element.freejoint else: joints = element.find_all('joint', immediate_children_only=True) for joint in joints: if joint.type == 'free': return joint return None
bcd0980b28f6e2f5b45b854912f9e0916ee732b4
299,172
def get_null_term_str(data, encoding): """ extract null terminated string from data :param data: data buffer :param encoding: encoding used to convert bytes to str :return: string """ # temp = data[:] if 0 in data: temp = bytes(data[:data.index(0)]) else: temp = bytes(data) return temp.decode(encoding)
8c660261209419d284f36d1880e8f6f556b6d757
179,344
import glob def matching_pathnames(paths): """Get list of matching pathnames for the given list of glob patterns.""" results = [] for p in paths: results.extend(glob.glob(p, recursive=True)) return results
da83979d49304a37aa093b4240a7dfd52bdecaf7
547,552
def generate_bins() -> list: """ Generate color bins. :return: List of bins """ h_bins = [(x / 10.0, (x + 1) / 10.0) for x in range(0, 10)] h_bins[-1] = (h_bins[-1][0], 1.1) s_bins = [(0.0, 0.333), (0.333, 0.666), (0.666, 1.1)] l_bins = [(0.0, 0.333), (0.333, 0.666), (0.666, 1.1)] bins = [] for h_bin in h_bins: for s_bin in s_bins: for l_bin in l_bins: bins.append((h_bin, s_bin, l_bin)) return bins
fa207596bc915f83145964d6a07b5140fb021e5d
680,128
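A quick illustrative check of generate_bins above; the expected count follows directly from the 10 hue, 3 saturation and 3 lightness ranges defined in the function.

bins = generate_bins()
print(len(bins))   # 90 bins: 10 hue x 3 saturation x 3 lightness
print(bins[0])     # ((0.0, 0.1), (0.0, 0.333), (0.0, 0.333))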
import itertools import operator def invert_otm_mapping(mapping): """Inverts a one-to-many mapping.""" # Create a list of inverted (v, k) tuples. tuples = (itertools.zip_longest(v, [k], fillvalue=k) for k, v in mapping.items() if len(v) > 0) # yapf: disable tuples = itertools.chain.from_iterable(tuples) # Sort tuples by first element. id_attr = operator.itemgetter(0) sorted_tuples = sorted(tuples, key=id_attr) # Create inverted dictionary using groupby. inverted = {k: set(list(zip(*grp))[1]) for k, grp in itertools.groupby( sorted_tuples, key=id_attr)} return inverted
e4d3d0449f7b7e87ff00447228653788805a5a6d
588,316
def get_pgstattuple_schema_name(curs): """ Getting the schema where the pgstattuple extension is installed """ query = """ select n.nspname::text, e.extversion::numeric from pg_catalog.pg_extension e join pg_catalog.pg_namespace as n on n.oid = e.extnamespace where e.extname = 'pgstattuple' """ curs.execute(query) try: r = curs.fetchone() return [r[0], r[1]] except Exception: return [None, None]
4223ae2fcdbb25314b7c523d78c95b3e510de1cf
557,358
def get_missing_mlflow_experiment_tracker_error() -> ValueError: """Returns a detailed error that describes how to add an MLflow experiment tracker component to your stack.""" return ValueError( "The active stack needs to have an MLflow experiment tracker " "component registered to be able to track experiments using " "MLflow. You can create a new stack with an MLflow experiment " "tracker component or update your existing stack to add this " "component, e.g.:\n\n" " 'zenml experiment-tracker register mlflow_tracker " "--type=mlflow'\n" " 'zenml stack register stack-name -e mlflow_tracker ...'\n" )
572fe73e420480566066ed97e71a6acc13304c19
168,991
import collections def _compile_job_list(job_iterator): """ Make one unified list of jobs from a job iterator. Identify jobs by their `description` or index in the `job_iterator`. Keep the last one in the `job_iterator` for each job identifier. Remove jobs missing the required fields. """ jobs_dict = collections.OrderedDict() for i, job in enumerate(job_iterator): job_id = job.get('description', i) if 'url' in job and 'schedule' in job: jobs_dict[job_id] = job elif job_id in jobs_dict: del jobs_dict[job_id] return list(jobs_dict.values())
f41383b913164dfdde898b7c23eb0f561beb9496
429,081
def test_cache_memoize_arg_normalization(cache): """ Test that cache.memoize() normalizes argument ordering for positional and keyword arguments. """ @cache.memoize(typed=True) def func(a, b, c, d, **kargs): return (a, b, c, d) for args, kargs in ( ((1, 2, 3, 4), {"e": 5}), ((1, 2, 3), {"d": 4, "e": 5}), ((1, 2), {"c": 3, "d": 4, "e": 5}), ((1,), {"b": 2, "c": 3, "d": 4, "e": 5}), ((), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), ((), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), ): func(*args, **kargs) assert len(cache) == 1
170069c8af7f9debe94029018f8df8bdaf31c01d
647,313
def trim(inp_str, symbol): """ Trim a string up to a symbol Parameters ---------- inp_str : string input string symbol : string symbol to split on Returns ------- out : string trimmed output """ if isinstance(inp_str, str) and isinstance(symbol, str): out = symbol.join(inp_str.split(symbol)[:-1]) else: raise TypeError('Trim function requires strings as input!') return out
b884937fb36a330d08cd445150f48759382b357b
269,009
def map_type_list_field(old_type): """ This function maps the list type into individual field type which can contain the individual values of the list. Mappings - list:reference <table> --> reference <table> - list:integer --> integer - list:string --> string """ if (old_type == "list:integer"): return "integer" elif old_type.startswith("list:reference"): return old_type.strip("list:") elif old_type == "list:string": return "string"
496e238c48ed72d0713b4b17a89b9133e3cef8e0
113,579
import yaml def _yaml_to_dict(yaml_string): """ Converts a yaml string to dictionary Args: yaml_string: String containing YAML Returns: Dictionary containing the same object """ return yaml.safe_load(yaml_string)
c7de0c860028d17302cd4d07e20c3215503b977b
4,444
def _stacktrace_beginning(stacktrace, size): """ Gets the first `size` bytes of the stacktrace """ if len(stacktrace) <= size: return stacktrace return stacktrace[:size]
cfebf615dd129b89da77e1843437788659043a52
505,869
def transform_df_to_numbers(df): """ Substitutes difficulty labels with numbers (1 to 3) so they are easier to work with. :param df: Dataframe to transform to numbers :return: transformed dataframe """ mapping = {'LOW': 1, 'MEDIUM': 2, 'HIGH': 3, 'undef': -1} df = df.replace({'physDifficulty': mapping, 'psyStress': mapping, 'psyDifficulty': mapping}) for col in ['physDifficulty', 'psyStress', 'psyDifficulty']: df[col] = df[col].astype('int64') return df
c59e3aecb35b49b9a56681c31835872d8eab7317
509,857
def _translate_attachment_summary_view(_context, vol): """Maps keys for attachment summary view.""" d = {} storage_pool_id = vol['id'] # NOTE(justinsb): We use the storage_pool id as the id of the attachment object d['id'] = storage_pool_id d['storage_pool_id'] = storage_pool_id d['server_id'] = vol['instance_uuid'] if vol.get('mountpoint'): d['device'] = vol['mountpoint'] return d
b5ed8754d544dd16cc325c7cc15b74c97c59e64a
612,666
import typing def hour_to_day(hour: int) -> typing.Tuple[int, int]: """Converts from a simulation hour to the pair (day, hour).""" day = int(hour // 24) hour = int(hour % 24) return day, hour
138bcf18020cf0c20c4d90303ffdaf9e518a7bfe
256,475
import hashlib def md5(data: str): """ generate md5 hash of utf-8 encoded string. """ return hashlib.md5(data.encode("utf-8")).hexdigest()
3a7274c72228247e71b0e4668b42328506040d3c
139,921
def tuple_to_string(tuple_in: tuple) -> str: """Converts an RGB tuple to a latex-xcolor compatible string""" return ", ".join([f"{val:6.3f}" for val in tuple_in])
adec820901c4c3fb86dc74e2a404f2bc03f5d7f3
343,036
def interpret_numbers(user_range): """ :param user_range: A string specifying a range of numbers. Eg. interpret_numbers('4-6')==[4,5,6] interpret_numbers('4,6')==[4,6] interpret_numbers('4,6-9')==[4,6,7,8,9] :return: A list of integers, or None if the input is not numeric """ if all(d in '0123456789-,' for d in user_range): numbers_and_ranges = user_range.split(',') numbers = [n for lst in [[int(s)] if '-' not in s else range(int(s[:s.index('-')]), int(s[s.index('-')+1:])+1) for s in numbers_and_ranges] for n in lst] return numbers else: return None
dc3a156bdb392e8a54edf95fc4182dfd5965010a
25,716
def one(s): """Get one element of a set""" return next(iter(s))
45ce1607e5d4b6bf2fc53cfc2da5602ccdc83910
657,591
def eval_multiple(exprs,**kwargs): """Given a list of expressions, and keyword arguments that set variable values, returns a list of the evaluations of the expressions. This can leverage common subexpressions in exprs to speed up running times compared to multiple eval() calls. """ for e in exprs: e._clear_eval_cache() return [e._eval([],kwargs) for e in exprs]
2bc90dacb972d3315168638a4ea99f9cfbb13830
702,136
def kinase_families(klifs_metadata, kinase_group=None): """ Get all kinase families for a kinase group. Parameters ---------- kinase_group : None or str Kinase group name (default is None, i.e. all kinase groups are selected). Returns ------- list of str Kinase family names. """ if kinase_group: klifs_metadata = klifs_metadata[klifs_metadata.group == kinase_group] kinase_families = klifs_metadata.family.unique().tolist() return kinase_families
483c050c3f27dcd20c0ad90197c501b8fa2c64ce
215,343
def get_isotopic_abundance_product(components): """ Estimates the abundance of a molecule based on the abundance of the isotopic components. Returns ------- :class:`float` Notes ------ This is essentially a simplistic activity model. Isotopic abundances from periodictable are in %, and are hence divided by 100 here. """ abund = 1.0 for iso in components: abund *= iso.abundance / 100.0 return abund
554db46ae3ba43fab780e7c6458efa79d1927697
119,425
def flatten_dict(data, parent_name="", sep=".", key_converter=None, skip_key_check=None): """ Flattens a dictionary to a single layer with child keys separated by `sep` character Example: input: parent_name = "root" data = { "parent_obj": { "child_obj": { "grand_child_obj": "my value" } }, "foo": "bar" } output: { "root.parent_obj.child_obj.grand_child_obj": "my value", "root.foo": "bar" } """ if not skip_key_check: skip_key_check = lambda *_: False flattened = {} for key, val in data.items(): child_name = key if not parent_name else f"{parent_name}{sep}{key}" if isinstance(val, dict) and not skip_key_check(child_name): flattened.update( flatten_dict( val, parent_name=child_name, sep=sep, key_converter=key_converter, skip_key_check=skip_key_check ) ) else: if key_converter: child_name = key_converter(child_name) flattened[child_name] = val return flattened
4d171e842d04a94f5610d36798519450f9bd674b
562,309
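An illustrative call of flatten_dict above, reusing the data from its own docstring example.

data = {"parent_obj": {"child_obj": {"grand_child_obj": "my value"}}, "foo": "bar"}
print(flatten_dict(data, parent_name="root"))
# {'root.parent_obj.child_obj.grand_child_obj': 'my value', 'root.foo': 'bar'}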
def make_message(options): """Returns formatted string describing `options` dict.""" return 'Options: ' + ', '.join('%s=%s' % (k, options[k]) for k in options)
a778ad4eeef6d0c7cc48160e21a91811077626ec
219,392
def refix(val, p_in="", p_out=""): """ Convert between different SI unit prefixes. Available options are: :code:`'T'` Tera :code:`'G'` Giga :code:`'M'` Mega :code:`'k'` Kilo :code:`'m'` Milli :code:`'mu'` Micro :code:`'n'` Nano :code:`'p'` Pico Parameters ---------- val: scalar The value for which to convert the unit prefix. p_in: string, any of the above, optional The current prefix of :code:`val`. If :code:`p_in` is undefined, :code:`val` has no SI unit prefix. p_out: string, any of the above, optional The prefix of :code:`val_refix` after the conversion. If :code:`p_out` is undefined, :code:`val_refix` has no SI unit prefix. Returns ------- val_refix: scalar The value in units of prefix :code:`p_out`. """ prefix = { 'p': 10 ** -12, 'n': 10 ** -9, 'mu': 10 ** -6, 'm': 10 ** -3, '': 10 ** -0, 'k': 10 ** 3, 'M': 10 ** 6, 'G': 10 ** 9, 'T': 10 ** 12 } val_refix = val * prefix[p_in] / prefix[p_out] return val_refix
2667ee7ef0df622bbb81285cbbb0a3f67be2d01d
442,903
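Two illustrative conversions with refix above; the numeric values are assumptions chosen so the results come out exact.

print(refix(1500, p_out='k'))          # 1.5  (1500 -> 1.5 k)
print(refix(3, p_in='G', p_out='M'))   # 3000.0  (3 G -> 3000 M)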
def write_outfile(lines_list, filename, writestyle = "x"): """ ================================================================================================= write_outfile(lines_list, filename, writestyle) This function is meant to take a list of strings and a filename, then write the strings to the file. ================================================================================================= Arguments: lines_list -> A list of strings, which will become the lines of the file filename -> A string containing the name of the output file, and the path if necessary writestyle -> A string determining how the file should be written. Look at the python function 'open()' for more details. ================================================================================================= Returns: None ================================================================================================= """ # This is a basic scheme for writing files. Open the file with writestyle, # use the writelines method of files to write the lines, and let the with block # close the file automatically. with open(filename, writestyle) as f: f.writelines(lines_list) return None
7c4a113985de4ae85c67f3e1676b7bc55306869f
423,787
def skipfile(filename): """Read a skip file, return a list of integers.""" with open(filename) as f: data = f.readlines() return list(map(int, map(str.strip, data)))
a51dad2f3ad7f2322f8ad8095eb32b4560f5814d
542,890
def is_collection(v): """ Decide if a variable contains multiple values and therefore can be iterated, discarding strings (single strings can also be iterated, but shouldn't qualify) """ # The 2nd clause is superfluous in Python 2, but (maybe) not in Python 3 # Therefore we use 'str' instead of 'basestring' return hasattr(v,'__iter__') and not isinstance(v,str)
e51ee293566e0be9f7143524abb055da0e35671e
24,170
def is_method_of(method, obj): """Return True if *method* is a method of *obj*. *method* should be a method on a class instance; *obj* should be an instance of a class. """ # Check for both 'im_self' (Python < 3.0) and '__self__' (Python >= 3.0). cls = obj.__class__ mainObj = getattr(method, "im_self", getattr(method, "__self__", None)) return isinstance(mainObj, cls)
554ab48effb7ce996846192786ce2141abf671a4
699,838
import click def print_result(result): """ Print successful result to the terminal. """ return click.echo(result)
fb34e935ee4f501136d4837d41c8accc4d9b0c91
608,031
def parse_command(cmd_str): """ # the line has one word for the command and n pairs that go to key, value (separator is space) :param cmd_str: string with name of command and pairs of params and values :return: cmd : str (name of the command) cmd_par: dictionary {par_name: str(par_value)} with the parameters for the command """ split_cmd = cmd_str.split(' ') assert (len(split_cmd) % 2) cmd_par = {split_cmd[i]: split_cmd[i + 1] for i in range(1, len(split_cmd), 2)} cmd = split_cmd[0] return cmd, cmd_par
ac48d05bcd88c7eb5e04cedeb26c5d5278bbc3bd
26,382
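An illustrative call of parse_command above; the command string 'move x 10 y 20' is a made-up example.

cmd, cmd_par = parse_command('move x 10 y 20')
print(cmd)       # 'move'
print(cmd_par)   # {'x': '10', 'y': '20'}  (values stay as strings)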
import json def load_json_from_string(string): """Load schema from JSON string""" try: json_data = json.loads(string) except ValueError as e: raise ValueError('Given string is not valid JSON: {}'.format(e)) else: return json_data
66f96373a8e02bf69289e5e4594ac319906475f5
5,839
def check_read(read): """ Helper function to decide what reads should be keep when parsing alignment file with `pysam`. Parameters ---------- read : AlignedSegment read from alignment file parsed with `pysam`. Returns ------- bool True/False if read should be included """ # Exclude Quality Failures if read.is_qcfail: return False # Exclude Secondary Mappings if read.is_secondary: return False # Exclude Unmapped Reads if read.is_unmapped: return False else: return True
d8cdec3eae4b3c85831b82c4f89eca1896bfe28b
447,458
def create_headers_for_request(token): """Create a header dict to be passed to the api. :param token: token string coming from the api :return: a dict containing all the headers for a request """ return { 'X-Auth-Token': token, 'Content-Type': 'application/json', 'Accept': 'application/json' }
c52e560125b00195d9811b00579281a75ecd8edf
217,514
def coord2num(i,j,N): """ Return the number for coordinate (i, j); indices start at 0. """ return (i+1)+N*j
efa489c8b72b2b00bcc92f92d8dd59b1fff3cd71
239,478
def _normalize_sizes(sizes): """ Checks whether all the sizes are either slices or not. Transforms slices into their sizes. """ out = [] ns = 0 for size in sizes: if isinstance(size, slice): size = size.stop - size.start ns += 1 else: size = int(size) out.append(size) if ns: if ns != len(sizes): raise ValueError('cannot mix sizes with slices! (%s)' % (sizes,)) is_slice = True else: is_slice = False return out, is_slice
0ad4d2ffec0e80e435ec59226c815a5b1737de14
338,760
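An illustrative use of _normalize_sizes above; the slice bounds and sizes are arbitrary assumptions.

print(_normalize_sizes([slice(0, 3), slice(3, 7)]))   # ([3, 4], True)
print(_normalize_sizes([3, 4]))                       # ([3, 4], False)
# Mixing plain sizes with slices raises ValueError.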
def readLinesFromFile(filename): """ Returns the read file as a list of strings. Each element of the list represents a line. On error it returns None. """ try: with open(filename, "r") as input_file: lines = input_file.readlines() return lines except EnvironmentError: return None
e80d17cbb1039fd8f87da5bedee28c470ba697f6
379,962
import random def choose(population, weights=None, k=None): """ Chooses k times from the given population with an optional weighted probability. :param population: the population to chose from :param weights: the weights attached to each population element :param k: the amount of times to chose :return: an element of the list if k = None, or a k-sized list of choices """ choice = random.choices(population, weights=weights, k=k or 1) return choice if k else choice[0]
4a78dde05dba4f9774ae64f0b85bd89e61204b89
32,477
def _has_attr(node, attr): """ Given a ``node`` and an ``attr``, check if the ``attr`` exists on the ``node``. :param node: Node :type node: :class:`~nuke.Node` :param attr: UUID attribute :type attr: str """ if attr in node.knobs(): return True return False
2e750ceb795c1e4803dddb43cd3a18f0150a25fb
545,737
def number_to_digits(num): """Return sorted list of digits in number.""" return sorted(int(ch) for ch in str(num))
cc09154db0f587da5042bc06236e793411a0c5ab
634,048
from typing import Union from pathlib import Path from typing import Optional from typing import List def find_wav_files(path_to_dir: Union[Path, str]) -> Optional[List[Path]]: """Find all wav files in the directory and its subtree. Args: path_to_dir: Path top directory. Returns: List containing Path objects or None (nothing found). """ paths = list(sorted(Path(path_to_dir).glob("**/*.wav"))) if len(paths) == 0: return None return paths
3a780eb6fffd750a1aa7425578b98f61e38ffb6c
522,219
import six import networkx as nx def nx_ascii_tree(graph, key=None): """ Creates an printable ascii representation of a directed tree / forest. Args: graph (nx.DiGraph): each node has at most one parent ( i.e. graph must be a directed forest) key (str): if specified, uses this node attribute as a label instead of the id References: https://stackoverflow.com/questions/32151776/visualize-tree-in-bash-like-the-output-of-unix-tree Example: >>> import networkx as nx >>> graph = nx.dfs_tree(nx.balanced_tree(2, 2), 0) >>> text = nx_ascii_tree(graph) >>> print(text) └── 0 β”œβ”€β”€ 1 β”‚ β”œβ”€β”€ 3 β”‚ └── 4 └── 2 β”œβ”€β”€ 5 └── 6 """ branch = 'β”œβ”€' pipe = 'β”‚' end = '└─' dash = '─' assert nx.is_forest(graph) assert nx.is_directed(graph) lines = [] def _draw_tree_nx(graph, node, level, last=False, sup=[]): def update(left, i): if i < len(left): left[i] = ' ' return left initial = ['{} '.format(pipe)] * level parts = six.moves.reduce(update, sup, initial) prefix = ''.join(parts) if key is None: node_label = str(node) else: node_label = str(graph.nodes[node]['label']) suffix = '{} '.format(dash) + node_label if last: line = prefix + end + suffix else: line = prefix + branch + suffix lines.append(line) children = list(graph.succ[node]) if children: level += 1 for node in children[:-1]: _draw_tree_nx(graph, node, level, sup=sup) _draw_tree_nx(graph, children[-1], level, True, [level] + sup) def draw_tree(graph): source_nodes = [n for n in graph.nodes if graph.in_degree[n] == 0] if source_nodes: level = 0 for node in source_nodes[:-1]: _draw_tree_nx(graph, node, level, last=False, sup=[]) _draw_tree_nx(graph, source_nodes[-1], level, last=True, sup=[0]) draw_tree(graph) text = '\n'.join(lines) return text
c0e8146f13216714d873e6cab91e0e57ad6e57dd
529,672
import random def random_bits(n: int) -> str: """ Returns a binary string with `n` bits. Args: n (int): Number of bits. Returns: bits (str): Binary string of random bits. """ return f"0b{''.join([str(random.randint(0, 1)) for _ in range(n)])}"
344b7b66c534cb28cb17927646f82872b258edc3
526,563
def sextractor_output(fname): """SExtractor detection FITS table name from FITS image name""" return fname.replace(".fits",".fits.stars")
5001f163a64531a7c005f204878ae58c37c27595
87,718
def n_coeffs_from_ell_max(ell_max): """Returns the number of coefficients for an SWSFT with max degree ell_max.""" return (ell_max + 1)**2
2c173b7f0c2365ddde5136fa3d3984468e0339bf
667,831
def know(possible_dates): """A person knows the birthdate if they have exactly one possible date.""" return len(possible_dates) == 1
53c445b1579b9a6ea72a0428a7b1c6b820e34e8c
575,826
def setattr_validate_property(traito, traitd, obj, name, value): """Validates then assigns a value to a specified property trait attribute""" validated = traitd.c_attrs.validate(traitd, obj, name, value) result = traitd.c_attrs.post_setattr(traito, traitd, obj, name, validated) return result
bfed5c7f6ce6e02c25c297d9abf2c107e98f501b
129,198
def copy_vals(params): """Save the values and stderrs of params in a temporary dict.""" tmp_params = {} for para_key in params: tmp_params[para_key] = (params[para_key].value, params[para_key].stderr) return tmp_params
9b4cd635d861840046584fb3feb3f060a0a39030
528,642
import math def sizeof_fmt(size, suffix='B'): """ Return a human-readable string representation of a filesize Arguments: size -- size in bytes """ try: size = int(size) except ValueError: return None if size <= 0: return '0 %s' % suffix size_name = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') i = int(math.floor(math.log(size, 1024))) if i >= len(size_name): i = len(size_name) - 1 p = math.pow(1024, i) s = size / p # round to 3 significant digits s = round(s, 2 - int(math.floor(math.log10(s)))) if s.is_integer(): s = int(s) if s > 0: return '%s %s%s' % (s, size_name[i], suffix) else: return '0 %s' % suffix
ba8da544c29296e55aca30787026cb519bdbe75e
575,739
def basic_sequence(piece_dict): """ Create a basic sequence with the given pieces. Keyword arguments: piece_dict -- The pieces dictionary with the quantity of each piece. """ sequence = [] # Iterate over the pieces original dict for piece, number in piece_dict.items(): if number < 1: continue times = (("%s " % piece)*number) sequence.extend(times.split()) return sequence
df95990188b8b24275bd0608645c7efcfa0aa8f9
588,118
from typing import List def max_crossing_sum(lst: List[int], mid: int, n: int) -> int: """ Parameter <mid> is the floor middle index of <lst>. Parameter <n> is the length of the input list <lst>. Pre: <lst> is a list of integers and len(lst) >= 2. Post: returns the maximum contiguous crossing sum starting from the middle of <lst>. >>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6) 12 """ left_sum, right_sum, total = 0, 0, 0 # initialize values # max sum of the left half k = mid - 1 i = 0 while i < mid: total += lst[k - i] i += 1 if total > left_sum: left_sum = total # # max sum the left half # for i in range(mid - 1, -1, -1): # iterate from index mid - 1...0 backward # total += lst[i] # if total > left_sum: # left_sum = total total = 0 # max sum the right half for i in range(mid, n): # iterate from index mid...n - 1 total += lst[i] if total > right_sum: right_sum = total # note: left_sum and right_sum are each at least zero return left_sum + right_sum
3d873907cb7ed0c14152ec3c2e92a742bd52aa85
1,820
def sortable(obj): """Returns True if *obj* is sortable else returns False.""" try: sorted([obj, obj]) return True except TypeError: return False
f6df95acb95a15ab57d19c275e0a0e3fb2479ede
192,645
def rounder(x, ndigits): """Round a number, or sequence of numbers, to a specified number of decimal digits Args: x (None, float, complex, list): The number or sequence of numbers to be rounded. If the argument is None, then None will be returned. ndigits (int): The number of decimal digits to retain. Returns: None, float, complex, list: Returns the number, or sequence of numbers, with the requested number of decimal digits. If 'None', no rounding is done, and the function returns the original value. """ if ndigits is None: return x elif x is None: return None elif isinstance(x, complex): return complex(round(x.real, ndigits), round(x.imag, ndigits)) elif isinstance(x, float): return round(x, ndigits) elif hasattr(x, '__iter__'): return [rounder(v, ndigits) for v in x] return x
17d02f934e0ac62070010a3524885b5cb25afb36
619,758
def build_connections(movies, participants, role, threshold): """ Build connections between movies and participants (cast and directors). The `role` field in the `attributes` will be constructed with corresponding pointers, and the nodes that are not connected to any movie will be removed. Parameters ---------- movies : dict A dict of movies, with movie id as keys. participants : dict A dict of participants, with subject id as keys. role : str New key in the attributes to save the links. threshold : int Only the participants linked with more than `threshold` number of movies will be kept. Returns ------- dict Updated `movies`. dict Updated `participants`. """ linked = set() for m in movies.values(): m.attributes[role] = {} for p in participants.values(): movie_ids = [int(m.strip()) for m in p.attributes['movie_id'].split(',')] p.attributes.pop('movie_id') if len(movie_ids) <= threshold: continue for mid in movie_ids: if mid in movies.keys(): movies[mid].attributes[role][p.id] = p p.movies[mid] = movies[mid] linked.add(p.id) to_pop = [] for p in participants.keys(): if p not in linked: to_pop.append(p) for p in to_pop: participants.pop(p) return movies, participants
601606f4c45a404c0bcbc6fcb56a7be9e7d2f495
569,325
def varchar(length): """ Factory for a character length validator of the specified length. """ length = int(length) def char_length_validator(string): """ Validate a string ensuring that it doesn't exceed a maximum length. """ if string is None: return string = str(string) if len(string) > length: raise ValueError("Value '%s' exceeds character limit " "of %i." % (string, length)) return string return char_length_validator
a0d498c26528f1f0e7156a7b2fda632db1d65682
52,064
def _listify(element): """ Ensures elements are contained in a list """ # either it is a list already, or it is None if isinstance(element, list) or element is None: return element else: return [element]
e4f5fac2af9a23e4595db4a4c01f2108beb07ef6
387,280
def pythag(x, y, find=0): """ Pythagorean theorem. opp**2 + adj**2 = hyp**2 x = adjacent, y = opposite; find the hypotenuse. :param x: :param y: :param find: :return: <float> """ if not isinstance(x, float) and not isinstance(x, int): raise ValueError("Please input only float/integer values for x: such as 1.0") if not isinstance(y, float) and not isinstance(y, int): raise ValueError("Please input only float/integer values for y: such as 1.0") if find == 0: ''' input is opposite and adjacent ''' return ((x**2)+(y**2))**0.5 # Or you can use math.hypot(x, y) if find == 1: ''' input is hypotenuse and opposite or adjacent ''' return abs((x**2)-(y**2))**0.5
51df31b44c08914e4c96c844af0b7007ce53c65f
383,569
def get_object(o, names ): """ Command used to get the object o.name1.name2.name3 where name1, name2, name3 are provided in `names` It is located here so that it can be pickled and sent over the wire :param o: :param names: :return: """ result = o for n in names: result = getattr(result, n) return result
5a47556e2a549962b8c32ea365d5cc6d9ad25215
259,435
def clip(x, min_val, max_val): """ Clips x between min_ and max_val. Args: x (float): The input to be clipped. min_val (float): The min value for x. max_val (float): The max value for x. Returns: float: The clipped value. """ return max(min_val, min(x, max_val))
9060f990e89eef1ce40b5b8e97bcf465c8f28ef2
460,224
def get_words_from_string(line): """ Return a list of the words in the given input string, converting each word to lower-case. Input: line (a string) Output: a list of strings (each string is a sequence of alphanumeric characters) """ word_list = [] # accumulates words in line character_list = [] # accumulates characters in word for c in line: if c.isalnum(): character_list.append(c) elif len(character_list)>0: word = "".join(character_list) word = word.lower() word_list.append(word) character_list = [] if len(character_list)>0: word = "".join(character_list) word = word.lower() word_list.append(word) return word_list
b00825a5801b7fe92c58f1caeb6f2d1e02d3a9d2
447,127
def watt_hours(watts, hours): """Multiplies watts times the hours used""" return watts * hours
38e367ef25754fa47212b2459571fa6c3e6625d2
599,111
import torch def simple_inter(box1, box2): """ Simple intersection among bounding boxes. :param box1: bounding boxes coordinates. :param box2: bounding boxes coordinates. :return: intersection among pair of bounding boxes. """ top_left_i = torch.max(box1[..., :2], box2[..., :2]) bot_right_i = torch.min(box1[..., 2:], box2[..., 2:]) sizes = torch.clamp(bot_right_i - top_left_i, min=0) return sizes[..., 0] * sizes[..., 1]
3efe0232849ee827ca9af2f3bb9dc2675ba07e33
424,157
def get_inputs( filename ): """ The input file contains rules and messages separated by a blank line. """ with open( filename, 'r') as input_file: raw_data = input_file.read().split('\n\n') rules = raw_data[0].splitlines() messages = raw_data[1].splitlines() return rules, messages
cd277aac4928413d77af6f842644f929b736a027
176,296
def remove_empty_parameters(data): """Accepts a dictionary and returns a dict with only the key, values where the values are not None.""" return {key: value for key, value in data.items() if value is not None}
4448556ffd3fe20a651986396c6b724ce6157f4f
632,716
def _intersect(rect1, rect2): """ Check whether two rectangles intersect. :param rect1, rect2: a rectangle represented with a tuple (x,y,w,h,approxPoly_corner_count) :return whether the two rectangles intersect """ # check x x_intersect = False if rect1[0] <= rect2[0] and rect2[0] - rect1[0] < rect1[2]: x_intersect = True if rect2[0] <= rect1[0] and rect1[0] - rect2[0] < rect2[2]: x_intersect = True # check y y_intersect = False if rect1[1] <= rect2[1] and rect2[1] - rect1[1] < rect1[3]: y_intersect = True if rect2[1] <= rect1[1] and rect1[1] - rect2[1] < rect2[3]: y_intersect = True return x_intersect and y_intersect
24892f225ff2794e8f1f37714ba69c724dec4651
87,706
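Illustrative checks of _intersect above; the rectangle tuples are made-up values (the fifth element, the corner count, is ignored by the function).

print(_intersect((0, 0, 10, 10, 4), (5, 5, 10, 10, 4)))   # True: the boxes overlap
print(_intersect((0, 0, 4, 4, 4), (5, 5, 3, 3, 4)))       # False: separated in x and y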
from typing import List def find(arr: List[int], key: int) -> int: """Modified version of binary search: one half of the array must be already sorted. Therefore check the sorted half to decide whether to search the left or right half. This takes O(lg n) time and O(1) space. """ lo = 0 hi = len(arr) - 1 while lo <= hi: mid = int(lo / 2 + hi / 2) if key == arr[mid]: return mid # left partition is sorted if arr[lo] <= arr[mid]: if arr[lo] <= key < arr[mid]: hi = mid - 1 else: lo = mid + 1 else: # right partition is sorted if arr[mid] < key <= arr[hi]: lo = mid + 1 else: hi = mid - 1 return -1
b4a49cc73009b27344fc403a79c26130339b5d04
644,082
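An illustrative search with find above on a rotated sorted array; the array and keys are assumptions.

rotated = [6, 7, 8, 1, 2, 3, 4, 5]
print(find(rotated, 3))   # 5: index of the key
print(find(rotated, 9))   # -1: key not present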
def createFile(name, paramToValue, numberOfCycles=1): """Creates a dictionary storing file information""" File = {'name': name, 'paramToValue': paramToValue, 'numberOfCycles': numberOfCycles} return File
55294fb81152977ff0262c78888fb08986234a0a
658,451
def __convert_sec_to_time(seconds): """Convert sec to time format""" seconds = seconds % (24 * 3600) hour = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 return hour, minutes, seconds
01f859b8b622b6bb403ffa79b2a7688b39bc07be
263,677
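A small illustrative call of __convert_sec_to_time above; 3725 seconds is an arbitrary example value.

print(__convert_sec_to_time(3725))   # (1, 2, 5) -> 1 hour, 2 minutes, 5 seconds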
def generate_sample_fov_tiling_entry(coord, name): """Generates a sample fov entry to put in a sample fovs list for tiling Args: coord (tuple): Defines the starting x and y point for the fov name (str): Defines the name of the fov Returns: dict: An entry to be placed in the fovs list with provided coordinate and name """ sample_fov_tiling_entry = { "scanCount": 1, "centerPointMicrons": { "x": coord[0], "y": coord[1] }, "timingChoice": 7, "frameSizePixels": { "width": 2048, "height": 2048 }, "imagingPreset": { "preset": "Normal", "aperture": "2", "displayName": "Fine", "defaults": { "timingChoice": 7 } }, "sectionId": 8201, "slideId": 5931, "name": name, "timingDescription": "1 ms" } return sample_fov_tiling_entry
c8e2e0886f9e5e67398726053a977c32b33cbcc7
287,828