Columns: content (string, lengths 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k)
import re


def extractMigrationInformation(filename: str, direction: str):
    """Extract the version from a filename e.g. V1.0.3.up.sql -> V1.0.3"""
    match = re.search(
        r"(?P<version>[^_]*)_?(?P<description>.*)\." + direction + r"\.sql",
        filename, re.IGNORECASE)
    if not match:
        return {'version': None, 'description': None}
    return {'version': match.group('version'),
            'description': match.group('description')}
41983a2aa4e0b8c144db758da6cbec9d6803bd6c
177,514
def normalize(xs, axis=None, eps=1e-8):
    """
    Normalize array along axis

    :param xs: np.array(), array to normalize
    :param axis: int, axis along which is normalized
    :param eps: float, offset to avoid division by zero
    :return: np.array(), normed array
    """
    return (xs - xs.mean(axis=axis)) / (xs.std(axis=axis) + eps)
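A quick usage sketch for the function above; the array values are hypothetical and assume NumPy:

import numpy as np

xs = np.array([[1.0, 2.0], [3.0, 4.0]])
normalize(xs, axis=0)  # each column now has ~zero mean and ~unit std: [[-1., -1.], [1., 1.]]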
548b4dc7eae65ccbfabffd2869fc6f30f9bf70f4
317,305
def month_diff(d1, d2):
    """Return the number of months between d1 and d2, such that
    d2 + month_diff(d1, d2) == d1
    """
    diff = (12 * d1.year + d1.month) - (12 * d2.year + d2.month)
    return diff
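A hedged example with datetime.date inputs (dates chosen arbitrarily):

from datetime import date

month_diff(date(2021, 3, 1), date(2020, 12, 1))  # -> 3
month_diff(date(2020, 12, 1), date(2021, 3, 1))  # -> -3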
3a7545049dba00da755564614c56bcb2659c0324
199,857
def _get_key_and_indices(maybe_key_with_indices):
    """Extracts key and indices from key in format 'key_name[index0][index1]'."""
    patterns = maybe_key_with_indices.split('[')
    if len(patterns) == 1:
        return (maybe_key_with_indices, None)
    # For each index ensure that the brackets are closed and extract number
    indices = []
    for split_pattern in patterns[1:]:
        # Remove surrounding whitespace.
        split_pattern = split_pattern.strip()
        if split_pattern[-1] != ']':
            raise ValueError(
                'ParameterName {} has bad format. Supported format: key_name, '
                'key_name[index0], key_name[index0][index1], ...'.format(
                    maybe_key_with_indices))
        try:
            indices.append(int(split_pattern[:-1]))
        except ValueError:
            raise ValueError(
                'Only integer indexing allowed for ParameterName. '
                'Faulty specification: {}'.format(maybe_key_with_indices))
    return patterns[0], indices
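Hypothetical parameter names illustrating both supported input shapes:

_get_key_and_indices('learning_rate')     # -> ('learning_rate', None)
_get_key_and_indices('layer_size[0][2]')  # -> ('layer_size', [0, 2])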
ffc065c60da419b73b1283e06a69098eeac19fbb
37,070
import gzip
import pickle


def read_pickle(path: str) -> object:
    """Load a Python pickle that was compressed with Gzip.

    Args:
        path: Path to pickle

    Returns:
        Unpickled object
    """
    with gzip.open(path, 'rb') as f:
        return pickle.load(f)
a4f1579a2145993a152123fe38369823e309455f
204,080
def bigram_shingler(s):
    """Extract a set of 2-character n-grams (character bigrams) from a string."""
    # Note: the parameter was renamed from `str` to `s` to avoid shadowing the builtin.
    big_set = {s[x:x + 2] for x in range(0, len(s) - 1) if len(s) > 1}
    return big_set
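Example (input chosen arbitrarily); note the result is a set, so duplicates collapse and order is not guaranteed:

bigram_shingler("banana")  # -> {'ba', 'an', 'na'}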
c82ad4b1d76b25e6e1101af9316e1bc6fd7cc408
163,085
def parse_classification(tool):
    """
    Parses a classification from an ontology.
    Only radio, checklist, and text are supported for MAL.

    Args:
        tool (dict)
    Returns:
        dict
    """
    if tool['type'] in ['radio', 'checklist']:
        return {
            'tool': tool['type'],
            'featureSchemaId': tool['featureSchemaId'],
            'options': [r['featureSchemaId'] for r in tool['options']]
        }
    elif tool['type'] == 'text':
        return {
            'tool': tool['type'],
            'featureSchemaId': tool['featureSchemaId']
        }
1c5fe385678ba375cfcfee97c56941382d151fd0
399,128
def make_context(host, port, path, store, callbacks, collection_funcs,
                 retrieve_funcs, entry_funcs, strip_meta, ctl, queue):
    """Returns a context object which is passed around by visit()."""
    return {"host": host,
            "port": port,
            "store": store,
            "path": path,
            "queue": queue or [],
            "ctl": ctl,
            "collection_funcs": collection_funcs,
            "retrieve_funcs": retrieve_funcs,
            "entry_funcs": entry_funcs,
            "strip_meta": strip_meta,
            "callbacks": callbacks}
2b469f661d2b33eed0e50b646805039bbe112b42
471,106
def get_all_terminals_of_inner_node(phylo_tree, node_id):
    """
    Given a phylogenetic tree and a preorder ID of an inner node, return a
    list of all leaf nodes of the subtree rooted at the inner node.
    """
    # Since the node IDs are unique, the loop is only iterated once.
    leaf_nodes = list()
    for clade in phylo_tree.find_clades(name=node_id, order='preorder'):
        leaf_nodes += clade.get_terminals(order='postorder')
    return leaf_nodes
36fbc43e0472415ab2215087412b8f4bfa1def2c
428,763
def almul(x, y):
    """Function to multiply 2 operands"""
    return x * y
f12e22e44d1a09c563501e58596cf8e52a93443e
520,498
import six


def _GetDuration(args):
    """Returns a formatted duration string, or None if no duration was given."""
    return six.text_type(args.duration) + 's' if args.duration else None
b659eefe06bf2f02946c97470b04580b0d956f1e
398,224
def all_additional_output_names(experiment_proto):
    """Return all additional output names from an Experiment proto.

    Args:
        experiment_proto: selection_pb2.Experiment describing the experiment.

    Returns:
        List of strings in sorted order.
    """
    names = [ao.name for ao in experiment_proto.additional_output]
    return sorted(names)
fdff817f5ec7b0a876b389a8447fc6e5b0e3dae3
582,164
def strip_profile_url(value):
    """Strip trailing 'weblog/' from a profile url."""
    if value.endswith('weblog/'):
        value = value[:-7]
    return value
d8e050ce8a84a2e3518d7fb226fd39db7a8d8b96
163,563
def strict_accuracy_N(act, pred, ignore_class=0):
    """
    Computes the accuracy of an array of tagged sentences.
    Actual values which match `ignore_class` are not factored in.

    Inputs:
    - act: array of actual numerical vectors
    - pred: array of predicted numerical vectors
    - ignore_class: numerical value to be ignored
    Outputs:
    - accuracy score
    """
    # number of correct predictions
    corr_preds = 0
    # number of predictions
    total_preds = 0
    # compute values via iterating over sentences
    for act_classes, pred_classes in zip(act, pred):
        for t in range(len(act_classes)):
            if act_classes[t] != int(ignore_class):
                total_preds += 1
                if pred_classes[t] == act_classes[t]:
                    corr_preds += 1
    # actual accuracy without padding
    return corr_preds / total_preds
18bc12f4456e9d75fcb502412b76823669fea742
140,077
import json


def _write_json(file, contents):
    """Write a dict to a JSON file."""
    with open(file, 'w') as f:
        return json.dump(contents, f, indent=2, sort_keys=True)
86f0809cb46d833135ac3941eef9916b23001d4e
121,790
def get_id(*, label, configuration):
    """Generates a unique identifier for a target.

    Args:
        label: The `Label` of the `Target`.
        configuration: The value returned from `get_configuration`.

    Returns:
        An opaque string that uniquely identifies the target.
    """
    return "{} {}".format(label, configuration)
a5f86cd4465938bb9faa67a20c7501c89d381fbd
515,217
def calc_csr(sigma_veff, sigma_v, pga, rd, gwl, depth):
    """
    Cyclic stress ratio from CPT, Eq 2.2.
    Note: `gwl` and `depth` are accepted but unused in this formula.
    """
    return 0.65 * (sigma_v / sigma_veff) * rd * pga
becedff4526031f5047e68a0a2d51476bf56ca9b
26,232
def collection_core_fields(item):
    """Extract only fields that are used to identify a record."""
    record = {}
    # Define umm
    umm = item.get('umm', {})
    record['ShortName'] = umm.get('ShortName')
    record['Version'] = umm.get('Version')
    record['EntryTitle'] = umm.get('EntryTitle')
    # Define meta
    meta = item.get('meta', {})
    record['concept-id'] = meta.get('concept-id')
    return {key: value for key, value in record.items() if value}
c25d21d0f6d8c7752a75cada066cfb21a732a5a1
400,292
import re


def cut_iframes(value):
    """Filter which cuts <iframe> tags."""
    pattern = re.compile(r'<p><iframe .*</iframe></p>')
    return re.sub(pattern, '', value)
a8ab5e1f0c1c55a225c7d041374aba041d10e6d2
634,791
import builtins


def from_str_to(type_):
    """Return a cast function to convert a string to a builtin type.

    The `type_` parameter is the name of the type as a string.

    Returns the builtin Python cast function if `type_` is "str", "int", or
    "float". If `type_` is "bool", the returned cast function will return
    `True` for the values "y", "yes", "true", and "1" (case insensitive),
    otherwise it will return `False`. For any other `type_` specified, the
    builtin `str` function is returned.

    >>> f = from_str_to("str")
    >>> f("hello")
    'hello'
    >>> f = from_str_to("int")
    >>> f("10")
    10
    >>> f = from_str_to("float")
    >>> f("10")
    10.0
    >>> f = from_str_to("bool")
    >>> [f(s) for s in ['yes', 'no', 'true', 'false']]
    [True, False, True, False]
    """
    if type_ in ("str", "int", "float"):
        return getattr(builtins, type_)
    if type_ == "bool":
        # "True" was redundant in the original tuple once the input is lower-cased.
        return lambda s: s.lower() in ("y", "yes", "true", "1")
    return str
8b6624a0d759c17eb9d6fa596d9922c6c459286b
525,619
def replace_in_testbenches(
    testbenches, search_str, replace, include_langs=None, exclude_langs=None
):
    """
    In a testbench, replace a string with another string.

    Args:
        testbenches (str or dict): The testbench to replace text in. This may
            be a string representing a single testbench or a dictionary where
            different testbenches are indexed by language.
        search_str (str): String to search for.
        replace (str-like): Converted to string and used to replace search_str.
        include_langs (iterable, optional): Iterable of a subset of languages
            to perform the replacement. Defaults to None.
        exclude_langs (iterable, optional): Iterable of a subset of languages
            to exclude from replacement. Defaults to None.

    Returns:
        str or dict: The modified testbench. Same type as testbenches.
    """
    if isinstance(testbenches, str):
        testbenches = testbenches.replace(search_str, str(replace))
    else:
        for lang in [*testbenches]:
            if include_langs and lang not in include_langs:
                continue
            if exclude_langs and lang in exclude_langs:
                continue
            if isinstance(replace, dict):
                if lang in replace:
                    replace_str = str(replace[lang]).replace("$$lang", lang)
                else:
                    replace_str = ""
            else:
                replace_str = str(replace).replace("$$lang", lang)
            testbenches[lang] = testbenches[lang].replace(
                search_str, replace_str
            )
    return testbenches
3496175defb7327a166d78b77e1715fa2fb7e26d
388,410
def remove(seq1, seq2):
    """Remove the elements in `seq2` from `seq1`."""
    return tuple(elem for elem in seq1 if elem not in seq2)
49ab05fec0304aca078f419dfee0cb5fab38d5f5
640,602
import struct


def Int2Bytes(integer):
    """Converts an Int32 into a 4 byte 'bytes' object (native byte order)."""
    return struct.pack('i', integer)
c25ddfec574cb969c114d42476f743b548171392
488,547
import json


def getTokensInfo(ds):
    """
    Get name of the token set and number of tokens used for tokenization
    of problem solutions.

    Parameters:
    - ds  - directory with tokenized dataset

    Returns:
    - Name of token set
    - Number of tokens
    """
    _info_fn = f"{ds}/info.json"
    with open(_info_fn, 'r') as _info_json:
        _info = json.load(_info_json)
    return _info["token_set"], _info["n_tokens"]
e1b816ba4eb5e0f62ccccbc67bb5bea1cfb56788
575,039
def stellar_params_from_archive(df, kep_name):
    """Get stellar parameters for the host of a KOI from the exoplanet
    archive (downloaded data).

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe of exop. archive downloaded data
    kep_name : str
        Kepler name of planet

    Returns
    -------
    smass : float
        Stellar mass (solar mass)
    smass_uerr : float
        Stellar mass upper error
    smass_lerr : float
        Stellar mass lower error
    srad : float
        Stellar radius (solar radius)
    srad_uerr : float
        Stellar radius upper error
    srad_lerr : float
        Stellar radius lower error
    limbdark_mod : str
        Limb darkening model
    ldm_c1 : float
        Limb darkening coefficient 1
    ldm_c2 : float
        Limb darkening coefficient 2
    """
    row = df.loc[df['kepler_name'] == kep_name]
    smass = float(row.koi_smass)              # stellar mass (/solar mass)
    smass_uerr = float(row.koi_smass_err1)    # stellar mass upper error
    smass_lerr = float(row.koi_smass_err2)    # stellar mass lower error
    srad = float(row.koi_srad)                # stellar radius (/solar radius)
    srad_uerr = float(row.koi_srad_err1)      # stellar radius upper error
    srad_lerr = float(row.koi_srad_err2)      # stellar radius lower error
    limbdark_mod = str(row.koi_limbdark_mod)  # LDM model
    ldm_c2 = float(row.koi_ldm_coeff2)        # LDM coef 2
    ldm_c1 = float(row.koi_ldm_coeff1)        # LDM coef 1
    return (smass, smass_uerr, smass_lerr, srad, srad_uerr, srad_lerr,
            limbdark_mod, ldm_c1, ldm_c2)
e87759395e7dbb3d84593e6c9f42e358b0589980
343,481
def render_value(v):
    """Render given value to string."""
    if isinstance(v, float):
        # ensure that we don't waste space on insignificant digits
        return f'{v:.2g}'
    return str(v)
750597882f2017aeeeca1b93c89e4e77ed1b98cf
509,463
import torch


def xxyy2xywh(box):
    """
    Convert the box (x1, y1, x2, y2) encoding format to (c_x, c_y, w, h) format.

    Arguments:
        box: tensor of shape (N, 4), boxes of (x1, y1, x2, y2) format
    Returns:
        xywh_box: tensor of shape (N, 4), boxes of (c_x, c_y, w, h) format
    """
    c_x = (box[:, 2] + box[:, 0]) / 2
    c_y = (box[:, 3] + box[:, 1]) / 2
    w = box[:, 2] - box[:, 0]
    h = box[:, 3] - box[:, 1]

    c_x = c_x.view(-1, 1)
    c_y = c_y.view(-1, 1)
    w = w.view(-1, 1)
    h = h.view(-1, 1)

    xywh_box = torch.cat([c_x, c_y, w, h], dim=1)
    return xywh_box
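A small sanity check (assumes PyTorch; the box values are hypothetical):

box = torch.tensor([[0.0, 0.0, 4.0, 2.0]])  # x1, y1, x2, y2
xxyy2xywh(box)  # -> tensor([[2., 1., 4., 2.]]), i.e. center (2, 1), width 4, height 2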
1d3e92c0c0636772694bd348b796a81f99d09106
357,357
def Returns1(target_bitrate, result):
    """Score function that returns a constant value."""
    # pylint: disable=W0613
    return 1.0
727e58e0d6d596cf4833ca3ca1cbcec6b9eedced
708,193
def _filter_featured_downloads(lst):
    """Filter the list, keeping only Featured files."""
    ret = []
    for item in lst:
        if 'Featured' in item['labels']:
            ret.append(item)
    return ret
d722fdd01966f1650575912715f8a6f07d793dda
700,475
def get_caption(etype, cell_meta, resources):
    """Return an ipypublish caption or False.

    Captions can either be located at cell_meta.ipub.<type>.caption, or at
    resources.caption[cell_meta.ipub.<type>.label]; the resources version
    is prioritised.
    """
    try:
        caption = cell_meta["ipub"][etype]["caption"]
    except (KeyError, TypeError):
        caption = False

    try:
        label = cell_meta["ipub"][etype]["label"]
    except (KeyError, TypeError):
        label = False

    rcaption = False
    if label:
        try:
            rcaption = resources["caption"][label]
        except (KeyError, TypeError):
            pass

    if rcaption:
        return rcaption
    return caption
b8e08c734efb6d3f5d6204cb70930cb835800b4c
630,707
from datetime import datetime


def isoformat(dt: datetime) -> str:
    """ISO format datetime object with max precision limited to seconds.

    Args:
        dt: datetime object to be formatted

    Returns:
        ISO 8601 formatted string
    """
    # IMPORTANT: should the format ever be changed, be sure to update TIMESTAMP_REGEX as well!
    return dt.isoformat(timespec="seconds")
679ce7aa71ab30e4c78a0953272c17f487714177
699,758
def change_device(arg_params, aux_params, ctx):
    """Changes device of given mxnet arguments.

    Args:
        arg_params (dict): arguments
        aux_params (dict): auxiliary parameters
        ctx (mx.cpu or mx.gpu): new device context

    Returns:
        dicts: arguments and auxiliary parameters on new device
    """
    new_args = dict()
    new_auxs = dict()
    for k, v in arg_params.items():
        new_args[k] = v.as_in_context(ctx)
    for k, v in aux_params.items():
        new_auxs[k] = v.as_in_context(ctx)
    return new_args, new_auxs
7442a4284cb7c777c5e03f2d7b35bd4fb75f9d5d
334,322
def entity_seqs_equal(expected, predicted):
    """
    Returns true if the expected entities and predicted entities all match,
    returns false otherwise. Note that for entity comparison, we compare that
    the span, text, and type of all the entities match.

    Args:
        expected (list of core.Entity): A list of the expected entities for some query
        predicted (list of core.Entity): A list of the predicted entities for some query
    """
    if len(expected) != len(predicted):
        return False
    for expected_entity, predicted_entity in zip(expected, predicted):
        if expected_entity.entity.type != predicted_entity.entity.type:
            return False
        if expected_entity.span != predicted_entity.span:
            return False
        if expected_entity.text != predicted_entity.text:
            return False
    return True
2060aeaeecb3e210020466437a634aa6c01914e5
89,423
def as_request_params(**kwargs):
    """Coerce kwargs into a tuple of param=value strings."""
    return tuple('{0}={1}'.format(k, v) for k, v in kwargs.items())
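Usage sketch (keyword names are hypothetical; keyword order is preserved on Python 3.7+):

as_request_params(page=2, q='cats')  # -> ('page=2', 'q=cats')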
19329a08281ee610220b6cfd3c6fd3daa9893bf4
482,452
def dectobin(dec_string):
    """Convert a decimal string to binary string"""
    # Note: the [2:] slice removes the '0b' prefix and assumes a non-negative value.
    bin_string = bin(int(dec_string))
    return bin_string[2:]
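Examples (non-negative inputs, per the note above):

dectobin('10')   # -> '1010'
dectobin('255')  # -> '11111111'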
5f02507ae5e7ab855eceb7a5908347060b46a400
15,111
import itertools


def possible_library_filenames(library_names):
    """Given a collection of library names like 'libfoo', generate the set
    of library filenames that may be found on the system (e.g. libfoo.so).

    This generates the library filenames that may appear on any OS.
    """
    lib_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
    return set(
        '.'.join((lib, extension))
        for lib, extension in itertools.product(library_names, lib_extensions))
b6317246d5aa97ef7dacf0aee764de0f82953391
421,827
import hashlib


def md5sum(filepath):
    """Calculate md5_sum.

    Args:
        filepath (string): path to the file

    Returns:
        string - digest value as a string of hexadecimal digits
    """
    md5 = hashlib.md5()
    with open(filepath, "rb") as f:
        # Read in 8 KiB chunks so large files need not fit in memory.
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    return md5.hexdigest()
d04b85448d762b56907e799ba370d5a6ccabd5db
440,749
def estimateGalaxyMass(pot_ext, r_gal, G):
    """Estimate the equivalent mass of galaxy by -pot_ext*r_gal/G.

    Parameters
    -------------
    pot_ext: float
        the external potential of the center of the particle system
    r_gal: float
        the distance between the center of the particle system and the
        galactic center
    G: float
        gravitational constant

    Return
    -------------
    M_galaxy: float
        the estimated mass of the galaxy
    """
    M_galaxy = -pot_ext * r_gal / G
    if M_galaxy < 0:
        raise ValueError('External potential is positive! ', pot_ext)
    return M_galaxy
82bd568d8d77eb83a432a6b0c449fb7a6c9c29ef
135,566
import re


def MigrateImports(content):
    """Updates import statements from TestNG to JUnit."""
    content_new = re.sub('org.testng.annotations.Test',
                         'org.junit.Test', content)
    content_new = re.sub('org.testng.annotations.BeforeMethod;',
                         'org.junit.Before;', content_new)
    content_new = re.sub('org.testng.annotations.BeforeClass;',
                         'org.junit.BeforeClass;', content_new)
    content_new = re.sub(
        'import org.testng.annotations.DataProvider;',
        '''import com.tngtech.java.junit.dataprovider.DataProvider;
import com.tngtech.java.junit.dataprovider.DataProviderRunner;
import com.tngtech.java.junit.dataprovider.UseDataProvider;
import org.junit.runner.RunWith;''',
        content_new)
    # for remaining imports such as assertEquals
    content_new = re.sub('testng', 'junit', content_new)
    return content_new
14a9dd9882376189b1767119437fd31ce54fd26e
112,127
def flatten_multicolumns(df):
    """Converts multi-index columns into single columns."""
    df.columns = [
        "_".join([el for el in col if el != ""]).strip()
        for col in df.columns.values
        if len(col) > 1
    ]
    return df
1c8ce6d0a1139263d248139598830a03f46a9d92
295,526
def is_target_in_tags(tag, topic_list):
    """Check if the tags contain the target."""
    return bool(set(tag).intersection(topic_list))
9fb62a23c26060cf18e7b638664fb1fa4343f5ba
188,039
def add_tab(title: str, htmlcode: str, comment_cell: bool = True) -> str:
    """Add new tab section for the report.

    By default adds an opinion editable box at the start.

    Parameters
    ----------
    title : str
        Title associated with this tab / section
    htmlcode : str
        All HTML code contained within this section
    comment_cell : bool
        Comment cell

    Returns
    -------
    str
        HTML code as string
    """
    html_text = f'<div id="{title}" class="tabcontent"></br>'
    if comment_cell:
        html_text += """<p style="border:3px; border-style:solid; border-color:#000000; padding: 1em; width: 1050px;" contentEditable="true"> No comment. </p>"""
    html_text += f"{htmlcode}</div>"
    return html_text
4095a5e519c9ded04a67444a4b31bd5f023fb255
604,997
import operator
import re


def extract_words(row, params):
    """
    Extract words as features.

    Args:
        row: A row of data
        stop_words: List of stop words
        case_insensitive: Use case insensitive?
        strip_entities: Strip entities (hashtags, user mentions, URLs)?
        strip_punctuation: Strip punctuation?
        strip_numbers: Strip numeric strings?
        strip_repeated_chars: Strip repeated characters?
        min_token_length: Minimum word token length
        max_features: Maximum number of feature tokens
    """
    word_feat = dict()
    if row and len(row.strip()) > 0:
        if params["case_insensitive"]:
            row = row.lower()
        if params["strip_entities"]:
            row = ' '.join(re.sub(
                r"(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
                " ", row).split())
        if params["strip_punctuation"]:
            row = re.sub('[^a-zA-Z0-9]', ' ', row)
        if params["strip_numbers"]:
            row = re.sub('[0-9]', ' ', row)
        if params["strip_repeated_chars"]:
            row = re.sub(r'((\w)\2{2,})', ' ', row)
        if params["stop_words"]:
            row = row.replace('_', ' ').replace('-', ' ')
            x = [i.lower() for i in row.split()
                 if i.lower() not in params["stop_words"]]
            row = ' '.join(x)
        for t in [x for x in row.split() if len(x) >= params["min_token_length"]]:
            if 'word_' + t not in word_feat:
                word_feat['word_' + t] = 0
            word_feat['word_' + t] += 1
        if params["max_features"]:
            # sorts dictionary of word features in descending order of occurrence
            word_feat = sorted(word_feat.items(),
                               key=operator.itemgetter(1), reverse=True)
            # creates dictionary of features for top max_features number of features
            word_feat = dict(word_feat[:params["max_features"]])
    return word_feat
cf31ac24e31c30467e4106ee8a78905fd46191a1
464,808
def ctcp(function=None, *command_list):
    """Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``).

    :param str command_list: one or more CTCP command(s) on which to trigger

    .. versionadded:: 7.1

        This is now ``ctcp`` instead of ``intent``, and it can be called
        without argument, in which case it will assume ``ACTION``.

    .. note::

        This used to be ``@intent``, for a long dead feature in the IRCv3
        spec. It is now replaced by ``@ctcp``, which can be used without
        arguments. In that case, Sopel will assume it should trigger on
        ``ACTION``.

        As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``.
    """
    default_commands = ('ACTION',) + command_list
    if function is None:
        return ctcp(*default_commands)  # called as ``@ctcp()``
    elif callable(function):
        # called as ``@ctcp`` or ``@ctcp(function)``
        # or even ``@ctcp(function, 'ACTION', ...)``
        return ctcp(*default_commands)(function)

    # function is not None, and it is not a callable
    # called as ``@ctcp('ACTION', ...)``
    ctcp_commands = (function,) + command_list

    def add_attribute(function):
        function._sopel_callable = True
        if not hasattr(function, "intents"):
            function.intents = []
        for name in ctcp_commands:
            if name not in function.intents:
                function.intents.append(name)
        return function

    return add_attribute
a97131ff7f1c072e65579c5031609937c5f80bd9
503,673
def same_family(first_person, second_person) -> bool:
    """Check whether two persons have the same family or not.

    Args:
        first_person (Person): The first input person.
        second_person (Person): The second input person.

    Returns:
        bool: True if the families match. Otherwise, returns False.
    """
    return first_person.family is second_person.family
32845caba59d6166454c129d6166f8006e306bbf
194,312
import re
from collections import OrderedDict


def read_avg_residuemap(infile):
    """Read sequence definition from PSN avg file, returning sequence Map.

    :param infile: File handle pointing to WORDOM avgpsn output file
    :return: Returns an internal.map.Map object mapping the .pdb residues to
             WORDOM id's from "Seq" section of the avgpsn-file
    """
    m_start = re.compile(r"^\*\*\* Seq \*\*\*")
    m_end = re.compile(r"^============")
    m_entry = re.compile(r"^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$")
    residuemap = OrderedDict()
    reading = False
    for line in infile:
        if reading:
            # Stop reading if end of interaction strength section
            if m_end.search(line):
                break
            if m_entry.search(line):
                [num, resname, normfact] = line.split()
                residuemap[resname] = int(num)
        # Start reading when header found
        elif m_start.search(line):
            reading = True
    return residuemap
92c4cbe53edcd3d894a038d7cb9308c653e37146
3,206
def get_mers(seq, size):
    """
    Get X-mers from seq.

    Example (sets are unordered, so display order may vary)
    -------
    >>> get_mers('MEAIKHD', 3)
    {'MEA', 'EAI', 'AIK', 'IKH', 'KHD'}
    """
    mers = []
    mersa = mers.append
    for i in range(len(seq) - (size - 1)):
        mersa(seq[i:i + size])
    return set(mers)
516b9afa3ba38ff918e2025a4aed709c18711e3a
72,253
def find_first_diff_pos(str1: str, str2: str) -> int:
    """Finds the first difference position.

    Returns `-1` if both strings are identical."""
    if str1 == str2:
        return -1

    shorter, longer = sorted((str1, str2), key=len)
    shorter_len = len(shorter)
    return next(
        (i for i in range(shorter_len) if shorter[i] != longer[i]),
        shorter_len,
    )
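Worked examples (strings chosen arbitrarily):

find_first_diff_pos("color", "colour")  # -> 4 ('r' vs 'u')
find_first_diff_pos("abc", "abcdef")    # -> 3 (the shorter string is a prefix)
find_first_diff_pos("same", "same")     # -> -1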
a49833e3fed8f70470937beccbff6f3f46a1ba31
434,027
def is_not_loaded(a_hash):
    """
    Do we have no intention of loading the data a_hash is supposed to contain?

    If a hash has a single key 'NotLoaded' that means we don't intend to load
    that hash and we shouldn't complain about data inconsistency involving
    the hash. So if we're loading senses and the sense_inventory_hash
    :func:`is_not_loaded` then we shouldn't drop senses for being references
    against lemmas that don't exist.
    """
    # Note: comparing `a_hash.keys()` to a list only worked on Python 2;
    # on Python 3 the keys view must be compared against a set.
    return bool(a_hash) and set(a_hash.keys()) == {'NotLoaded'}
f1b5475cfb91aef449f5c3108d49e9d108605b92
486,123
def distance(coord_a, coord_b):
    """
    Calculate the Euclidean distance between 2 coordinates.

    Arguments:
        coord_a (array): coordinate of point a
        coord_b (array): coordinate of point b
    Return:
        dist (float)
    """
    assert len(coord_a) == len(coord_b)
    sum_square_dist = 0
    for a, b in zip(coord_a, coord_b):
        sum_square_dist += (a - b) ** 2
    # Euclidean distance is the square root of the summed squares; the
    # original raised to 1/len(coord_a), which is only correct in 2D.
    dist = sum_square_dist ** 0.5
    return dist
01d7c93c50d90a8e2089a754399431de4eacf962
623,226
import requests
from bs4 import BeautifulSoup


def extract_url(url, values=None):
    """Given a URL, extract the contents as a BeautifulSoup object.

    Args:
        url: URL to parse.
        values: optional form data; when given, a POST request is made.

    Returns:
        The site contents as a BeautifulSoup object
    """
    if values:
        r = requests.post(url, data=values)
    else:
        r = requests.get(url)
    # Create the soup :)
    html_raw = r.text
    # Here we use lxml, because the default html.parser is not working properly.
    soup = BeautifulSoup(html_raw, 'lxml')
    return soup
56ea95796cb718634390ac595d74f637bc8385d6
653,023
import sqlite3


def robot_type_exists(cursor: sqlite3.Cursor, type: str) -> bool:
    """Determines whether or not the robot type exists in the db.

    :param cursor: [description]
    :type cursor: sqlite3.Cursor
    :param type: [description]
    :type type: str
    :return: [description]
    :rtype: bool
    """
    # fetchone() returns a plain tuple by default (and sqlite3.Row has no
    # .get method), so read the count by index.
    type_exists: int = cursor.execute(
        '''SELECT COUNT(*) FROM robot_type WHERE id = :id''',
        {"id": type.upper()}
    ).fetchone()[0]
    return type_exists == 1
4a7646c286ad92683e819263bed8410bff069736
56,395
import requests


def fetch_words(url):
    """Fetch a list of strings from a URL.

    Args:
        url: The URL of a UTF-8 text document.

    Returns:
        A list of strings containing words from the document.
    """
    # Note: the header name is 'User-Agent' (the original used 'User-Agents').
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/74.0.3729.169 Safari/537.36')
    headers = {'User-Agent': user_agent}
    response = requests.get(url, headers=headers)
    story_word = []
    for line in response.text.split('\n'):
        line_word = line.split()
        for word in line_word:
            story_word.append(word)
    return story_word
72085859e550e3909f635288c75203b497b98c22
124,563
def _matches_version(actual_version, required_version):
    """Checks whether some version meets the requirements.

    All elements of the required_version need to be present in the
    actual_version.

        required_version  actual_version  result
        -----------------------------------------
        1                 1.1             True
        1.2               1               False
        1.2               1.3             False
                          1               True

    Args:
        actual_version: The version detected from the CUDA installation.
        required_version: The version specified by the user.

    Returns:
        Whether the actual version matches the required one.
    """
    if actual_version is None:
        return False

    # Strip spaces from the versions.
    actual_version = actual_version.strip()
    required_version = required_version.strip()

    return actual_version.startswith(required_version)
65c36bcb942bbdcf64a2ae42dca73bc424124151
447,637
import re

import click


def sanitize_user_name(ctx, param, value):
    """Check usernames for length and invalid characters."""
    del ctx
    del param
    if len(value) > 255:
        raise click.BadParameter('Username length > 255.')
    if not re.match(r'^[a-zA-Z0-9_\+=,\.@-]+$', value):
        raise click.BadParameter('Invalid characters in username')
    return value.lower()
dd5eb63ad5162cdeaafb6d5cdc2214091f7adbd1
474,946
def create_sample_data(directory, dataset):
    """Creates sample datasets for testing and saves them to a two-column csv.

    Parameters
    ----------
    directory : str
        Directory to write csv file to.
    dataset : int
        The integer associated with each sample testing dataset.

    Returns
    -------
    type : csv file
        two column csv file with wavelength (um) and flux (Jy).
    """
    target_filename = "sample_target_" + str(dataset) + ".csv"
    target_file_path = str(directory.join(target_filename))
    file = open(target_file_path, "w")

    # simple dataset
    if dataset == 1:
        file.write("3.55,0.389\n4.49,0.357\n5.73,0.344\n7.87,0.506\n23.7,0.676")
    # longer dataset
    if dataset == 2:
        file.write("1.2,0.4\n1.5,0.1\n1.6,0.1\n1.7,0.2\n2,0.7\n5,1.0\n40,0.676")
    # out of order and bad fluxes
    if dataset == 3:
        file.write("1,0.389\n4,0.357\n3,0.344\n10,0\n40,-90")
    # # not enough sources (failure)
    # if dataset == 4:
    #     file.write("1,0.389\n4,0.357")

    file.close()
    return target_file_path
d4925a82e9802305c1dc31c4c2afe7e8c1bba1da
411,209
import hashlib


def make_merkle(blob_name: str) -> str:
    """Creates a "merkle" by hashing the blob_name to get a unique value."""
    m = hashlib.sha256()
    m.update(blob_name.encode("utf-8"))
    return m.hexdigest()
1f61b235746e8ae34fe1e0b24e6d141d99517b64
655,615
def _mpv_coax_proptype(value, proptype=str):
    """Intelligently coax the given python value into something that can be
    understood as a proptype property."""
    if type(value) is bytes:
        return value
    elif type(value) is bool:
        return b'yes' if value else b'no'
    elif proptype in (str, int, float):
        return str(proptype(value)).encode('utf-8')
    else:
        raise TypeError(
            'Cannot coax value of type {} into property type {}'.format(
                type(value), proptype))
71b7a1f82219bc4933293fc4c5f2f9ba322f654c
454,447
def merge_exact_duplicates(dictionaries):
    """Given an array of dictionaries, merge the dictionaries into one final
    result."""
    final_dict = {}
    # Simple case for exact duplicates
    for d in dictionaries:
        duplicate_keys = set(d).union(final_dict)
        for key in duplicate_keys:
            arr = final_dict.get(key, []) + d.get(key, [])
            final_dict[key] = arr
    # If keys can have "nearness" we need a more advanced merge
    return final_dict
cf82d96ce88afe1b5240fa67b0e812bd2bfed8d9
296,802
def accession(data):
    """Get the accession for the given data."""
    return data["mgi_marker_accession_id"]
132dcbdd0712ae30ce7929e58c4bc8cdf73aacb2
3,699
from datetime import datetime


def log_date_to_python_date(s):
    """Convert a log date (string) to a Python date object.

    Args:
        s (str): string representing the date ('%d/%b/%Y').

    Returns:
        date: Python date object.
    """
    return datetime.strptime(s, '%d/%b/%Y').date()
9a6d514b2b3044dbe1cb5f2ece6c41892665c3f8
362,865
def convert_event_type_case(event, case_force_upper=False):
    """
    Forces upper or lower case for event types at the end of the code engine.
    For Snowflake force UPPER and for Redshift force lower.

    :param event: A dict with the entire event
    :param case_force_upper: True to force upper case.
    :return: An event with the case altered in event_type

    Examples:

    .. code-block:: python

        # Example #1
        event = {'_metadata': {'event_type': 'My_SCHEMA.my_table'}}
        event = convert_event_type_case(event)
        event = {'_metadata': {'event_type': 'my_schema.my_table'}}

        # Example #2
        event = {'_metadata': {'event_type': 'My_SCHEMA.my_table'}}
        event = convert_event_type_case(event, case_force_upper=True)
        event = {'_metadata': {'event_type': 'MY_SCHEMA.MY_TABLE'}}
    """
    # The original duplicated the upper/lower branches; factoring out the
    # conversion keeps the behavior identical for both dicts and lists.
    convert = str.upper if case_force_upper else str.lower
    events = event if isinstance(event, list) else [event]
    for each_event in events:
        if 'event_type' in each_event['_metadata']:
            each_event['_metadata']['event_type'] = convert(
                each_event['_metadata']['event_type'])
    return event
c8cfe77d67d64fb8ab3448047d76a1275ee3df6a
281,742
import re


def fix_inline_math(f):
    r"""
    In the code, I prefer to use inline math like $\int \bm x \mathrm dx$,
    so we have to convert it to rst inline math
    :math:`\int \boldsymbol x \mathrm dx`.

    This is done by using a regex to find groups of $something$ and replace
    the closing $ with `. Afterwards, we replace the remaining $ with :math:`.
    """
    f = re.sub(r"\\bm", r"\\boldsymbol", f)
    f = re.sub(r"(\$[^\$]*)\$", r"\1`", f)
    return re.sub(r"\$", r":math:`", f)
a4c5a853cb499f2ef4adab4178f2b7bd2888fede
224,795
def _print_and_append_loss(
    BCE_loss_series, W_loss_series, C_loss_series, loss_in_epoch
):
    """Helper function to print and save loss values in each epoch.

    Parameters
    ----------
    BCE_loss_series : list
        Binary Cross-Entropy loss values
    W_loss_series : list
        Wasserstein-1 loss values in generator
    C_loss_series : list
        Wasserstein-1 loss values in critic
    loss_in_epoch : list
        Losses in the epoch of the form [BCE_Loss, W_Loss, C_Loss]

    Returns
    -------
    List of losses [BCE_loss, W_loss, C_loss]
    """
    BCE_loss_series.append(loss_in_epoch[0])
    W_loss_series.append(loss_in_epoch[1])
    C_loss_series.append(loss_in_epoch[2])
    print(f"Generator - Binary-Cross Entropy Loss: {loss_in_epoch[0]}")
    print(f"Generator - Wasserstein Loss: {loss_in_epoch[1]}")
    print(f"Critic - Wasserstein Distance: {loss_in_epoch[2]}")
    return BCE_loss_series, W_loss_series, C_loss_series
e3e612de01cff66099a17eac221f5d9608cff038
297,262
import re


def parse_int(to_convert: str):
    """Converts a string to an integer, resolving the base from its prefix.

    Supported prefixes are:
    - 0x: Hexadecimal
    - 0b: Binary
    - 0o: Octal
    The rest is interpreted as base 10.

    Args:
        to_convert (string): The string to convert to an int.

    Raises:
        ValueError: If to_convert cannot be converted.

    Returns:
        int: The converted integer.
    """
    to_convert = to_convert.lower()
    # Determine which system this uses
    if re.match("0x.*", to_convert):
        base = 16
    elif re.match("0b.*", to_convert):
        base = 2
    elif re.match("0o.*", to_convert):
        base = 8
    else:
        base = 10
    # Try to convert the number.
    return int(to_convert, base)
80ec513356738f3d872d2c3df7415ec373768805
540,360
def _get_return_value_name_from_line(line_str: str) -> str:
    """
    Get the return value name from the target line string.

    Parameters
    ----------
    line_str : str
        Target line string. e.g., 'price : int'

    Returns
    -------
    return_value_name : str
        Return value name. If no colon character exists in the line string,
        a blank string will be set.
    """
    colon_exists: bool = ':' in line_str
    if colon_exists:
        return_value_name: str = line_str.split(':')[0]
    else:
        return_value_name = ''
    return_value_name = return_value_name.strip()
    return return_value_name
463f8caeb60b57703e15d5720013533d2c0c04f1
59,763
def delta(self):
    """
    This is the omega of the adjoint form, which is the same as the omega
    of the reciprocal form.

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,37])
        sage: Q.delta()
        148
    """
    return self.adjoint().omega()
a835655d7ff54ef7a66338bc96f0da5062a4e286
69,831
def flatlist(nested_list):
    """flattens nested list"""
    flat_list = [item for inner_list in nested_list for item in inner_list]
    return flat_list
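Example (flattens one level of nesting; the inner lists are hypothetical):

flatlist([[1, 2], [3], [4, 5]])  # -> [1, 2, 3, 4, 5]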
e2d9990a3ec24225c4447bfb20ad655ab94fa726
218,156
def get_genes(exp_file, samples, threshold, max_only):
    """Reads in and parses the .bed expression file.

    File format expected to be tab separated columns with header line:
        CHR START STOP GENE <sample 1> <sample 2> ... <sample n>

    Args:
        exp_file (str): Name of expression file.
        samples (list): Names of the samples in the vcf file.
        threshold (float): Expression threshold to filter lowly/unexpressed genes.
        max_only (bool): if true, gene_dict value is 1 value = max expression;
            if false, gene_dict value is list of expression values
            YYY: WARNING: if want list to have meaning then values need to be
            tied to header sample names

    Returns:
        gene_dict (dict): {gene_name: [expression_vals]}. Only include values
            for samples in the vcf.
    """
    data_cols = []
    gene_dict = {}
    print('start read exp_file:' + format(exp_file))

    if max_only:
        # read and only return max exp value in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                exp_val = -1e1000
                for idx in data_cols:
                    if float(line[idx]) > exp_val:
                        exp_val = float(line[idx])
                gene_dict[gene_name] = exp_val
    else:
        # read and return exp value list in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                exp_vals = []
                for idx in data_cols:
                    exp_vals.append(line[idx])
                gene_dict[gene_name] = exp_vals

    return gene_dict
62b27eef9c863078c98dee0d09bada5e058909e2
704,776
def rivers_with_station(stations):
    """Takes in a list of station objects and returns the set of rivers
    that have at least one station."""
    output_set = set()
    for station in stations:
        output_set.add(station.river)
    return output_set
381915de1c49b1e669005138dafe9c4f72569f88
333,310
import mimetypes
from pathlib import Path


def _isImage(p: Path) -> bool:
    """Determine if the given path is an image, based on mime type."""
    t, e = mimetypes.guess_type(str(p))
    if t and t.startswith("image"):
        return True
    else:
        return False
4077b3b0a3016524c076d5b331893a9ccce00ce5
391,561
import json


def process_jfile(jfile: str):
    """Load up a JSON file.

    Args:
        jfile (str): JSON file

    Returns:
        dict: JSON parsed
    """
    with open(jfile, 'rt') as fh:
        obj = json.load(fh)
    return obj
4304d0c5e5cfb62e03fd5736d78e18c3de91c90c
221,340
import torch


def filter_points(points, shape):
    """Remove points lying outside the image shape."""
    shape_tensor = torch.tensor(shape, dtype=torch.float) - 1
    mask = (points >= 0) & (points <= shape_tensor)
    mask = torch.all(mask, dim=1)
    filtered = points[mask]
    return filtered
1f226990aba8282f6caae0ffaf81bfdc78c55789
406,553
def check_format(args, headers):
    """Check format requested from arguments or headers.

    :param args: dict of request keyword value pairs
    :param headers: dict of request headers
    :returns: format value
    """
    # Optional f=html or f=json query param overrides accept
    format_ = args.get('f')
    if format_:
        return format_

    # Format not specified: get from accept headers
    # format_ = 'text/html'
    headers_ = None
    if 'accept' in headers.keys():
        headers_ = headers['accept']
    elif 'Accept' in headers.keys():
        headers_ = headers['Accept']

    format_ = None
    if headers_:
        headers_ = headers_.split(',')
        if 'text/html' in headers_:
            format_ = 'html'
        elif 'application/ld+json' in headers_:
            format_ = 'jsonld'
        elif 'application/json' in headers_:
            format_ = 'json'

    return format_
b9382d4a75eec1d85c64da6bebeb46314549ae74
299,104
def min_max_scaler(img_ndarray, final_range=(0, 1)):
    """Scale and transform feature values to the given range.

    Parameters
    ----------
    img_ndarray : 2D numpy ndarray, shape=(28, 28)
        Image to scale and transform
    final_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data

    Returns
    -------
    Scaled and transformed image as 2D numpy ndarray
    """
    px_min = final_range[0]
    px_max = final_range[1]
    # Hard-coded pixel value range
    img_std = img_ndarray / 255
    return img_std * (px_max - px_min) + px_min
6e612e3a29065667f9fe552e0da3192bf431bd5a
186,070
def output_T(results, contrast, retvals=('effect', 'sd', 't')):
    """Convenience function to collect t contrast results.

    Parameters
    ----------
    results : object implementing Tcontrast method
    contrast : array
        contrast matrix
    retvals : sequence, optional
        None or more of strings 'effect', 'sd', 't', where the presence of
        the string means that that output will be returned.

    Returns
    -------
    res_list : list
        List of results. It will have the same length as `retvals` and the
        elements will be in the same order as retvals.
    """
    r = results.Tcontrast(contrast, store=retvals)
    returns = []
    for valname in retvals:
        if valname == 'effect':
            returns.append(r.effect)
        if valname == 'sd':
            returns.append(r.sd)
        if valname == 't':
            returns.append(r.t)
    return returns
7cd7e1b66a41e95936d2521abd3a9583feb6d57a
326,054
def _get_model(args, func):
    """Returns the model class for a given function.

    Note that ``self`` is not available for proxy models.
    """
    if hasattr(func, '__self__'):
        # Bound method
        model = func.__self__.model
    elif hasattr(func, '__wrapped__'):
        # Proxy model
        model = func.__wrapped__.__self__.model
    else:
        # Custom method on user-defined model manager.
        model = args[0].model
    return model
87115eeed5fa45ef95b0f8c5728ee246b516d2e2
338,428
from bs4 import BeautifulSoup


def extract_feature_counts(html_summary: str) -> tuple:
    """Extract feature counts from an html summary using BeautifulSoup.

    If the html_summary from a raster dataset is supplied, there won't be any
    feature counts. An IndexError exception is caught and a tuple containing
    'None' values is returned.

    Parameters
    ----------
    html_summary : str
        The dataset update summary component from the LINZ Data Service
        Atom feed.

    Returns
    -------
    tuple
        (total_features, adds, modifies, deletes, total_changes)
    """
    soup = BeautifulSoup(html_summary, features="html.parser")
    feature_counts = soup.find_all("td")
    try:
        total_features = feature_counts[0].string
        adds = feature_counts[1].string
        modifies = feature_counts[2].string
        deletes = feature_counts[3].string
    except IndexError:
        return (None, None, None, None, None)
    total_changes = int(adds) + int(modifies) + int(deletes)
    return (total_features, adds, modifies, deletes, total_changes)
1227fd533a382bd495d6f2d11e7c652ce1ac88ca
403,335
def maybe(obj, name, default=None):
    """Return the attribute if it exists, or the default otherwise."""
    if hasattr(obj, name):
        return getattr(obj, name)
    return default
ebfe9b69d12818c0f2cab2fcbbca434633835768
550,957
def first_word(text):
    """Returns the first word in the given text."""
    # Note: the parameter was renamed from `str` to `text` to avoid shadowing the builtin.
    words = text.split()
    return words[0]
73f1efc24c6c68e92b2af824358b0656cfbe278b
21,448
def topic_duration_capture(data_dict: dict):
    """
    Gets the choice of practice from the user, along with the minutes the
    user wants to practice.

    :param data_dict:
    :return: dict | modified data_dict
    """
    topic_duration_keeper = dict()
    for key in data_dict.keys():
        choice = input(f"\n'{key.upper()}' -- wanna practice right now? "
                       f"Press 'y' + Enter for yes, else Enter: ")
        if choice:
            while True:
                try:
                    topic_duration_keeper[key] = int(input(
                        f"Enter time in minutes for practicing {key}: "))
                    break
                except ValueError:
                    print("Please enter time in minutes i.e. a number.")
    return topic_duration_keeper
09d78dc6b88105c9c2aedb2e7daf1cfecdd3065b
467,739
def leafNames(node):
    """Return a list of labels of all leaf descendants of node."""
    if node['kids']:
        return [leaf for kid in node['kids'] for leaf in leafNames(kid)]
    else:
        return [node['label']]
02e5d91c1ec125fbffee9d9f5516ed488c5fbf68
257,504
def calculate_timeout(start_point, end_point, planner):
    """Calculates the time limit between start_point and end_point assuming
    a fixed speed of 5 km/hr.

    Args:
        start_point: initial position
        end_point: target position
        planner: to get the shortest path between start_point and end_point

    Returns:
        time limit considering a fixed speed of 5 km/hr
    """
    path_distance = planner.get_shortest_path_distance(
        [start_point.location.x, start_point.location.y, 0.22],
        [start_point.orientation.x, start_point.orientation.y, 0.22],
        [end_point.location.x, end_point.location.y, end_point.location.z],
        [end_point.orientation.x, end_point.orientation.y,
         end_point.orientation.z])
    return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0
cb7ae44df9b6a89d2e171046fa0bdfe3f81445c5
706,002
from datetime import datetime


def parse_date(iso_date):
    """Parse a basic ISO 8601 date-only string (or pass a datetime through)
    and return it formatted as '%d %B %Y'."""
    if isinstance(iso_date, str):
        date_obj = datetime.strptime(iso_date, "%Y-%m-%d")
    else:
        date_obj = iso_date
    parsed_date = datetime.strftime(date_obj, "%d %B %Y")
    return parsed_date
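Example of both accepted input kinds (dates are hypothetical; reuses the datetime import above):

parse_date("2021-06-01")          # -> '01 June 2021'
parse_date(datetime(2021, 6, 1))  # -> '01 June 2021'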
6c8a74f347bb56ac8ace16bea9d2966444d8cb41
171,592
import math


def specific_psi_function(
    arg, k, l, exp_rates, freq, a
):  # pylint: disable=unused-argument
    """
    The specific version of the Ψ function that is used for the purpose of
    this study. This function is a simplification of the general_psi_function
    when the following are fixed:
        exp_rates = (0, C*mu, mu)
        freq = (1, ??, 1)
        a = 2

    Due to the way the hypoexponential cdf works the function is called only
    for values of k=1 and k=2. For these values the following hold:
        - k = 1 -> l = 1, ..., n
        - k = 2 -> l = 1

    Parameters
    ----------
    arg : float
        The argument of the cdf
    k : int
        Variable that goes from 0 to the number of distinct parameters
    l : int
        Variable that goes from 0 to the frequencies of each parameter_k
    exp_rates : tuple
        Distinct exponential parameters
    freq : tuple
        Frequencies of each distinct parameter
    a : int
        The length of the exp_rates and freq arrays

    Returns
    -------
    float
        The output of the Ψ function that is needed for the cdf of the
        Hypoexponential distribution.
    """
    if k == 1:
        psi_val = (1 / (arg ** l)) - (1 / (arg + exp_rates[2]) ** l)
        psi_val *= (-1) ** l * math.factorial(l - 1) / exp_rates[2]
        return psi_val
    if k == 2:
        psi_val = -1 / (arg * (arg + exp_rates[1]) ** freq[1])
        return psi_val
    return 0
67b061c03b2684f38e007c025a479061981efac7
155,615
def get_dbf_from_config(config: dict) -> str:
    """Find the DBF file specified in a config.

    Must return a string, not a Path, in case there's a protocol.
    """
    shp_path = config['SHAPEFILE']
    # Note: this replaces every occurrence of 'shp', not just the extension.
    dbf_path = shp_path.replace('shp', 'dbf')
    return dbf_path
020d4391ff4639a18b662f6ad539e80a170a429d
195,063
def _is_empty(db) -> bool:
    """
    Helper method to check if the level db is empty (without any records).

    :param db: The level db handler.
    :return: True if the current level db is empty.
    """
    empty = True
    with db.iterator() as it:
        for _, _ in it:
            empty = False
            break
    return empty
12a8e92ff5d696616f5005a545d0a53aef6911ac
142,942
import random


def random_integer(n):
    """Returns a random integer between 0 and n (both endpoints inclusive)."""
    return random.randint(0, n)
d7f7c85559699a284463bc8095f5be77ab305098
561,874
def find_similarity(matches, current, source):
    """Searches for a specific file-similarity result between two files.

    Since the direction of similarity can go either way, the lookup is done
    in the specified direction.

    :param matches: Similarity detection results in JSON format
    :type matches: dict
    :param current: The name of the current file, assumed to copy from the
        `source` file
    :type current: str
    :param source: The name of the assumed source file; this is the file that
        `current` might have copied from
    :type source: str
    :return: Similarity results for both files, with the `similarity`
        percentage and the number of `lines` matched
    :rtype: dict
    """
    for file in matches["match_results"]:
        if file["file"] != source:
            continue
        for possible_source in file["possible_sources"]:
            if possible_source["file"] == current:
                return possible_source["similarity"]

    # If the file was not found, then there is no detected similarity between
    # the files. Return no similarity scores.
    return {
        "similarity": 0.0,
        "lines": 0
    }
adbba4d84de21649d2f36a1332ae53a9d33f6201
277,866
def get_unique_words(tokens):
    """
    Provide a list of unique tokens present in the list tokens.

    :param tokens: List of lists containing all of the tokens in the corpus
    :return: A list of all the unique tokens in the corpus
    """
    unique_words = set().union(*tokens)
    return list(unique_words)
5842d93f8dbf047ab395ef868bc4c61386bf1315
131,166
import re


def is_abspath(pathname):
    """
    Return True if pathname is an absolute pathname, else False.
    """
    return bool(re.match(r"^([/\\])|([a-zA-Z]:)", pathname))
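Behaviour sketch covering POSIX, Windows-drive, and relative paths (paths are hypothetical):

is_abspath('/usr/local')   # -> True
is_abspath('C:\\Users')    # -> True  (drive-letter prefix)
is_abspath('docs/readme')  # -> False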
5ae179d0bd88d72531a59b2c522c69d0983d89c4
68,043
def read_wien2k_fermi(filename):
    """
    Read the Wien2K .scf file and return the Fermi energy in units of Rydberg.

    The Fermi energy is given on lines starting with ":FER :". The .scf file
    may contain multiple of such lines. We want to keep the last one and
    extract the Fermi energy from that.
    """
    line_fermi = ""
    with open(filename, "r") as fp:
        for line in fp:
            if line.find(":FER :") > -1:
                line_fermi = line
    data = line_fermi.split("=")
    fermi_energy = float(data[1])
    return fermi_energy
c97518fb9c96f819812cd7696aa820eb42e83017
567,139
def ckpt_recency(ckpt):
    """Recency as Checkpoint importance metric.

    This function can also act as an example of how to make checkpoint
    importance keyfuncs. This is a named function, but as you can see it
    could be easily implemented as a lambda in a pinch.
    """
    return ckpt.meta["unixtime"]
3969e618389e20ced56e1026d2d4a011373c1bc9
51,909
def convert_type(data):
    """
    Converts each categorical feature with a numerical data type to a string
    data type.

    Parameters:
        data: The dataset in question.

    Returns:
        data: The transformed dataset.
    """
    # Categorical features
    columns = ['Browser', 'OperatingSystems', 'Region', 'TrafficType']
    for col in columns:
        data[col] = data[col].apply(lambda x: str(x))
    return data
d0196e0a7f35b39860bbe371531241af65ad6b8d
566,183
import json


def loadTagsFromFile(path):
    """Return tags loaded from file at 'path'."""
    # This can be used if you've already retrieved the tags.
    tags = None
    with open(path, 'r') as f:
        tags = json.loads(f.read())
    return tags
ec741bf845cf3d7a7e2d1b0ee0665caf1801f827
215,138
import json
from typing import List, Tuple


def get_proteins_list_from_json(file_path: str) -> Tuple[List[str], List[str]]:
    """Get proteins list from json.

    Args:
        file_path: path of the json file.

    Returns:
        Tuple of protein lists.
    """
    protein_json = {}
    with open(file_path) as fp:
        protein_json = json.load(fp)
    protein_list = []
    for _, value in protein_json.items():
        protein_list.append(value)
    return protein_list[0], protein_list[1]
b37a6e9cf85d56ded7ee6b3dd6ae4ba38b0c9970
243,453
import re


def parse_points(points_str):
    """Parses the points specification for polyline and polygon elements
    (SVG 1.1, 9.7.1).
    """
    # Treat #-# as # -#
    points_str = points_str.replace('-', ' -')
    return [float(s)
            for s in re.split("[\x20\x09\x0D\x0A]+|[,]", points_str)
            if s != ""]
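Example against a typical SVG points attribute (the value is hypothetical):

parse_points("10,20 30,-40")  # -> [10.0, 20.0, 30.0, -40.0]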
b2b29ffcf9e240ea06ca75d55f2595416b75963d
36,260
def sqrt(x):
    """Return square root of x"""
    return x ** 0.5
fb1b12a98f5c10dc04fce7827c8acd5be814926c
659,829
def report_exit(combined_test_report):
    """The exit code of this script is based on the following:
        0: All tests have status "pass", or only non-dynamic tests have
           status "silentfail".
        31: At least one test has status "fail" or "timeout".

    Note: A test can be considered dynamic if its name contains a ":"
    character.
    """
    ret = 0
    for test in combined_test_report.test_infos:
        if test.status in ["fail", "timeout"]:
            return 31
    return ret
f3fa40a42c8f6b61f8d2729abdb0e10321cc50f0
104,436
def parse_line_str(line_str):
    """Parse the line string to a list."""
    line_list = line_str.split(",")
    # cut the NEWLINE from the last field
    line_len = len(line_list) - 1
    line_list[line_len] = line_list[line_len].strip('\n').strip('\r')
    return line_list
24857eba3e5a25f13d2a4be4edf05473aa66e417
65,943