def first(seq, pred=None):
    """Return the first item in seq for which the predicate is true.

    If the predicate is None, return the first item regardless of value.
    If no items satisfy the predicate, return None.
    """
    if pred is None:
        pred = lambda x: True
    for item in seq:
        if pred(item):
            return item
    return None
f545ab4deb8c6d8103dd46dc85e0afd1f2597c6e
678,333
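A minimal usage sketch for `first`, assuming the definition above is in scope; the sample list and predicates are hypothetical:

# Hypothetical usage of first(), assuming the definition above.
nums = [1, 3, 4, 7]
assert first(nums) == 1                          # no predicate: first item
assert first(nums, lambda x: x % 2 == 0) == 4    # first even number
assert first(nums, lambda x: x > 100) is None    # no match: None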
def find_scheduled(all_fact_classes):
    """Finds which facts are scheduled to run now.

    Args:
        all_fact_classes: A list of all facts available.

    Returns:
        A list of facts which are scheduled to run at the current time.
    """
    facts_to_update = []
    for fact in all_fact_classes:
        if fact.__schedule__.should_run:
            facts_to_update.append(fact)
    return facts_to_update
c8fe2b611be77e3625ffc8958b3e7012cb6910c5
496,651
def is_int(arange):
    """Check if both endpoints of a range are ints.

    Args:
        arange ([int/float, int/float]): range to test

    Returns:
        boolean
    """
    if isinstance(arange[0], int) and isinstance(arange[1], int):
        return True
    return False
e9869e87f8b5111b8e6fe0ac5aca8b1f5412805a
623,287
import json
from datetime import datetime


def mfa_from_str(json_str: str, *, include_expiration=False) -> dict:
    """Create credentials dict from credentials as a json string.

    This function is a thin wrapper around json.loads.

    Args:
        json_str (str): String containing a json object, e.g.
            '''
            {
                "Credentials": {
                    "AccessKeyId": "FAKEACCESSKEY",
                    "SecretAccessKey": "Fake+Secret9Access-Key",
                    "SessionToken": "f4k3-SE5510N_t0k3n",
                    "Expiration": "2019-07-30T00:14:27Z"
                }
            }
            '''
        include_expiration (bool, optional): Whether to include expiration
            in the returned dict. Defaults to False.

    Returns:
        dict: with types as
            {
                "aws_access_key_id": str,
                "aws_secret_access_key": str,
                "aws_session_token": str,
                "expiration": datetime,  # only if include_expiration=True
            }

    Example:
        >>> import dynatrace_locksmith as ls
        >>> creds = ls.mfa_from_str(json_str)
        >>> creds
        {
            "aws_access_key_id": "1234567890",
            "aws_secret_access_key": "qwertyuiop",
            "aws_session_token": "asdfghjklzxcvbnm"
        }
        >>> session = boto3.Session(**creds)
    """
    if not isinstance(json_str, str):
        raise TypeError("json_str must be a str")
    if not isinstance(include_expiration, bool):
        raise TypeError("include_expiration must be a bool")

    creds = json.loads(json_str)["Credentials"]
    creds["aws_access_key_id"] = creds.pop("AccessKeyId")
    creds["aws_secret_access_key"] = creds.pop("SecretAccessKey")
    creds["aws_session_token"] = creds.pop("SessionToken")
    expiration = creds.pop("Expiration")
    if include_expiration:
        creds["expiration"] = datetime.strptime(
            expiration.replace("Z", "+0000"), "%Y-%m-%dT%H:%M:%S%z"
        )
    return creds
c4514566c61c2abcb718ed82c96e8069c6418b3d
223,987
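A quick sketch of calling `mfa_from_str` above with a fabricated credentials payload (the key values are placeholders, not real credentials):

# Hypothetical usage of mfa_from_str(), assuming the definition above.
payload = '''
{
    "Credentials": {
        "AccessKeyId": "FAKEACCESSKEY",
        "SecretAccessKey": "Fake+Secret9Access-Key",
        "SessionToken": "f4k3-SE5510N_t0k3n",
        "Expiration": "2019-07-30T00:14:27Z"
    }
}
'''
creds = mfa_from_str(payload)
assert creds["aws_access_key_id"] == "FAKEACCESSKEY"
assert "expiration" not in creds  # only present when include_expiration=True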
def generate_json(sourced_id, title, description, assign_date, due_date,
                  class_sourced_id, category, result_value_max, result_value_min):
    """Generate a JSON formatted value ready to be sent to OpenLRW.

    :param sourced_id:
    :param title:
    :param description:
    :param assign_date:
    :param due_date:
    :param class_sourced_id:
    :param category:
    :param result_value_max:
    :param result_value_min:
    :return:
    """
    if result_value_max is None:
        result_value_max = 0.0
    if result_value_min is None:
        result_value_min = 0.0
    return {
        "sourcedId": sourced_id,
        "title": title,
        "description": description,
        "assignDate": assign_date,
        "dueDate": due_date,
        "resultValueMax": result_value_max,
        "resultValueMin": result_value_min,
        "class": {"sourcedId": class_sourced_id},
        "metadata": {"type": category},
    }
1cc0a8ed71ecd08304d2e39215b57a2df3180bbd
74,330
def extract_relation(res, resource_type):
    """Take a unique resource and create the entries for relation.csv.

    Logic:
        Profile:
            Bound (Req) = element.binding[strength = required].valueset
            Bound (Ext) = element.binding[strength = extensible].valueset
            Bound (Pref) = element.binding[strength = preferred].valueset
            Bound (Exam) = element.binding[strength = example].valueset
            Extension = element.type[code = extension].profile
        ValueSet:
            valuesFrom = compose.include.system
            valuesFrom = expansion.contains.system
            includes = compose.include.valueSet
    """
    dict_relat = []
    relation_type_data = {
        "required": "Bound_Req",
        "extensible": "Bound_Ext",
        "preferred": "Bound_Pref",
        "example": "Bound_Exam",
    }
    if resource_type in ["Profile", "Data type"]:
        # The same extraction is applied to the snapshot and the differential
        # element lists, in that order.
        for element_list in (
            res.get('snapshot', {}).get('element', []),
            res.get('differential', {}).get('element', []),
        ):
            for element in element_list:
                binding = element.get("binding", {}).get("strength")
                value = element.get("binding", {}).get("valueSet")
                if binding and value:
                    stripped = value.split("|", 1)[0]  # remove version pipes
                    dict_relat.append({"source": res.get("id"),
                                       "target_url": stripped,
                                       "relation": relation_type_data[binding]})
                for l in element.get("type", []):
                    if l.get("code", {}) == "Extension" and l.get("profile"):
                        dict_relat.append({"source": res.get("id"),
                                           "target_url": l.get("profile")[0],
                                           "relation": "extension"})
                    for target_profile in l.get("targetProfile", []):
                        dict_relat.append({"source": res.get("id"),
                                           "target_url": target_profile,
                                           "relation": "references"})
    elif resource_type == "ValueSet":
        for s in res.get("compose", {}).get("include", []):
            if s.get("system"):
                dict_relat.append({"source": res.get("id"),
                                   "target_url": s.get("system"),
                                   "relation": "valuesFrom"})
            if s.get("valueSet"):
                dict_relat.append({"source": res.get("id"),
                                   "target_url": s.get("valueSet")[0],
                                   "relation": "includes"})
    return dict_relat
1d2e058946c99613c8c811ed8da5008ec9d6cf62
66,667
def gen_feat_val_list(features, values):
    """Generates feature value lists sorted by descending absolute value.

    Args:
        features: A list of feature names.
        values: A list of values.

    Returns:
        A sorted list of feature-value tuples.
    """
    sorted_feat_val_list = sorted(
        zip(features, values), key=lambda x: abs(x[1]), reverse=True
    )
    return sorted_feat_val_list
277e09c68b4b653386bb49fb50e4654e24d0e0ea
272,347
import torch


def rescale_img(imgs):
    """Rescale image values from [-1, 1] to [0, 255] and permute the channels
    of the images from [N, C, H, W] to [N, H, W, C].

    Args:
        imgs: Images to rescale.

    Returns:
        Rescaled images.
    """
    return (imgs.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
5da1b1fab54878721d6d7cc3c6338f6832e911f9
590,449
def blackout(im, bb):
    """Set intensity values at all locations on the given image im within
    bounding box bb to 0.

    Parameters
    ----------
    im : ndarray
        image with region to be blacked out
    bb : bounding box
        tuple of ints structured as (x_min, x_max, y_min, y_max)

    Returns
    -------
    blacked_out : the image with all locations within bounding box bb set to 0
    """
    # Note: no copy is made, so the input image is modified in place.
    blacked_out = im
    for r in range(blacked_out.shape[0]):
        for c in range(blacked_out.shape[1]):
            if (r >= bb[2]) and (r <= bb[3]):
                if (c >= bb[0]) and (c <= bb[1]):
                    blacked_out[r][c] = 0
    return blacked_out
87e6aa575be5ecb4b1472a01a52afc59d4d7f0fb
437,660
def spotify_id_from_url(url: str) -> str:
    """Extract the `Spotify ID`_ from a Spotify URL.

    Args:
        url (str): The URL to extract the `Spotify ID`_ from.

    Returns:
        str: The extracted `Spotify ID`_.
    """
    url = url[::-1]            # reverse the string
    url = url.split("/")[0]    # take the (reversed) last path segment
    url = url[::-1]            # restore original order
    url = url.split("?")[0]    # drop any query string
    return url
8946ea44a06a2ec838636a5db25f061fb28300a5
109,694
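A minimal usage sketch for `spotify_id_from_url` above; the URL is a hypothetical example:

# Hypothetical usage of spotify_id_from_url(), assuming the definition above.
url = "https://open.spotify.com/track/6rqhFgbbKwnb9MLmUQDhG6?si=abc123"
assert spotify_id_from_url(url) == "6rqhFgbbKwnb9MLmUQDhG6"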
def case_transform_dict_values(dictionary, func_name, transform):
    """Transform the string-type values of a dictionary.

    :param dictionary: dict to transform
    :param func_name: name of the transformation function used for error messages
    :param transform: transformation function
    :returns: dictionary where string values have been transformed
    """
    if not isinstance(dictionary, dict):
        raise TypeError(f'{func_name} accepts only dictionaries as argument, got {type(dictionary)}')
    new_dict = {}
    for k, v in dictionary.items():
        if isinstance(v, str):
            new_dict[k] = transform(v)
        else:
            new_dict[k] = v
    return new_dict
95168dd83fa61c640654cc6f89c70b032a8bb2db
541,515
def toLowerCase(s):
    """Convert a string to lowercase. E.g., 'BaNaNa' becomes 'banana'."""
    return s.lower()
0e4b5ceefc7ee3f5475befacc5c76788704d832f
508,123
import socket


def is_socket(sock):
    """Return True if the object can be used as a socket."""
    return isinstance(sock, socket.socket)
acfc01e11ee090ce6e7ffcbc3e4d8b667f45ca1b
216,298
import re


def parse_prerequisites(course_description):
    """Parses prerequisites from a course description.

    Args:
        course_description: The course description text.

    Returns:
        The course prerequisite codes.
    """
    if 'Prerequisite:' not in course_description:
        return []
    parts = course_description.split('Offered:')[0].split('Prerequisite:')
    return sorted(
        set([k.strip() for k in re.findall(r'([A-Z& ]+ \d+)', parts[1])]))
a6b4caa8b1dbc49c200c7aaa9d0f103072560452
145,114
def roman_event(date):
    """Return the event of Roman date 'date'."""
    return date[2]
f0c6ccf62b8c63a43a0158a7c744343cabb7d311
635,136
import re


def get_flag(txt):
    """Return anything matching basic flag format, otherwise empty string."""
    flag = ""
    pattern = r'.*([a-zA-Z]{3}\{.*\}).*'  # 3 letters followed by text in {}
    m = re.match(pattern, txt)
    if m:
        flag = m.group(1)
    return flag
69abf7d160294c112393720d48554eaea4a83310
191,198
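A minimal usage sketch for `get_flag` above, with made-up CTF-style strings:

# Hypothetical usage of get_flag(), assuming the definition above.
assert get_flag("the answer is CTF{s3cr3t} ok?") == "CTF{s3cr3t}"
assert get_flag("no flag here") == ""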
import struct


def read_plain_float(file_obj, count):
    """Read `count` 32-bit little-endian floats using the plain encoding."""
    return struct.unpack("<{}f".format(count), file_obj.read(4 * count))
6fc6da70ad99749ab8b0e68a3d556d5d99a33d48
216,903
from datetime import datetime


def float2timestamp(float_timestamp):
    """Converts POSIX timestamp to datetime.datetime object."""
    return datetime.fromtimestamp(float_timestamp)
fd62b0a1a4f4b71acf86db9d3f9d849c339f87a4
59,982
def ignore_exception(exception_class):
    """A decorator that ignores `exception_class` exceptions.

    The wrapped function returns None when an exception is swallowed.
    """
    def _decorator(func):
        def newfunc(*args, **kwds):
            try:
                return func(*args, **kwds)
            except exception_class:
                pass
        return newfunc
    return _decorator
bc092312ca09f6e7635768c1387871b494f306cc
173,068
def _shell_quote(s):
    """Copy of bazel-skylib's shell.quote.

    Quotes the given string for use in a shell command.

    This function quotes the given string (in case it contains spaces or
    other shell metacharacters).

    Args:
        s: The string to quote.

    Returns:
        A quoted version of the string that can be passed to a shell command.
    """
    return "'" + s.replace("'", "'\\''") + "'"
64d58b7ced621246d193c1b217fdd81a6c258a24
598,112
def distance(x1, y1, x2, y2, root=True):
    """Euclidean distance via the Pythagorean theorem.

    With root=False the squared distance is returned, which skips the
    square root for performance.
    """
    return ((x2-x1)**2 + (y2-y1)**2)**0.5 if root else (x2-x1)**2 + (y2-y1)**2
53d0665b6acb53828f274517e3010c961d51951c
192,010
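A minimal usage sketch for `distance` above, using the classic 3-4-5 triangle:

# Hypothetical usage of distance(), assuming the definition above.
assert distance(0, 0, 3, 4) == 5.0
# Squared distance is enough for comparisons and skips the sqrt:
assert distance(0, 0, 3, 4, root=False) == 25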
def gens(x):
    """Return the generators of ``x``.

    EXAMPLES::

        sage: R.<x,y> = SR[]
        sage: R
        Multivariate Polynomial Ring in x, y over Symbolic Ring
        sage: gens(R)
        (x, y)
        sage: A = AbelianGroup(5, [5,5,7,8,9])
        sage: gens(A)
        (f0, f1, f2, f3, f4)
    """
    return x.gens()
362fcc78f6a44fd3b8d8ab650cdfb9459af5c344
615,406
def needs_review(revision):
    """Return True if the revision needs review.

    If autolabel is empty, assume True.
    """
    return revision['autolabel'].get('needs_review', True)
a8c550ba7c65201f462489d0257c1406ca51d924
106,217
from datetime import date


def make_dir(parent_path, *dir_name, add_date=True):
    """Make a new directory.

    Parameters
    ----------
    parent_path : pathlib.Path
    dir_name : str (optional), if not given, files will be saved in parent_path
    add_date : bool
        make a sub-dir with a date

    Returns
    -------
    save_path : pathlib.Path
    """
    if dir_name:
        if add_date:
            today = date.today()
            save_path = parent_path / dir_name[0] / today.strftime("%Y-%m-%d")  # e.g. 2020-07-04
        else:
            save_path = parent_path / dir_name[0]
    else:
        if add_date:
            today = date.today()
            save_path = parent_path / today.strftime("%Y-%m-%d")
        else:
            save_path = parent_path
    if not save_path.exists():
        save_path.mkdir(parents=True)
    return save_path
d739d06c465b599b23ac0e3eeee277a8cd166a3d
401,195
def total_angular_momentum(particles):
    """Returns the total angular momentum of the particles set.

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [-1.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.vx = [0.0, 0.0] | units.ms
    >>> particles.vy = [-1.0, 1.0] | units.ms
    >>> particles.vz = [0.0, 0.0] | units.ms
    >>> particles.mass = [1.0, .5] | units.kg
    >>> particles.total_angular_momentum()
    quantity<[0.0, 0.0, 1.5] m**2 * kg * s**-1>
    """
    # equivalent to:
    # lx = (m * (y*vz - z*vy)).sum()
    # ly = (m * (z*vx - x*vz)).sum()
    # lz = (m * (x*vy - y*vx)).sum()
    return (particles.mass.reshape((-1, 1))
            * particles.position.cross(particles.velocity)).sum(axis=0)
8eca23b7b1a8fc8a7722543f9193f0e4a3397f24
3,979
def get_wolfram_query_url(query):
    """Get Wolfram query URL."""
    base_url = 'www.wolframalpha.com'
    if not query:
        return 'http://{0}'.format(base_url)
    return 'http://{0}/input/?i={1}'.format(base_url, query)
0122515f1a666cb897b53ae6bd975f65da072438
705,792
from collections import Counter


def system_call_count_feats(tree):
    """
    arguments:
      tree is an xml.etree.ElementTree object
    returns:
      a dictionary mapping 'num_system_calls' to the number of system_calls
      made by an executable (summed over all processes)
    """
    c = Counter()
    in_all_section = False
    for el in tree.iter():
        # ignore everything outside the "all_section" element
        if el.tag == "all_section" and not in_all_section:
            in_all_section = True
        elif el.tag == "all_section" and in_all_section:
            in_all_section = False
        elif in_all_section:
            c['num_system_calls'] += 1
    return c
15f2e8cb7ce46a84732e2641b10c0e1bf9e4d0b2
661,551
def gpm2m3_h(gpm):
    """gpm -> m^3/hr"""
    return 0.2271247056 * gpm
f0660a661a3c4db870100e7e3c06d7198e6f1122
521,630
def medium_file(file_path):
    """Open a medium file (headerless tsv, 3 columns (str, float, float)).

    Return a generator of (str, float, float) tuples.

    This function is used by argparse for type validation.
    """
    def row_generator(file_path):
        with open(file_path, "r") as fh:
            for line in fh:
                reaction_id, lower_bound, upper_bound = line.rstrip().split("\t")
                yield (reaction_id, float(lower_bound), float(upper_bound))
    return row_generator(file_path)
eba4c372a1c07feab28634d65dcbd148f9dfa995
28,489
from collections import OrderedDict
from typing import Dict


def compare_constraints(old_constraints: Dict, new_constraints: Dict) -> Dict:
    """Function to figure out which constraints are new and/or deleted.

    :param Dict old_constraints: The existing configuration
    :param Dict new_constraints: The new configuration
    :return: The altered constraints
    :rtype: Dict
    """
    # Set the pks
    old_pk = old_constraints.get('primary_key', {})
    new_pk = new_constraints.get('primary_key', {})
    # Set the change object
    pk_change = OrderedDict()
    # Calculate the delta
    if not new_pk and not old_pk:
        return pk_change
    elif old_pk and not new_pk:
        pk_change['drop_pk'] = old_pk
    elif new_pk and not old_pk:
        pk_change['new_pk'] = new_pk
    elif new_pk['name'] != old_pk['name']:
        pk_change['drop_pk'] = old_pk
        pk_change['new_pk'] = new_pk
    return pk_change
1ef42ac08aa38a7d6b1dec2eb0aba76c5fa97b3c
188,042
def feq(a, b, max_relative_error=1e-12, max_absolute_error=1e-12):
    """Returns True if a == b to the given relative and absolute errors,
    otherwise False.
    """
    # if the numbers are close enough (absolutely), then they are equal
    if abs(a - b) < max_absolute_error:
        return True
    # if not, they can still be equal if their relative error is small
    if abs(b) > abs(a):
        relative_error = abs(a - b) / abs(b)
    else:
        relative_error = abs(a - b) / abs(a)
    return relative_error <= max_relative_error
8d93f44535b2fb14db605cc39975b0d409db832c
310,750
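A minimal usage sketch for `feq` above, with the usual floating-point example:

# Hypothetical usage of feq(), assuming the definition above.
assert feq(0.1 + 0.2, 0.3)    # equal within the default tolerances
assert not feq(1.0, 1.1)      # relative error of roughly 0.09 is too large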
def url(anchor, uri):
    """Return a Markdown URL."""
    return f"[{anchor}]({uri})"
330bb36a77e40b62ad33d897f6043e825f1fb156
608,692
import math


def stdev(valuelist, mean=None):
    """Returns the (population) standard deviation of a list.

    Tested equivalent to but faster than numpy.std.

    If the mean is already available it can be passed in as a parameter
    to avoid recomputation.
    """
    N = float(len(valuelist))
    if mean is None:
        mean = math.fsum(valuelist) / N
    VarList = [(val - mean) ** 2 for val in valuelist]
    return math.sqrt(math.fsum(VarList) / N)
6dadb0858711ebcbceebdc3c91d062a5a2cd3b55
253,981
def get_strides_and_extra(numpoints, col):
    """Stride data to get fewer points.

    Calculate the strides and return the points left over at the end as extra.

    :param col: column to calculate with
    :param numpoints: number of wanted points
    """
    lx = col.size
    if lx <= numpoints:
        return None, None
    stride = int(lx / numpoints)
    points = numpoints * stride
    values_to_stride = col[:points]
    if hasattr(values_to_stride, 'values'):
        # pandas df or object behaving like it
        strided = values_to_stride.values.reshape((numpoints, stride))
    else:
        # numpy array or objects behaving like it
        strided = values_to_stride.reshape((numpoints, stride))
    extra = col[points:]
    return strided, extra
03f4c7f03a9285e59aa46ca999a3860ec3b1be0d
611,621
def checkColExist(DF, key):
    """This function checks if the column exists in a dataframe.

    Args:
        DF: pandas dataframe
        key: Expected column header name

    Returns:
        bool: True if column exists
    """
    return DF.get(key) is not None
baf72b08d68c3ed8d22542d2ff64db112bd1b4f2
549,468
def title_case(s):
    """Convert a string to have title case."""
    if not s:
        return None
    s = s.lower()
    parts = s.split(' ')
    lower_case = (
        'a', 'an', 'and', 'as', 'at', 'by', 'for', 'in',
        'of', 'on', 'or', 'the', 'to', 'with'
    )
    parts[0] = parts[0].title()
    parts = map(
        lambda part: part.title() if part not in lower_case else part,
        parts
    )
    return ' '.join(parts)
ae600e697b335e3643e48a83c5f8a2f2246076e8
422,387
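A minimal usage sketch for `title_case` above; small connecting words stay lowercase except at the start:

# Hypothetical usage of title_case(), assuming the definition above.
assert title_case("the lord of the rings") == "The Lord of the Rings"
assert title_case("") is None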
def HashKey(flavor):
    """Generate the name of the key for this flavor's hash.

    Arguments:
        flavor: kind of tool.

    Returns:
        The string key for the hash.
    """
    return 'NACL_TOOL_%s_HASH' % flavor.upper()
094041893b022599ffcac45e82464d67f1c7fa6b
276,256
def _generate_binary_deferer(op_func):
    """Given a binary operator, generate a method that applies that operator
    element-wise to a self and an other.

    See ReplicatedThinMatrices._defer_binary_elementwise for more.
    """
    def deferer(self, other, *args, **kwargs):
        return type(self)._defer_binary_elementwise(
            self, other, op_func, *args, **kwargs,
        )
    return deferer
1b20b8616d70ecf491640b27aec3e002aaad3d88
118,709
def compute_exposures(positions, factor_loadings):
    """Compute daily risk factor exposures.

    Parameters
    ----------
    positions: pd.Series
        A series of holdings as percentages indexed by date and ticker.
        - Examples:
            dt          ticker
            2017-01-01  AAPL    0.417582
                        TLT     0.010989
                        XOM     0.571429
            2017-01-02  AAPL    0.202381
                        TLT     0.535714
                        XOM     0.261905

    factor_loadings : pd.DataFrame
        Factor loadings for all days in the date range, with date and
        ticker as index, and factors as columns.
        - Example:
                                momentum   reversal
            dt          ticker
            2017-01-01  AAPL   -1.592914   0.852830
                        TLT     0.184864   0.895534
                        XOM     0.993160   1.149353
            2017-01-02  AAPL   -0.140009  -0.524952
                        TLT    -1.066978   0.185435
                        XOM    -1.798401   0.761549

    Returns
    -------
    risk_exposures_portfolio : pd.DataFrame
        df indexed by datetime, with factors as columns.
        - Example:
                         momentum   reversal
            dt
            2017-01-01  -0.238655   0.077123
            2017-01-02   0.821872   1.520515
    """
    risk_exposures = factor_loadings.multiply(positions, axis="rows")
    return risk_exposures.groupby(level="dt").sum()
46421690b65eaf081929427900861808e4ec820e
188,144
def splitNotaries(lines):
    """Segment the txt file into chunks of information for one notary.

    Args:
        lines (list): lines from the txt file

    Returns:
        list: list of lists, each with lines for one notary
    """
    notaryLines = []
    notaryInfo = []
    for i in lines:
        if i == '' and notaryInfo != []:
            notaryLines.append(notaryInfo)
            notaryInfo = []
        elif i != '':
            notaryInfo.append(i)
    # Keep the final notary even when the file does not end with a blank line.
    if notaryInfo:
        notaryLines.append(notaryInfo)
    return notaryLines
061f76131489271ca7d15b1f7eca4135d67c2ee6
439,321
import torch


def fliplr(img):
    """Flip image horizontally in a differentiable manner.

    Parameters
    ----------
    img: torch.Tensor
        Image batch (BCHW) to be horizontally flipped along the last dimension.

    Returns
    -------
    torch.Tensor:
        Horizontally flipped image (BCHW).
    """
    assert img.dim() == 4
    return torch.flip(img, [3])
920d01a603e8c819b5b42ca844219bcda6837b0e
388,080
def replace_layer(model, layer_name, replace_fn):
    """Replace single layer in a (possibly nested) torch.nn.Module using
    `replace_fn`.

    Given a module `model` and a layer specified by `layer_name`, replace the
    layer using `new_layer = replace_fn(old_layer)`. Here `layer_name` is a
    list of strings, each string indexing a level of the nested model; note
    that names are consumed from the end of the list via pop().
    """
    if layer_name:
        nm = layer_name.pop()
        model._modules[nm] = replace_layer(model._modules[nm], layer_name, replace_fn)
    else:
        model = replace_fn(model)
    return model
2e0ee082d6ab8b48979aa49e303a0e12583812b7
701,798
def populate_countries_dict(file_path):
    """Function to populate dictionary of countries and url numbers."""
    countries_dict = {}
    with open(file_path, 'r') as f:
        for line in f.readlines():
            key_value = line[:-1].split(':')
            countries_dict[key_value[0]] = int(key_value[1])
    return countries_dict
49f8a00573bc3900a7c0ec9be5bf653c8115c711
630,798
def _punctuation_config_preset(kwargs):
    """Populates the config to use punctuations as separators in the prompt."""
    return dict(
        kwargs,
        orig_input_prefix="",
        exemplar_input_prefix="",
        exemplar_output_prefix=" ## ",
        exemplar_separator=" @@ ")
ca2133121b0958aa225c8604b473c566470c12f9
588,947
def list_difference(list1, list2):
    """Values in list1 that are not in list2 = list1 - list2"""
    list3 = [value for value in list1 if value not in list2]
    return list3
dacc2d890b1235aebf5abd176b28b350e165ef75
459,346
def least_residue(a, m):
    """Returns least residue of a (mod m).

    Parameters
    ----------
    a : int
        denotes a in a (mod m)
    m : int
        denotes m in a (mod m)

    Returns
    -------
    int
        the integer least residue
    """
    return a % m
a697656664fa11c64c32d8902ebce893b70f9203
73,633
import re


def sanitized_name(name, wid=''):
    """Clean a step name and change it to proper format.

    It replaces all the unwanted characters with `_`.

    Args:
        name(str): The crude step name.
        wid(str): A workflow ID produced by utils.get_id().

    Returns:
        str: The sanitized step name.
    """
    return f"popper_{re.sub('[^a-zA-Z0-9_.-]', '_', name)}_{wid}"
29ae9713015a0ae93b318779d7c420dbb7d772fd
519,701
from typing import Dict


def master_name(tf_vars: Dict) -> str:
    """Construct master name for provided Terraform deployment."""
    return f"det-master-{tf_vars.get('cluster_id')}-{tf_vars.get('det_version_key')}"
4f375768b1b3678d1c3384cdf9bfcc0e3c681430
654,701
def port_int(p):
    """Pass through a port number (as a number or a string) provided it is
    valid and in range, otherwise raise an exception."""
    try:
        port = int(p)
    except (TypeError, ValueError):
        raise ValueError("Invalid port number")
    if 0 <= port <= 65535:
        return port
    else:
        raise ValueError("Port number out of range")
0e177caa716df35b231462e8da2deded6a0ca60c
244,234
import mpmath


def interval_prob(x1, x2, k, theta):
    """Compute the probability of x in [x1, x2] for the log-gamma distribution.

    Mathematically, this is the same as

        loggamma.cdf(x2, k, theta) - loggamma.cdf(x1, k, theta)

    but when the two CDF values are nearly equal, this function will give
    a more accurate result.

    x1 must be less than or equal to x2.

    k is the shape parameter of the gamma distribution.
    theta is the scale parameter of the log-gamma distribution.
    """
    if x1 > x2:
        raise ValueError('x1 must not be greater than x2')
    with mpmath.extradps(5):
        x1 = mpmath.mpf(x1)
        x2 = mpmath.mpf(x2)
        k = mpmath.mpf(k)
        theta = mpmath.mpf(theta)
        z1 = x1 / theta
        z2 = x2 / theta
        return mpmath.gammainc(k, mpmath.exp(z1), mpmath.exp(z2),
                               regularized=True)
f89c75490b1151d9cb1e5e1fb3cffb482870934f
500,127
def split_qualified(fqname):
    """Split a fully qualified element name, in Clark's notation, into its
    URI and local name components.

    :param fqname: Fully qualified name in Clark's notation.
    :return: 2-tuple containing the namespace URI and local tag name.
    """
    if fqname and fqname[0] == '{':
        return tuple(fqname[1:].split('}'))
    return None, fqname
5ca24065c95b4a1a300789ec2a3b26b97f75dfb5
230,351
def _get_before_after(prep_str: str, element: str) -> str:
    """Add the text 'before' or 'after' if these words are the prepositions
    related to the TIME or EVENT. These words are NOT included in SpaCy's
    NER date/time/event named entity extractions.

    @param prep_str: String holding the dictionary entry for the preposition
    @param element: String holding the named entity element text
    @return: A string of either the element text (if 'before'/'after' is not
        found) or the element text preceded by 'before' or 'after'
    """
    elem_text = element
    prep_str = prep_str.split("prep_text': '")[-1].lower()
    if prep_str.startswith('after'):
        elem_text = 'after ' + element
    elif prep_str.startswith('before'):
        elem_text = 'before ' + element
    return elem_text
32041ce14faa8521d75a5ba78c44aa7348452a50
532,511
def rotate_point_by_90(x, y, k, w=1.0, h=1.0):
    """Rotate a point xy on an image by k * 90 degrees.

    Params:
        x, y: a point, (x, y). If not normalized within 0 and 1, the width
            and height of the image should be specified explicitly.
        w, h: the width and height of the image.
        k: k * 90 degrees will be rotated.
    """
    k = k % 4
    if k == 0:
        return x, y
    elif k == 1:
        return y, w - x
    elif k == 2:
        return w - x, h - y
    elif k == 3:
        return h - y, x
df8e0a31d25d13dd01faa7db8ed7b91d8ce87e49
79,734
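A minimal usage sketch for `rotate_point_by_90` above, with normalized coordinates (w = h = 1.0):

# Hypothetical usage of rotate_point_by_90(), assuming the definition above.
assert rotate_point_by_90(0.2, 0.3, k=0) == (0.2, 0.3)   # no rotation
assert rotate_point_by_90(0.2, 0.3, k=1) == (0.3, 0.8)   # one 90-degree turn
assert rotate_point_by_90(0.2, 0.3, k=4) == (0.2, 0.3)   # full circle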
def get_axis_indexes(kernel_axis_length, center_index):
    """Calculate the kernel indexes on one axis depending on the kernel center.

    Args:
        kernel_axis_length (int): The length of the single axis of the
            convolutional kernel.
        center_index (int): The index of the kernel center on one axis.
    """
    axis_indexes = []
    for i in range(-center_index, kernel_axis_length - center_index):
        axis_indexes.append(i)
    return axis_indexes
f056d91185200ba533bf03f164201c3eb7cef22b
128,071
def euler1(lim=1000):
    """Solution for problem 1."""
    # could use the arithmetic-series sum formula here
    return sum(i for i in range(lim) if i % 3 == 0 or i % 5 == 0)
98a2abc09683dd73523d2b71bc1321fd540630c1
506,934
def to_header(wcs, relax=True):
    """Modify `astropy.wcs.WCS.to_header` to produce more keywords.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        Input WCS.
    relax : bool
        Passed to `WCS.to_header(relax=)`.

    Returns
    -------
    header : `~astropy.io.fits.Header`
        Output header.
    """
    header = wcs.to_header(relax=relax)
    if hasattr(wcs, '_naxis1'):
        header['NAXIS'] = wcs.naxis
        header['NAXIS1'] = wcs._naxis1
        header['NAXIS2'] = wcs._naxis2
    # Iterate over a snapshot of the keys so renaming does not mutate the
    # header while it is being traversed.
    for k in list(header):
        if k.startswith('PC'):
            cd = k.replace('PC', 'CD')
            header.rename_keyword(k, cd)
    return header
ba3317477f86020139f357cd793b35ef3f4c11b1
367,658
def strip_list(l):
    """Strip leading and trailing whitespace from all values in `l`."""
    for i, value in enumerate(l):
        l[i] = value.strip()
    return l
a7762b3b425ce2bcdd3ac59db30aa522415f537c
244,121
def _fix_vars(obj, variable):
    """Recursively searches the nested sim dict structure, attempts to find
    every instance of '&&', replaces it with some variable, and evaluates
    that string.

    It does this by recursively building a copy of the object if it contains
    dict or list substructures. Note that this function will not search in
    any structures other than lists or dicts.
    """
    if isinstance(obj, dict):
        return {key: _fix_vars(val, variable) for key, val in obj.items()}
    elif isinstance(obj, list):
        return [_fix_vars(key, variable) for key in obj]
    elif isinstance(obj, str):
        if '&&' in obj:
            # Note: eval() runs arbitrary code; only use with trusted input.
            return eval(obj.replace("&&", str(variable)))
        else:
            return obj
    return obj
37c641faf3fe7c986c0d599bf9ab1f13e07ffac9
343,131
def compare_set_genes_list(training_df, validation_df, test_df):
    """Compare the 3 sets to see how many genes they have in common.

    :param training_df: pandas dataframe containing the training data
    :param validation_df: pandas dataframe containing the validation data
    :param test_df: pandas dataframe containing the test data
    :return: 4 lists. Each list has the shared genes between different sets:
        genes shared between train and validation
        genes shared between train and test
        genes shared between validation and test
        genes shared between all 3 sets
    """
    train_set_genes = set(training_df['gene_symbol'])
    validation_set_genes = set(validation_df['gene_symbol'])
    test_set_genes = set(test_df['gene_symbol'])

    train_validation_shared_genes_list = list(train_set_genes & validation_set_genes)
    train_test_shared_genes_list = list(train_set_genes & test_set_genes)
    validation_test_shared_genes_list = list(test_set_genes & validation_set_genes)
    all_shared_genes_list = list(train_set_genes & validation_set_genes & test_set_genes)

    print("Number of shared genes between train and validation: ", len(train_validation_shared_genes_list))
    print("Number of shared genes between train and test: ", len(train_test_shared_genes_list))
    print("Number of shared genes between validation and test: ", len(validation_test_shared_genes_list))
    print("Number of shared genes between all 3 sets: ", len(all_shared_genes_list))
    print("\n")

    return (train_validation_shared_genes_list, train_test_shared_genes_list,
            validation_test_shared_genes_list, all_shared_genes_list)
b71c9027dbce08afa4cb4a4f4a02bdeb83df9796
435,903
def sorted_insertion(nodeList, childrenList):
    """Insert each of the elements of childrenList into the nodeList.

    The insertion must be sorted depending on the evaluation function value.

    :params:
        nodeList : LIST of NODES to be visited
        childrenList : LIST of NODES, set of children that should be studied
            to see whether they contain a redundant path or not
    :returns:
        nodeList : sorted LIST of NODES to be visited, updated with the
            childrenList included
    """
    return sorted(nodeList + childrenList, key=lambda x: x.f)
dde6053a553d5e11a64485a9bfe6fb4ba7815312
464,003
def require_open_hdf5_file(func):
    """A decorator to verify the HDF5 file is open before calling the
    wrapped method.

    If the HDF5 file is currently open for writing, call the method, else
    log the reason the file is not open.

    NOTE: This should only be used on MetaWriter methods (that take self
    as the first argument).
    """
    def wrapper(*args, **kwargs):
        writer = args[0]  # Extract class instance (self) from args
        if writer.file_open:
            # It is safe to call the wrapped method
            return func(*args, **kwargs)
        # It is not safe to call the wrapped method - log the reason why
        if writer.finished:
            reason = "Already finished writing"
        else:
            reason = "Have not received startacquisition yet"
        writer._logger.error(
            "%s | Cannot call %s - File not open - %s",
            writer._name,
            func.__name__,
            reason,
        )
    return wrapper
6253acdc00f3879ba780c699f7a6c0a4114c1a72
378,867
def group_key_filename(members):
    """Return the name of the group key file."""
    return f"groupkeys-{members}.tsv"
aad8ac25ad5fc9fb307d0302efb46c8076642571
520,642
def single_quote(string):
    """Place single quotes around the given string."""
    return "'" + string + "'"
57f84223f99f4e9421a8ac68574e7013579ba8ef
361,559
def limit_permissions(permissions: dict, limit_filter: dict) -> dict:
    """Make sure permission values in `permissions` do not exceed those in
    `limit_filter`. Returns a filtered set of permissions.

    :param limit_filter: the limiting permissions that cannot be exceeded
    :param permissions: the permissions to filter
    :return: filtered permissions
    """
    return {p: (permissions.get(p, False) and limit_filter[p]) for p in permissions}
260969d7cfe7ed0e4d4c90278d07abf8030a87c0
407,291
def get_request_header() -> dict:
    """Common functionality of forming a header for further requests.

    :return: Header as dictionary
    """
    # Sample user agent
    user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:75.0) Gecko/20100101 Firefox/75.0'
    headers = {'User-Agent': user_agent}
    return headers
4720463f62c18fe38ada9059ed4514c68e4b143c
75,464
def get_img_dev_width_mapping(column_width):
    """Returns an img_dev_width mapping for a given column_dev_width, e.g.

    - col-md-12 -->: 1
    - col-lg-6  -->: 1/2
    """
    wmap = {
        '1': '1/6',
        '2': '1/5',
        '3': '1/4',
        '4': '1/3',
        '5': '1/2',
        '6': '1/2',
        '7': '2/3',
        '8': '2/3',
        '9': '3/4',
        '10': '1',
        '11': '1',
        '12': '1',
    }
    k = column_width.rsplit('-', 1)[1]
    return wmap.get(k)
233a927b42095b4f57ad365ca84e901682a8d53a
68,096
def split_docstring(doc):
    """Split docstring into first line (header) and full body."""
    return (doc.split("\n", 1)[0], doc) if doc is not None else ("", "")
5684a5d43eabd39762b9515b3ac965fc2a0ba953
106,184
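A minimal usage sketch for `split_docstring` above, with a made-up docstring:

# Hypothetical usage of split_docstring(), assuming the definition above.
header, body = split_docstring("First line.\nMore detail here.")
assert header == "First line."
assert body == "First line.\nMore detail here."
assert split_docstring(None) == ("", "")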
from bs4 import BeautifulSoup


def strip_anchor_from_id(soup: BeautifulSoup) -> BeautifulSoup:
    """Loop through every id anchor and strip it, without editing content.

    Strips <id> attributes such as <h2 class="centrat2" id="aid-F8901">A</h2>.

    Args:
        soup (BeautifulSoup): A BeautifulSoup object
    """
    for tag in soup.find_all(attrs={'id': True}):
        del tag['id']
    return soup
67a64b3b971402e080c6e20ee7479f8ee54877df
143,891
import hashlib


def sha_sum(fname):
    """Returns the sha256 checksum of a file.

    Args:
        fname (str): Path to a file.

    Returns:
        str: The sha256 checksum as a hex string.
    """
    hasher = hashlib.sha256()
    with open(fname, 'rb') as fd:
        for block in iter(lambda: fd.read(65536), b""):
            hasher.update(block)
    return hasher.hexdigest()
e3a95c962d50cdc8118b4298a3b083b7bcd6ee58
691,975
import binascii
import hashlib


def hash_utf8(string):
    """Given a utf8 string, return the md5 hash value as a hex string."""
    hasher = hashlib.md5()
    hasher.update(string.encode("utf-8"))
    return binascii.hexlify(hasher.digest()).decode("utf-8")
ad88e01938dcab8b5af2c41cd7e0e9e2f103d246
91,276
def ifmatchinstance(tokens, classes):
    """
    >>> tokens = (TimeToken(15), TimeToken(16))
    >>> ifmatchinstance(tokens, (TimeToken, TimeToken))
    1
    >>> ifmatchinstance(tokens, (TimeToken, DayToken))
    0
    >>> both = (DayToken, TimeToken)
    >>> ifmatchinstance(tokens, (both, both))
    1
    >>> tokens = (TimeToken(15), DayToken(5, 7, 2018))
    >>> ifmatchinstance(tokens, (DayToken, TimeToken))
    -1
    >>> ifmatchinstance(tokens, ())
    0
    """
    if len(tokens) != len(classes):
        return 0
    if all([isinstance(token, cls) for token, cls in zip(tokens, classes)]):
        return 1
    if all([isinstance(token, cls) for token, cls in zip(tokens[::-1], classes)]):
        return -1
    return 0
94d12a325a9c29a3c212ad626330763ad401e505
370,953
from typing import List


def interleave_splits(splits: List[str], percentage: int = 50) -> List[str]:
    """Adds a portion of the `dev` set (or `test`, if there are only `train`
    and `test`) to the `train` set.

    Assumes that at least two splits are passed, ordered as (train, dev, test).

    Args:
        splits: list of strings
        percentage: percentage (represented as an integer value between 0
            and 100) of samples to extract from `dev` and add to `train`

    Returns:
        List[str] containing mixing instructions
        (e.g. ['train+validation[:50%]', 'validation[-50%:]'])
    """
    if len(splits) < 2:
        raise ValueError("At least two splits should be passed to this function")
    mixed_splits = [f"{splits[0]}+{splits[1]}[:{percentage}%]",
                    f"{splits[1]}[-{percentage}%:]"]
    if len(splits) == 3:
        mixed_splits += [splits[2]]
    return mixed_splits
8f09dcf178313393708069f81a20c006164d39f8
204,775
from typing import Callable

import pydantic


def _validator(*names, fn: Callable) -> Callable:
    """Construct reusable Pydantic validator.

    Args:
        names: Names of attributes to validate.
        fn: Validation function (see :meth:`pydantic.validator`).

    Examples:
        >>> class Class(Base):
        ...     x: list = None
        ...     _check_unique = _validator("x", fn=_check_unique)
        >>> Class(x=[0, 0])
        Traceback (most recent call last):
        ValidationError: ...
    """
    return pydantic.validator(*names, allow_reuse=True)(fn)
3088f7ac07b8a9db3efd00c2e60128c1043f23b2
132,748
import math


def calculate_distance(coord1, coord2, box_length=None):
    """Calculate the distance between two 3D coordinates.

    Parameters
    ----------
    coord1, coord2: list
        The atomic coordinates
    box_length: float, optional
        If given, apply the minimum image convention for a periodic box
        of this side length.

    Returns
    -------
    distance: float
        The distance between the two points.
    """
    distance = 0
    for i in range(3):
        dim_dist = coord1[i] - coord2[i]
        if box_length:
            # minimum image convention for periodic boundaries
            dim_dist = dim_dist - box_length * round(dim_dist / box_length)
        dim_dist = dim_dist**2
        distance += dim_dist
    distance = math.sqrt(distance)
    return distance
a43eb15406ea4eaf3c59bb27953b3d55f166a037
683,273
def worth_to_put_in_snippet(code_line: str) -> bool:
    """Check if a line of source code is worth including in a code snippet."""
    if "async " in code_line or "def " in code_line:
        return True
    if code_line.strip().startswith("assert"):
        return True
    return False
d668935f1b3526968a497febfe040037b2917acf
234,118
def cons_list_to_dict(cons):
    """Allows us to access constraints by type, instead of using a list index."""
    new_dict = {}
    for c in cons:
        new_dict[c["type"]] = c
    return new_dict
626364b003c2e8a0814eb14c440d4d7a2af09fa0
422,935
import inspect


def rbac_methods(obj):
    """Returns a list of names of the methods of `obj` to be exposed.

    obj: object
        Object to be scanned.
    """
    methods = []
    for name in dir(obj):
        attr = getattr(obj, name)
        if inspect.ismethod(attr):
            if hasattr(attr.__func__, '_rbac'):
                methods.append(name)
    return methods
7bba1e6ebed2cb1ba12803295a1a46307d54b61b
176,266
import copy


def simplex(n, k):
    """Get all ordered combinations of n integers (zero inclusive) which add
    up to k; the n-dimensional k simplex.
    """
    if k == 0:
        z = [0] * n
        return [z]
    l = []
    for p in simplex(n, k - 1):
        for i in range(n):
            a = p[i] + 1
            ns = copy.copy(p)
            ns[i] = a
            if ns not in l:
                l.append(ns)
    return l
6469af119da49a3548fdbf6ce9e97289bfd986ba
239,214
def turn(orientation, direction):
    """Given an orientation on the compass and a direction ("L" or "R"),
    return a new orientation after turning 90 deg in the specified direction."""
    compass = ['N', 'E', 'S', 'W']
    if orientation not in compass:
        raise ValueError('orientation must be N, E, S, or W')
    if direction not in ['R', 'L']:
        raise ValueError('direction must be R or L')
    i = (compass.index(orientation) + (1 if direction == 'R' else -1)) % len(compass)
    return compass[i]
393d80e317b959a33861c5f0b1745c730eb02745
660,827
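A minimal usage sketch for `turn` above, including the wrap-around case:

# Hypothetical usage of turn(), assuming the definition above.
assert turn('N', 'R') == 'E'
assert turn('N', 'L') == 'W'   # wraps around the compass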
def identity(t):
    """Returns its single argument.

    :returns: Its argument.
    """
    return t
2d0420dcf41df7a45f67dde74177bcd2b69bc713
602,361
def hhsex_recode(hhsex: int):
    """Recode the household histogram variable sex, which can only be 0 or 1,
    to the MDF HHSEX variable.

    The MDF specifies the HHSEX variable as:
        0 = NIU
        1 = Male householder
        2 = Female householder
    """
    assert 0 <= hhsex <= 1, f'SEX must be between 0 and 1, inclusive. Found {hhsex} instead.'
    if hhsex == 0:
        return "1"
    return "2"
29e88166886d777a83e959f71b68e66553ef9afb
507,700
def clean_dollars(x):
    """Used to clean up dollar fields of $ and commas."""
    if isinstance(x, str):
        return x.replace('$', '').replace(',', '').replace('city', '')
    # Non-string values (e.g. float NaN) are passed through unchanged.
    return x
f31efc374aa24ab051efa69e61a273d15a62d62d
622,756
def Or(s1, s2):
    """Or(s1, s2) returns a new selector that selects a node if EITHER s1
    or s2 select the node."""
    return lambda x: s1(x) or s2(x)
ed55979944dcd6022918e521332398c20c8e9ebf
351,374
def smallestIndif(instance):
    """Returns the size of the smallest indifference class of any voter of
    the instance.

    :param instance: The instance.
    :type instance: preflibtools.instance.preflibinstance.PreflibInstance

    :return: The size of the smallest indifference class of the instance.
    :rtype: int
    """
    return min([len(p) for o in instance.orders for p in o if len(p) > 0]
               + [instance.nbAlternatives])
e10603e2f476678d5d7438a4232b062b04629071
496,562
def get_indices(pop, state, masked=None):
    """Finds the population agents with a given state.

    Flattens the population for easier index finding.
    """
    # use flattened pop
    pop = pop.flatten()
    if masked is None:
        indices = [i for i, agent in enumerate(pop) if agent.cur_state() == state]
    else:
        assert isinstance(masked, bool), 'masked input must be a boolean'
        indices = [i for i, agent in enumerate(pop)
                   if agent.cur_state() == state and agent.masked == masked]
    return indices
547cc23a6d4a7d69d68db8faf0271b6c099c053e
374,891
def is_private_bool(script_dict):
    """Returns the is_private boolean value from the user dictionary object."""
    return script_dict['entry_data']['ProfilePage'][0]['graphql']['user']['is_private']
1e8b30a38dc527dc5e2ea73e75c253d8f1a59550
708,726
def cli(ctx, user):
    """Get a specific user.

    Output:
        a dictionary containing user information
    """
    return ctx.gi.users.show_user(user)
e6d57ce021251b36f5243cb5bbdafde441e83fe7
360,742
def Taxes(income, MARS, tbrk_base,
          rate1, rate2, rate3, rate4, rate5, rate6, rate7, rate8,
          tbrk1, tbrk2, tbrk3, tbrk4, tbrk5, tbrk6, tbrk7):
    """Taxes function returns tax amount given the progressive tax rate
    schedule specified by the rate* and (upper) tbrk* parameters and
    given income, filing status (MARS), and tax bracket base (tbrk_base).
    """
    if tbrk_base > 0.:
        brk1 = max(tbrk1[MARS - 1] - tbrk_base, 0.)
        brk2 = max(tbrk2[MARS - 1] - tbrk_base, 0.)
        brk3 = max(tbrk3[MARS - 1] - tbrk_base, 0.)
        brk4 = max(tbrk4[MARS - 1] - tbrk_base, 0.)
        brk5 = max(tbrk5[MARS - 1] - tbrk_base, 0.)
        brk6 = max(tbrk6[MARS - 1] - tbrk_base, 0.)
        brk7 = max(tbrk7[MARS - 1] - tbrk_base, 0.)
    else:
        brk1 = tbrk1[MARS - 1]
        brk2 = tbrk2[MARS - 1]
        brk3 = tbrk3[MARS - 1]
        brk4 = tbrk4[MARS - 1]
        brk5 = tbrk5[MARS - 1]
        brk6 = tbrk6[MARS - 1]
        brk7 = tbrk7[MARS - 1]
    return (rate1 * min(income, brk1) +
            rate2 * min(brk2 - brk1, max(0., income - brk1)) +
            rate3 * min(brk3 - brk2, max(0., income - brk2)) +
            rate4 * min(brk4 - brk3, max(0., income - brk3)) +
            rate5 * min(brk5 - brk4, max(0., income - brk4)) +
            rate6 * min(brk6 - brk5, max(0., income - brk5)) +
            rate7 * min(brk7 - brk6, max(0., income - brk6)) +
            rate8 * max(0., income - brk7))
934658c0f335e3f4f72e9e165efc5ec41cd6599d
339,699
def decode_message(encoded, key=4):
    """Decoding the message.

    Parameters
    ----------
    encoded : string
        Code point string to be converted back to the original string.
    key : int or float
        Code point number needed for the user to decrypt the encrypted message.

    Returns
    -------
    decoded : string
        Result string after the encoded string has been converted back to
        show the original message.
    """
    decoded = ''
    for character in encoded:
        ucp = ord(character)
        ucp = ucp - key  # shift the code point back down: this is the decoder
        ucp = chr(ucp)
        decoded += ucp
    return decoded
f6837110462e8cef448a25f29adcb3814220d290
598,719
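A minimal usage sketch for `decode_message` above; each code point was shifted up by the key, so shifting down decodes it:

# Hypothetical usage of decode_message(), assuming the definition above.
assert decode_message("lipps", key=4) == "hello"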
def borders_flipped(array):
    """Return True if any of the border elements are flipped."""
    return (array[0, :, :].any() or array[-1, :, :].any()
            or array[:, 0, :].any() or array[:, -1, :].any()
            or array[:, :, 0].any() or array[:, :, -1].any())
a8649ab88ee8e84b44f7e0f20dc4f380dd4b0dde
585,730
def get_percentages(MAT, INS, DEL, numref):
    """Calculates percentages given number of reference segments."""
    MAT = float(MAT) / float(numref) * 100.0
    INS = float(INS) / float(numref) * 100.0
    DEL = float(DEL) / float(numref) * 100.0
    return [MAT, INS, DEL]
02ed422923194621a4d043b0f499978725ffcfae
360,290
from typing import Dict


def save_epoch_logs(epoch_logs: Dict, loss: float, score: Dict, stage: str):
    """Function to improve readability and avoid code repetition in the
    training/validation loop within the Trainer's fit method.

    Parameters
    ----------
    epoch_logs: Dict
        Dict containing the epoch logs
    loss: float
        loss value
    score: Dict
        Dictionary where the keys are the metric names and the values are
        the corresponding values
    stage: str
        one of 'train' or 'val'
    """
    epoch_logs["_".join([stage, "loss"])] = loss
    if score is not None:
        for k, v in score.items():
            log_k = "_".join([stage, k])
            epoch_logs[log_k] = v
    return epoch_logs
e4dd9d3819ce8e7f3464d21992aaf9029b6e2eb4
123,950
def next_csv_element(csv_str, delimiter=","):
    """Helper function for csv_to_list.

    Parameters
    ----------
    csv_str : str
        A comma-separated value string (with double quotes around values
        containing the delimiter)
    delimiter : str
        The str separator between values

    Returns
    -------
    str, str
        Return a tuple, the next value and remainder of csv_str
    """
    if csv_str.startswith('"'):
        split = csv_str[1:].find('"') + 1
        return csv_str[1:split], csv_str[split + 2:]
    next_delimiter = csv_str.find(delimiter)
    return csv_str[:next_delimiter], csv_str[next_delimiter + 1:]
ea3203e74db6ffcc624eddc6458ec8f70d79f838
266,421
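A minimal usage sketch for `next_csv_element` above, covering both the quoted and the unquoted case:

# Hypothetical usage of next_csv_element(), assuming the definition above.
value, rest = next_csv_element('"a,b",c,d')
assert (value, rest) == ("a,b", "c,d")
value, rest = next_csv_element("c,d")
assert (value, rest) == ("c", "d")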
def sfbool(string):
    """Parse a string into a boolean value.

    The string "true", regardless of case, parses to True. Everything else
    parses to False. If anything other than a string is passed, an
    AttributeError is raised.
    """
    return string.lower() == 'true'
3ff42d73e5687d4ef81eb25d4ebc4211a57ac073
231,632
def interface_to_ip(interface):
    """Gets the IPv4 address from a `net_if_addrs` interface record.

    The record is passed as a `snic` `namedtuple`. This function locates
    the IPv4 one and returns it.
    """
    for record in interface:
        if record.family == 2:  # AF_INET
            return record.address
    return None
1ee2ae312c892e11e7abcabcf7846651030215e2
673,972
def _sample_discrete__python(pmf, rand):
    """Returns a sample from a discrete distribution.

    Note: This version has no bells or whistles.

    Parameters
    ----------
    pmf : list of floats
        A list of floats representing the probability mass function. The
        events will be the indices of the list. The floats should represent
        probabilities (and not log probabilities).
    rand : float
        The sample is drawn using the passed number.

    Returns
    -------
    s : int
        The index of the sampled event.
    """
    total = 0
    for i, prob in enumerate(pmf):
        total += prob
        if rand < total:
            return i
3c1d9482c0d39f205477f9f3a3733599fb3cdd80
570,351
def lagged_values(data, target_var, features_list, window_size):
    """Calculate lagged values of the target variable and store results in
    new columns.

    Parameters
    ----------
    data: data frame
        It has columns location, date, and a column with the response
        variable to forecast. This data frame needs to be sorted by location
        and date columns in ascending order.
    target_var: string
        Name of the column in the data frame with the forecast target variable.
    features_list: list of strings
        Running list of feature column names
    window_size: integer
        Time window to calculate lagged values for

    Returns
    -------
    data: data frame
        Original data frame with additional columns for lagged values.
    features_list: list of strings
        Running list of feature column names
    """
    for lag in range(1, window_size + 1):
        data['lag_' + str(lag)] = data.groupby('location')[target_var].shift(lag)
        features_list.append('lag_' + str(lag))
    return data, features_list
6caccc1af9c5ab33d58ee3804af210082ba3eb5c
544,247
from collections import Counter


def stats_from_aligned_read(read, references, lengths):
    """Create summary information for an aligned read.

    :param read: :class:`pysam.AlignedSegment` object

    Get the statistics from a sam/bam file, such as insertion, deletion,
    substitution and gap-compressed identity, using the definitions from
    minimap2 developer Heng Li's blog:
    http://lh3.github.io/2018/11/25/on-the-definition-of-sequence-identity

    Cigar operation codes, from
    https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigar:
        M  BAM_CMATCH      0  match
        I  BAM_CINS        1  insertion relative to reference
        D  BAM_CDEL        2  deletion relative to reference
        N  BAM_CREF_SKIP   3  skipped region from the reference
        S  BAM_CSOFT_CLIP  4  soft clip, not aligned but still in sam file
        H  BAM_CHARD_CLIP  5  hard clip, not aligned and not in sam file
        P  BAM_CPAD        6  padding (silent deletion from padded reference)
        =  BAM_CEQUAL      7  sequence match
        X  BAM_CDIFF       8  sequence mismatch
        B  BAM_CBACK       9  back operation (rarely used)
    """
    tags = dict(read.tags)
    if 'NM' not in tags:
        raise IOError("Read is missing required 'NM' tag. Try running 'samtools fillmd -S - ref.fa'.")

    counts, _ = read.get_cigar_stats()
    match = counts[0]
    ins = counts[1]
    cigar = read.cigartuples
    counter = Counter(elem[0] for elem in cigar)
    ins2 = counter[1]   # number of insertion events (gap-compressed)
    delt = counts[2]
    delt2 = counter[2]  # number of deletion events (gap-compressed)

    # NM is edit distance: NM = INS + DEL + SUB
    sub = tags['NM'] - ins - delt
    block_length = match + tags['NM']
    blast_iden = 100 * float(match) / block_length
    gap_compress = ins2 + delt2 + sub
    gap_compress_iden = 100 * float(match) / (block_length - gap_compress)

    direction = '-' if read.is_reverse else '+'
    results = {
        "ref": references[read.reference_id],
        "readID": read.qname,
        "read_len": read.infer_read_length(),
        "length": block_length,
        "blast_iden": blast_iden,
        "gap_compress_iden": gap_compress_iden,
        "qstart": read.query_alignment_start,
        "qend": read.query_alignment_end,
        "direction": direction,
        "rstart": read.reference_start,
        "rend": read.reference_end,
        "match": match,
        "ins": ins,
        "ins2": ins2,
        "del": delt,
        "del2": delt2,
        "sub": sub,
        "gap_compress": gap_compress,
    }
    return results
aea001541bf133f5a8fa5f183eee09bfb6fb5ed0
513,575
import re


def get_text(file):
    """Read text from a file, normalizing whitespace and stripping HTML markup."""
    with open(file) as f:
        _text = f.read()
    _text = re.sub(r'<.*?>', ' ', _text)
    _text = re.sub(r'\s+', ' ', _text)
    return _text
9cc1bb58b2e29163d99d8012dd1206b9229600fa
642,975
def test_trainable_parameters_changed(trainer):
    """Performs a training step and verifies that at least one parameter in
    every trainable layer has changed."""
    print("At least one parameter changed in the trainable layers", end="")
    passed, msg = True, ""
    trainer.fit(epochs=1, max_steps=1, max_eval_steps=0)
    for name, new_param in trainer.model.named_parameters():
        if new_param.requires_grad:
            old_param = trainer._saved_state["model"][name]
            if not (new_param.data != old_param).any():
                msg += " expected changes in: %s\n" % name
                passed = False
    return passed, msg
29a0b0ffab55b62e9cb5ff5d4b29f1b655e99ad9
35,677