Columns: content — string (39 to 9.28k chars); sha1 — string (40 chars); id — int64 (8 to 710k). Each record below is a content block followed by its sha1 and id.
import math


def get_normal(line, length=0):
    """Gets the normal of a line (flips it 90 degrees).

    Line is a pair of tuples -- ((x, y), (x, y)) -- representing the two
    ends of the line. If length is set to a value > 0, the normal is also
    set to that fixed length.
    """
    if type(line) not in [list, tuple]:
        line = (line[0][0], line[1][0])
    if length <= 0:
        return (
            (line[1][0], line[0][1]),
            (line[0][0], line[1][1])
        )
    half_length = length / 2
    center = (
        line[0][0] + ((line[1][0] - line[0][0]) / 2),
        line[0][1] + ((line[1][1] - line[0][1]) / 2)
    )
    diff = (
        line[1][0] - line[0][0],
        line[1][1] - line[0][1]
    )
    # math.sin/math.cos expect radians; converting atan2's result to
    # degrees here was a bug.
    angle = math.atan2(diff[0], diff[1])
    return (
        (center[0] - half_length * math.sin(angle),
         center[1] - half_length * math.cos(angle)),
        (center[0] + half_length * math.sin(angle),
         center[1] + half_length * math.cos(angle))
    )
e9b00c197e7a6f92a328186f7a529ab245f01bab
554,201
def to_letters(number_message: list) -> str:
    """
    Converts a list of numbers into an uppercase string.
    Numbers in the list should be in range [11, 36] for proper ASCII decoding.
    [11, 36] + 54 = [65, 90], i.e. A-Z
    """
    return "".join([chr(number + 54) for number in number_message])
377f5292d22fa868add7b8abb8e21c146b38c7a7
304,187
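A quick usage sketch for to_letters above (assuming the function is in scope); each number maps to chr(number + 54):

# 18 -> 'H', 15 -> 'E', 22 -> 'L', 25 -> 'O'
assert to_letters([18, 15, 22, 22, 25]) == "HELLO"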
import re


def _parse_trace_span(header):
    """Given an X_CLOUD_TRACE header, extract the trace and span ids.

    Args:
        header (str): the string extracted from the X_CLOUD_TRACE header
    Returns:
        Tuple[Optional[str], Optional[str]]:
            The trace_id and span_id extracted from the header.
            Each field will be None if not found.
    """
    trace_id = None
    span_id = None
    if header:
        try:
            split_header = header.split("/", 1)
            trace_id = split_header[0]
            header_suffix = split_header[1]
            # the span is the set of alphanumeric characters after the /
            span_id = re.findall(r"^\w+", header_suffix)[0]
        except IndexError:
            pass
    return trace_id, span_id
b771c8190f91e1de7d2f1a304b5bf974fa3882ad
52,804
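A small check of the parser above, using a made-up header of the TRACE_ID/SPAN_ID;o=OPTIONS form:

trace_id, span_id = _parse_trace_span("0123456789abcdef/42;o=1")
assert (trace_id, span_id) == ("0123456789abcdef", "42")
assert _parse_trace_span("abc") == ("abc", None)  # no '/': the IndexError path leaves span_id as None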
import copy


def db_entry_trim_empty_fields(entry):
    """Remove empty fields from an internal-format entry dict."""
    entry_trim = copy.deepcopy(entry)  # Make a copy to modify as needed
    for field in ['url', 'title', 'extended']:
        if field in entry:
            if (entry[field] is None) or \
               (type(entry[field]) is str and len(entry[field]) == 0):
                del entry_trim[field]
    return entry_trim
d5b31c823f4e8091872f64445ab603bcbf6a2bef
707,102
import torch


def normalize(
    x: torch.Tensor, Mean: torch.Tensor, Range: torch.Tensor
) -> torch.Tensor:
    """
    Compute standardized inputs/outputs (internal).

    Parameters
    ----------
    x: torch.Tensor
        input/output
    Mean: torch.Tensor
        mean values to be subtracted.
    Range: torch.Tensor
        interval range to be divided by.

    Returns
    -------
    out : torch.Tensor
        standardized inputs/outputs
    """
    if x.ndim == 2:
        batch_size = x.size(0)
        x_size = x.size(1)
        Mean_ = Mean.unsqueeze(0).expand(batch_size, x_size)
        Range_ = Range.unsqueeze(0).expand(batch_size, x_size)
    elif x.ndim == 1:
        Mean_ = Mean
        Range_ = Range
    else:
        raise ValueError(
            "Input tensor must be of shape (n_features) or (n_batch, n_features)."
        )
    return x.sub(Mean_).div(Range_)
fc7abc3e35f1aca5c7598f7331ee04aa32c62e31
378,261
from typing import List


def boolean_array_to_integer(values: List[bool]) -> int:
    """Converts a boolean array to an integer by interpreting it in binary
    with LSB 0 bit numbering.

    Args:
        values (List[bool]): The list of booleans to convert.

    Returns:
        int: The interpreted integer.
    """
    return sum(v << i for i, v in enumerate(values))
5630199d6932edc6c7d215e93a58a94a4d9fdb06
615,455
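A sanity check on the LSB-0 interpretation above: index 0 is the least significant bit.

assert boolean_array_to_integer([True, False, True]) == 5  # 1*2**0 + 0*2**1 + 1*2**2
assert boolean_array_to_integer([False, True]) == 2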
def coref_rule_applies(lex, constraints, mark, anaphor=None):
    """
    Check whether a markable definition from a coref rule applies to this markable

    :param lex: the LexData object with gazetteer information and model settings
    :param constraints: the constraints defining the relevant Markable
    :param mark: the Markable object to check constraints against
    :param anaphor: if this is an antecedent check, the anaphor is passed for $1-style constraint checks
    :return: bool: True if 'mark' fits all constraints, False if any of them fail
    """
    for constraint in constraints:
        if not constraint.match(mark, lex, anaphor):
            return False
    return True
ee8d926b82332062471ddd13b01b235f7197936c
475,816
import torch


def rotate(vector, angle):
    """Rotate a vector around the y-axis."""
    sinA, cosA = torch.sin(angle), torch.cos(angle)
    xvals = cosA * vector[..., 0] + sinA * vector[..., 2]
    yvals = vector[..., 1]
    zvals = -sinA * vector[..., 0] + cosA * vector[..., 2]
    return torch.stack([xvals, yvals, zvals], dim=-1)
91b9005daa1582386b4d204d571541d43e0a23ca
487,702
def query_release(release):
    """
    Build formatted query string for ICESat-2 release

    Arguments
    ---------
    release: ICESat-2 data release to query

    Returns
    -------
    query_params: formatted string for CMR queries
    """
    if release is None:
        return ''
    # -- maximum length of version in CMR queries
    desired_pad_length = 3
    if len(str(release)) > desired_pad_length:
        raise RuntimeError('Release string too long: "{0}"'.format(release))
    # -- strip off any leading zeros
    release = str(release).lstrip('0')
    query_params = ''
    while len(release) <= desired_pad_length:
        padded_release = release.zfill(desired_pad_length)
        query_params += '&version={0}'.format(padded_release)
        desired_pad_length -= 1
    return query_params
28b91076a5a01beeb7c85c2494b34a8f856050c6
440,712
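A worked example of the padding loop above: each pass appends a version string one zero shorter, so the CMR query matches any zero-padding convention. (Hypothetical release value.)

assert query_release('2') == '&version=002&version=02&version=2'
assert query_release(None) == ''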
def fgsm_attack(delta, epsilon, data_grad):
    """
    This function modifies a delta by epsilon * sign of data_grad

    Params:
        delta --> A torch.tensor of the adversarial patch
        epsilon --> The amount to modify delta (float)
        data_grad --> The current gradient direction (torch.tensor)
    Returns:
        perturbed_delta --> The new delta (torch.tensor)
    """
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed delta by adjusting each pixel of the input delta
    perturbed_delta = delta + epsilon * sign_data_grad
    # Return the perturbed delta
    return perturbed_delta
05a02ea8025343af77093d80a0ebc7ea043bab8a
396,726
def find_support(pattern, supports):
    """
    Considers the support of a pattern to be the minimum support among its items.

    pattern: list of items in the pattern.
    supports: dict mapping items to their supports.
    """
    min_support = None
    for item in pattern:
        if min_support is None or supports[item] < min_support:
            min_support = supports[item]
    return min_support
a7cb42a6b816bc6cb2f2e527f08e319c0adc5317
55,684
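A minimal illustration of the minimum-support rule above, with hypothetical item supports:

supports = {'bread': 5, 'milk': 3, 'eggs': 4}
assert find_support(['bread', 'milk'], supports) == 3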
def isprivilege(value):
    """Checks value for valid privilege level

    Args:
        value (str, int): Checks if value is a valid user privilege

    Returns:
        True if the value is valid, otherwise False
    """
    try:
        value = int(value)
        return 0 <= value < 16
    except ValueError:
        return False
a6e5c1cbc1cc6a03a71c2a51646c50b7684881ab
185,582
from typing import Optional


def parse_key(headers) -> Optional[str]:
    """
    Parses the header for an API key, and returns it if found.
    The authorization header must be in the following format: ``Token <api key>``.

    :param headers: A dictionary of request headers
    :return: If present, the key parsed from the header
    """
    auth = headers.get('Authorization')
    if auth:
        parts = auth.split()
        if len(parts) == 2 and parts[0] == 'Token':
            return parts[1]
    return None
cda11fd0afa08fb3a85ed457b913f4e32d229c1f
629,181
import re


def file_sort_key(file):
    """
    Given a directory with "test1.txt, test2.txt, test10.txt",
    os.listdir() will return "test1.txt, test10.txt, test2.txt".
    To make %n work, we need the files in a logical/natural order.
    Use this as the key argument to sorted() to achieve that.

    Parameters:
        file : str
            A single filename to build the sort key for.
    """
    key = re.split(r'(\d+)', file)
    key[1::2] = map(int, key[1::2])
    return key
241310b6409bea6b5200130816daa05ce5da53b4
361,869
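A natural-sort check for the key function above:

files = ['test10.txt', 'test2.txt', 'test1.txt']
assert sorted(files, key=file_sort_key) == ['test1.txt', 'test2.txt', 'test10.txt']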
from typing import Iterable


def dot_product(u: Iterable[float], v: Iterable[float]) -> float:
    """Returns the dot product of vectors u and v."""
    return sum(i * j for i, j in zip(u, v))
4c2a81b0da92eadb557d63e2859165704fa937cb
369,166
import torch


def apply_gains_rgb(rgb, red_gains, blue_gains):
    """Applies white balance gains to a batch of RGB images."""
    red_gains = red_gains.squeeze()
    blue_gains = blue_gains.squeeze()
    red_gains = red_gains.unsqueeze(0) if len(red_gains.shape) == 0 else red_gains
    blue_gains = blue_gains.unsqueeze(0) if len(blue_gains.shape) == 0 else blue_gains
    # Permute the image tensor to BxHxWxC format from BxCxHxW format
    rgb = rgb.permute(0, 2, 3, 1)
    green_gains = torch.ones_like(red_gains)
    gains = torch.stack([red_gains, green_gains, blue_gains], dim=-1)
    gains = gains[:, None, None, :]
    outs = rgb * gains
    # Re-permute the tensor back to BxCxHxW format
    outs = outs.permute(0, 3, 1, 2)
    return outs
c8181099715462f20464285196aad3ea0d82f422
302,708
def item_can_circulate(item_pid):
    """Return True if the item can circulate, False otherwise."""
    return True
8d536cc5f71bda38b88f306a134bf96710bdec75
441,965
def get_rst_header(header_name):
    """Get rst formatted code with a header.

    Args:
        header_name: name of header.

    Returns:
        Formatted rst code with the header.
    """
    rst_output = ""
    rst_output += header_name + "\n"
    rst_output += "^" * len(header_name) + "\n"
    rst_output += "\n"
    return rst_output
c38630a5c2d71116a75fc001233c0366cbc19e03
193,102
def encode_db_connstr(name,  # pylint: disable=too-many-arguments
                      host='127.0.0.1',
                      port=5432,
                      user='postgres',
                      password='password',
                      scheme='postgresql'):
    """Builds a database connection string."""
    conn_str = (
        str(scheme) + '://' +
        str(user) + ':' + str(password) + '@' +
        str(host) + ':' + str(port) + '/' +
        str(name)
    )
    return conn_str
5db05ad434f429714183da8d3d1c1ed058f1bff7
661,549
def get_all_cfn_resources_by_type(resource_array: list, resource_type: str) -> list:
    """
    Given a list of CloudFormation stack resources, filters the resources by
    the specified type.

    Parameters:
        resource_array (list): an array of CloudFormation stack resources
        resource_type (str): the name of the CloudFormation resource type to
            filter for - example: AWS::EC2::Instance

    Returns:
        An array of dict - containing the filtered CloudFormation resources
    """
    result = []
    for resource in resource_array:
        if resource['ResourceType'] == resource_type:
            result.append(resource)
    return result
14eaa760fe0f4dd8de90ef145115f19bc659ced3
66,485
from pathlib import Path
import json


def read_jupyter_as_json(filepath: Path) -> dict:
    """
    Read in a rendered notebook -- the JSON representation that is 'under the hood'.

    :param filepath: path to jupyter notebook.
    """
    with open(filepath, "r") as fin:
        contents = fin.read()
    return json.loads(contents)
d91344b1bddcd0e1078effe6dd7947f7e04ea6af
698,964
def _replace_and_save(md, fns, replacing_fn):
    """
    Replaces functions in the `fns` list in module `md` with `replacing_fn`.
    Returns a dictionary with the functions that were replaced.
    """
    saved = dict()
    for check_f in fns:
        try:
            fn = getattr(md, check_f)
            setattr(md, check_f, replacing_fn)
            saved[check_f] = fn
        except AttributeError:
            # the module does not define this function; skip it
            pass
    return saved
a57fdca4ffad6a3f5d4567e47e14d40cb59c7ea7
629,772
import pathlib
import gzip


def read_file(file_path):
    """Wrapper to read either gzipped or ordinary text file input."""
    if not isinstance(file_path, pathlib.Path):
        file_path = pathlib.Path(file_path)
    if file_path.suffix == '.gz':
        return gzip.open(file_path, 'rt')
    else:
        return open(file_path, 'r')
313fe7f8d57f40d60620d17ace0839bceb5e79a4
269,521
import collections
import itertools


def partition_dict(items, key):
    """
    Given an ordered dictionary of items and a key in that dict,
    return an ordered dict of items before, the keyed item, and an
    ordered dict of items after.

    >>> od = collections.OrderedDict(zip(range(5), 'abcde'))
    >>> before, item, after = partition_dict(od, 3)
    >>> before
    OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
    >>> item
    'd'
    >>> after
    OrderedDict([(4, 'e')])

    Like string.partition, if the key is not found in the items, the
    before will contain all items, item will be None, and after will be
    an empty iterable.

    >>> before, item, after = partition_dict(od, -1)
    >>> before
    OrderedDict([(0, 'a'), ..., (4, 'e')])
    >>> item
    >>> list(after)
    []
    """
    def unmatched(pair):
        test_key, item = pair
        return test_key != key

    items_iter = iter(items.items())
    item = items.get(key)
    left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter))
    right = collections.OrderedDict(items_iter)
    return left, item, right
af49884bda5f7de8412e64c0ce0f7d29a0ebc9f2
390,571
def is_sequence(x):
    """Algorithmically determines if x is a sequence."""
    try:
        len(x)
        x[0:0]
        return True
    except TypeError:
        # len() and slicing raise TypeError on non-sequences
        return False
175e9ffbda72559796386a2e44479ac409bfe280
642,206
import threading


def communicateWithTimeout(subProc, timeout=0, input=None):
    """Communicates with subProc until its completion or timeout seconds,
    whichever comes first."""
    if timeout > 0:
        to = threading.Timer(timeout, subProc.kill)
        try:
            to.start()
            return subProc.communicate(input=input)
        finally:
            to.cancel()
    else:
        return subProc.communicate(input=input)
a4f807fc4d6bdd03a5a1d23e1481a9070693f3c7
609,655
def parse_instruction(instruction):
    """Parse an instruction into parameter modes and opcode."""
    opcode = instruction % 100
    result_modes = []
    modes = instruction // 100
    for _ in range(3):
        result_modes.append(modes % 10)
        modes = modes // 10
    return result_modes, opcode
116d746ef1fe3e01afe3edecfc1aeb61c8f3b6f3
224,350
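A worked example for the parser above (Intcode-style): instruction 1002 splits into opcode 02 and per-parameter modes read right-to-left.

modes, opcode = parse_instruction(1002)
assert opcode == 2
assert modes == [0, 1, 0]  # modes for parameters 1, 2, 3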
def get_file_content(filepath, return_type='list'):
    """Bring content of file at `filepath` to memory.

    Args:
        filepath: file to read content of
        return_type: data structure to represent content in
    """
    with open(filepath, 'r') as f:
        if return_type == 'list':
            content = [line.rstrip('\n') for line in f.readlines()]
        else:
            content = f.read()
    return content
af2474e1c4ceaff202d6394c26f1f74bec27f4dd
284,480
def min_nones(*args):
    """min(), ignoring Nones."""
    ret = min(v for v in args if v is not None)
    return ret
d06a399609baae309742863e0e26b15cbe3f87fd
418,566
def add_dask_options(parser):
    """
    Adds 'cores' and 'memory' options to an argparse parser.
    Defaults to 4 cores and 8 GB of memory.

    :param parser: ArgumentParser object
    :return: parser with added options.
    """
    parser.add_argument(
        '--cores', type=int, default=4,
        help='Number of CPU cores to use for parallelization. Default: 4'
    )
    parser.add_argument(
        '--memory', type=int, default=8,
        help='Amount of memory to allocate for parallelization. Default: 8 GB'
    )
    return parser
30f3509b97c86fd6dff27e9e607c3ac55d57e84a
290,376
from datetime import datetime

import pytz


def _ensure_timezone(date: datetime):
    """
    Some datetime objects are timezone naive and these cannot be compared to
    timezone aware datetime objects. This function sets a default timezone of
    UTC for any object without a timezone.

    Args:
        date (datetime): The date object to add a timezone to

    Returns:
        datetime: A timezone aware datetime object
    """
    if date.tzinfo is None:
        return date.replace(tzinfo=pytz.UTC)
    return date
df9874104dc1a5e63ddec329e9fc090d47a1b618
566,288
def megapipe_query_sql(ra, dec, size):
    """
    Return the SQL query command for the CFHT MegaPipe TAP service.

    Parameters:
        ra (float): in degrees
        dec (float): in degrees
        size (float): in degrees

    Returns:
        url (str): The query URL, to be opened with `wget` or `curl`.
    """
    return ("https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/sync?"
            "REQUEST=doQuery&LANG=ADQL&FORMAT=CSV&QUERY="
            "SELECT * "
            "FROM caom2.Observation AS o JOIN caom2.Plane AS p ON o.obsID=p.obsID "
            "WHERE INTERSECTS(p.position_bounds, CIRCLE('ICRS', "
            + str(ra) + ', ' + str(dec) + ', ' + str(size) + ')) = 1 '
            "AND p.calibrationLevel >= 1 AND o.collection='CFHTMEGAPIPE' ")
    # "AND o.observationID LIKE 'MegaPipe%'")
67209b446736fbfb858acf69e31153fcca92e6c8
278,846
def hex2rgb(hexstring, digits=2):
    """Converts a hexstring color to a rgb tuple.

    Example: #ff0000 -> (1.0, 0.0, 0.0)

    digits is an integer number telling how many characters should be
    interpreted for each component in the hexstring.
    """
    if isinstance(hexstring, (tuple, list)):
        return hexstring
    top = float(int(digits * 'f', 16))
    r = int(hexstring[1:digits + 1], 16)
    g = int(hexstring[digits + 1:digits * 2 + 1], 16)
    b = int(hexstring[digits * 2 + 1:digits * 3 + 1], 16)
    return r / top, g / top, b / top
78ff771461ea8483579736f8c0fac4c127cb9fd9
587,041
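Quick checks of the conversion above, including the docstring example:

assert hex2rgb('#ff0000') == (1.0, 0.0, 0.0)
assert hex2rgb((0.5, 0.5, 0.5)) == (0.5, 0.5, 0.5)  # tuples pass through unchanged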
def child_structure_dfs(sampler, seen=None):
    """Return the structure of a composed sampler using a depth-first search
    on its children.

    Args:
        sampler (:obj:`.Sampler`):
            :class:`.Structured` or composed sampler with at least one
            structured child.

        seen (set, optional, default=None):
            IDs of already checked child samplers.

    Returns:
        :class:`~collections.namedtuple`: A named tuple of the form
        `Structure(nodelist, edgelist, adjacency)`, where the 3-tuple values
        are the :attr:`.Structured.nodelist`, :attr:`.Structured.edgelist`
        and :attr:`.Structured.adjacency` attributes of the first structured
        sampler found.

    Raises:
        ValueError: If no structured sampler is found.

    Examples:
        >>> sampler = dimod.TrackingComposite(
        ...     dimod.StructureComposite(
        ...         dimod.ExactSolver(), [0, 1], [(0, 1)]))
        >>> print(dimod.child_structure_dfs(sampler).nodelist)
        [0, 1]
    """
    seen = set() if seen is None else seen

    if sampler not in seen:
        try:
            return sampler.structure
        except AttributeError:
            # hasattr just tries to access anyway...
            pass

    seen.add(sampler)

    for child in getattr(sampler, 'children', ()):  # getattr handles samplers
        if child in seen:
            continue
        try:
            return child_structure_dfs(child, seen=seen)
        except ValueError:
            # tree has no child samplers
            pass

    raise ValueError("no structured sampler found")
9cb6f997e12a93230ed18bf1121493f2365adf24
32,712
import typing


def get_path_from_list(to_parse: typing.List[str], path_prefix: str) -> str:
    """Parse a path from a list of path parts."""
    ret = path_prefix
    for i in to_parse:
        ret += f"/{i}"
    return ret.lstrip("/").rstrip("/")
90ccb309751d3bff6d816422b50e23bcf799927b
630,938
def map_calculated_scores_to_user(predictions, user):
    """
    Replaces the previous scores (only 0's in production) with the computed scores.

    :param predictions: the list of prediction scores
    :param user: the user with its (original) scores
    :return: the user with its new scores
    """
    for row in predictions:
        user.scores[row[0]] = row[1]
    return user
23a1dcc077cab2f5f27750c660abbab09bf0ff4c
697,546
def decodeClass(cls):
    """
    By default webdataset converts str and int values into binary values.
    To obtain ints for processing, they need to be decoded. Commonly pytorch
    expects an int class, so here is a simple decoder that can be used along
    with the ByteToPil function like:

    WebDataSet(FileName).to_tuple('img', 'cls').map_tuple(ByteToPil, decodeClass).map_tuple(image_cropping, lambda x: x).shuffle(10000)
    """
    return int(cls)
4010c9e747655730378c625ff728cb16c326f857
233,795
from typing import Optional
from typing import Dict


def merge_dict(dict1: Optional[Dict], dict2: Optional[Dict]) -> Dict:
    """Merge two dictionaries into a new one."""
    new_dict = {}
    if dict1:
        new_dict.update(dict1)
    if dict2:
        new_dict.update(dict2)
    return new_dict
65ee891806b0bc8d2d710908475047112839b100
218,836
import re


def parse_shutdown_result(result):
    """Parse the shutdown result string and return the strings
    (grace left, deadline left, queries registered, queries executing)."""
    assert len(result.data) == 1
    summary = result.data[0]
    match = re.match(
        r'startup grace period left: ([0-9ms]*), deadline left: ([0-9ms]*), ' +
        r'queries registered on coordinator: ([0-9]*), queries executing: ([0-9]*), ' +
        r'fragment instances: [0-9]*', summary)
    assert match is not None, summary
    return match.groups()
fb3846fb1372e1d63721786fec1b05bea5a4b223
126,165
import json


def read_json(file: str) -> dict:
    """
    Read a JSON file.

    :param file: path to the file
    :type file: str
    :raises IOError, JSONDecodeError
    :returns: dict
    """
    with open(file, 'r') as hnd:
        data = hnd.read()
    return json.loads(data)
eee360a22bd19dc19adcb02f21350917b9983849
566,692
import yaml
import re


def create_kubeconfig_for_ssh_tunnel(kubeconfig_file, kubeconfig_target_file):
    """
    Creates a kubeconfig in which the server URL is modified to use a locally
    set up SSH tunnel (using 127.0.0.1 as the address).

    Returns a tuple consisting of:
    - the original IP/servername of the K8s API
    - the original port of the K8s API
    """
    with open(kubeconfig_file, "r") as f:
        kubeconfig = yaml.load(f.read(), Loader=yaml.FullLoader)
    original_server_address = kubeconfig["clusters"][0]["cluster"]["server"]
    address_pattern = re.compile('https://([^:]*):([0-9]+)')
    match = address_pattern.match(original_server_address)
    if not match:
        print('Error: No API address found in kubeconfig')
        exit(1)
    original_api_hostname = match.group(1)
    original_api_port = match.group(2)
    kubeconfig["clusters"][0]["cluster"]["server"] = f"https://127.0.0.1:{original_api_port}"
    with open(kubeconfig_target_file, "w") as f:
        f.write(yaml.dump(kubeconfig, default_flow_style=False))
    return (original_api_hostname, original_api_port)
39c85681486abda0008a040ad13a37032fc182b5
702,611
def lca(nodes):
    """Returns the Least Common Ancestor (LCA) of a list of nodes."""
    if len(nodes) == 1:
        return nodes[0]
    elif len(nodes) > 2:
        return lca([lca(nodes[:2])] + nodes[2:])
    elif len(nodes) == 2:
        node1, node2 = nodes
        set1 = set([node1])
        set2 = set([node2])
        while True:
            if node1 in set2:
                return node1
            if node2 in set1:
                return node2
            if node1.parent is not None:
                node1 = node1.parent
            if node2.parent is not None:
                node2 = node2.parent
            set1.add(node1)
            set2.add(node2)
    else:
        raise Exception("No nodes given")
55612c3eff74d595962b5093245daec0f2d17bde
292,847
def interaction2mols(interaction):
    """Provide the two molecules from a CIA interaction string.

    Args:
        interaction: e.g. H2-H2

    Returns:
        mol1, mol2

    Examples:
        >>> from exojax.spec.defcia import interaction2mols
        >>> print(interaction2mols("H2-CH4 (equilibrium)"))
        ('H2', 'CH4')
        >>> print(interaction2mols("CH4-He"))
        ('CH4', 'He')
    """
    mm = interaction.split(" ")[0]
    mm = mm.split("-")
    return mm[0], mm[1]
b8aeb9281c3635f90ab3fb40fa6bf32d6b0ac5fe
355,513
from typing import Any


def set_attribute(obj: Any, attr: str, new_value: Any) -> bool:
    """
    Set an attribute of an object to a specific value, if it wasn't that already.
    Return True if the attribute was changed and False otherwise.
    """
    if not hasattr(obj, attr):
        setattr(obj, attr, new_value)
        return True
    if new_value != getattr(obj, attr):
        setattr(obj, attr, new_value)
        return True
    return False
672abacc8cfbeeceb3c8e2e8df08bc7984e6ad69
123,613
def get_contain_flag(correct_entity_group, source_dependence):
    """Determine whether correct_entity_group is included in source_dependence."""
    contain_flag = True
    for inline_entity in correct_entity_group:
        if inline_entity not in source_dependence:
            contain_flag = False
            break
    return contain_flag
3046ebac5f41053c0210fed297c19b898da7d94f
141,726
def is_pandigital(n):
    """Returns True if n is a pandigital integer, otherwise False.

    An m-digit number is pandigital if it makes use of all digits 1 to m
    exactly once. Example: 2143 is a 4-digit pandigital number.

    Args:
        n (int): The number to determine if it's pandigital

    Returns:
        True or False
    """
    digits = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
    if len(str(n)) > 10:
        return False
    for i in range(1, len(str(n)) + 1):
        if digits[i - 1] not in str(n):
            return False
    return True
36ac42f41cac4cdcccd1a4c42df85031c63909de
467,892
import torch


def linspace(start: torch.Tensor, stop: torch.Tensor, num_samples: int) -> torch.Tensor:
    """Generalization of linspace to arbitrary tensors.

    Args:
        start (torch.Tensor): Minimum 1D tensor. Same length as stop.
        stop (torch.Tensor): Maximum 1D tensor. Same length as start.
        num_samples (int): The number of samples to take from the linear range.

    Returns:
        torch.Tensor: (D, num_samples) tensor of linearly interpolated samples.
    """
    diff = stop - start
    samples = torch.linspace(0, 1, num_samples)
    return start.unsqueeze(-1) + samples.unsqueeze(0) * diff.unsqueeze(-1)
44155176573f276937b10292cc11f45de1d0d277
693,092
def number_format(num, places=0):
    """Format a number with grouped thousands and given decimal places."""
    places = max(0, places)
    tmp = "%.*f" % (places, num)
    point = tmp.find(".")
    integer = tmp if point == -1 else tmp[:point]
    decimal = tmp[point:] if point != -1 else ""
    count = 0
    formatted = []
    for i in range(len(integer), 0, -1):
        count += 1
        formatted.append(integer[i - 1])
        if count % 3 == 0 and i - 1:
            formatted.append(",")
    integer = "".join(formatted[::-1])
    return integer + decimal
6d6f2412fa94857f77043a30ec2a14f809c5f039
682,991
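Examples of the grouping logic above:

assert number_format(1234567.891, 2) == '1,234,567.89'
assert number_format(1234567) == '1,234,567'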
import smtplib


def prep_smtp_instance(kwargs):
    """
    Prepare a smtplib.SMTP/SMTP_SSL instance from a dict of settings.

    Expected keys in `kwargs`:

    :param is_ssl: True if server uses SSL, False if TLS only.
    :type is_ssl: bool
    :param server: SMTP email server.
    :type server: str
    :param port: Port to use.
    :type port: int
    """
    args = kwargs['server'], kwargs['port']
    smt = smtplib.SMTP_SSL(*args) if kwargs['is_ssl'] else smtplib.SMTP(*args)
    return smt
a1a2ff283b18f0a816f3844c1990fbc12f55e80a
193,270
def get_channel_type(channel_name, sigtypes_filename):
    """Returns the type of a channel.

    Arguments
    ---------
    channel_name: name of channel (e.g. "II", "V", etc.)
    sigtypes_filename: file mapping channel names to channel types

    Returns
    -------
    Type of channel (e.g. "ECG", "BP", "PLETH", "Resp")
    """
    channel_types_dict = {}
    with open(sigtypes_filename, "r") as f:
        for line in f:
            splitted_line = line.split("\t")
            channel = splitted_line[-1].rstrip()
            channel_type = splitted_line[0]
            channel_types_dict[channel] = channel_type
    if channel_name in channel_types_dict:
        return channel_types_dict[channel_name]
    raise Exception("Unknown channel name")
bd41f891388b9ffd1a25875e853147cfb87d3f7f
329,644
def monthAbbrevFormat(month: int) -> str:
    """Formats a (zero-based) month number as an abbreviated month name,
    according to the current locale. For example: monthAbbrevFormat(0) -> "Jan"."""
    months = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun",
        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
    ]
    return months[month % 12]
62490e073a8f2aca2b97c31dc9b1ae7e05001333
200,478
def project(d, ks):
    """Return the subdict of `d` containing only the keys in `ks`."""
    return dict((k, d[k]) for k in ks)
c3fe6fa08375eb1c4ad0e0ed909d432643b11120
121,418
import random


def process_value(value, p, k):
    """
    :param value: The value to apply the differentially private scheme to.
    :param p: The probability of returning a random value instead of the true one.
    :param k: The probability of returning 1 when generating a random value.
    :returns: A new, differentially private value
    """
    rv = random.random()
    if rv <= p:
        # return a random value: 1 with probability k, else 0
        # (the original returned 0 here, contradicting the docstring)
        rv = random.random()
        if rv <= k:
            return 1
        return 0
    else:
        return value
08aa4dba98b2e822435db1d8626f7e41d6886702
452,667
def _gen_constraint(name, op):
    """Generate source code for a comparison class from a comparison operator."""
    return '''\
class Constraint%(name)s(Constraint):
    """ Constraint operator %(op)s """
    def __init__(self, a, b):
        self.a, self.b = a, b
    def __bool__(self):
        return float(self.a) %(op)s float(self.b)
    __nonzero__ = __bool__
    def __str__(self):
        return "(%%s %(op)s %%s)" %% (self.a, self.b)
''' % dict(name=name, op=op)
8b41501adbaeda814a47d4d2d1e0d0d6b65657a7
212,798
def Convert2mAhg(c, mass=1.0):
    """Converts capacity in Ah to capacity in mAh/g.

    Args:
        c (float or numpy array): capacity in Ah.
        mass (float): mass in mg.

    Returns:
        float: 1000000 * c / mass
    """
    return 1_000_000 * c / mass
c23f68377dc549a906ac54d8829ebf54f4ebea72
461,461
def _get_template_name(template_reference):
    """Get current template name."""
    return template_reference._TemplateReference__context.name
93f286978c16a34cf98880c90bbed1ab8fa98f36
233,462
def odd(n, add=1):
    """Return the next odd integer to `n`. Can be used to construct odd
    smoothing kernels in :func:`smooth`.

    Parameters
    ----------
    n : int
    add : int
        number to add if `n` is even, +1 or -1
    """
    assert add in [-1, 1], "add must be -1 or 1"
    return n if n % 2 == 1 else n + add
d3a4be5fc092eb4681da6a3ec062c8b8743c66c6
508,306
def filter_df_on_case_length(df, case_id_glue="case:concept:name",
                             min_trace_length=3, max_trace_length=50):
    """
    Filter a dataframe keeping only the cases that have the specified number of events.

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    min_trace_length
        Minimum allowed trace length
    max_trace_length
        Maximum allowed trace length
    """
    df = df.groupby(case_id_glue).filter(
        lambda x: min_trace_length <= len(x) <= max_trace_length)
    return df
a3eadc9534b41c62f895def2611a68157abfe091
44,229
def setup_parser(parser):
    """
    Setup argument parser.

    :param parser: argument parser
    :return: configured parser
    """
    parser.add_argument('--master', required=False, default="local[*]",
                        help="Spark master URL, defaults to 'local[*]'.")
    parser.add_argument('--driver-cores', required=False, type=int, default=3,
                        help="Number of driver cores.")
    parser.add_argument('--driver-memory', required=False, default="8g",
                        help="Memory limit of Spark driver.")
    parser.add_argument('--executor-cores', required=False, type=int, default=4,
                        help="Number of cores for each executor.")
    parser.add_argument('--executor-memory', required=False, default="8g",
                        help="Memory limit of Spark executor.")
    parser.add_argument('--jars', required=False, default="",
                        help="Additional JARs to be included in driver and executors (comma-separated).")
    return parser
b4d37c6270f70e5a56475896a20e32796650d232
341,969
def bytes_to_msg(seq, standard="utf-8"):
    """Decode bytes to text."""
    return seq.decode(standard)
5664d97b3fec5d119daa2171bcb431ca5a4b5f33
706,274
import json


def load_attribute_map_from_json(file_path):
    """Load an attribute dictionary stored in a JSON file.

    Parameters
    ----------
    file_path : str
        Path to the attribute map file.

    Returns
    -------
    dict
        Dictionary whose keys and values are provided by the object stored in
        the given JSON file. Keys are attributes found in the sample or prep
        metadata files; values are the names of attributes in a Sample,
        Subject or Preparation object.
    """
    with open(file_path) as file:
        json_str = file.read()
    attribute_map = json.loads(json_str)
    return attribute_map
bb9e7e5477b17576c1177da02bdcd9cf69c41039
510,828
def interpret_version(cur_version, new_version):
    """
    Interpret the command to set a new version.

    Parameters:
    - cur_version: str, the current version, the one to alter.
    - new_version: str, the command to set a new version.
      A command can be an actual new version string, or one of the keywords:
        - "maj": to increment the major number of the current version,
        - "min": to increment the minor number of the current version,
        - "rev": to increment the revision number of the current version.

    Returns:
        The new version as it should be saved in version.py
    """
    if new_version not in ("maj", "min", "rev"):
        return new_version
    cache = cur_version.split(".")
    if not cache:
        return "0.0.1"
    # normalize the size
    if len(cache) == 1:
        cache.extend(["0", "0"])
    elif len(cache) == 2:
        cache.append("0")
    # interpret 'maj', 'min' and 'rev'
    if new_version == "maj":
        number = int(cache[0]) + 1
        cache = [str(number), "0", "0"]
    elif new_version == "min":
        number = int(cache[1]) + 1
        cache = [cache[0], str(number), "0"]
    elif new_version == "rev":
        number = int(cache[2]) + 1
        cache = [cache[0], cache[1], str(number)]
    version = ".".join(cache)
    return version
de401771de80bd8c113054b84284e4416906fbc6
416,961
from typing import Mapping
from typing import Sequence
from typing import Hashable


def exclude_keys(dictionary: Mapping, keys: Sequence[Hashable]) -> dict:
    """
    Removes all key-value pairs matching the provided keys from the dictionary
    and returns a new one. Does not crash when non-existent keys are provided.

    Arguments:
        dictionary: A mapping-type from which keys should be removed.
        keys: A sequence of hashable keys to be excluded from the new dictionary.

    Returns:
        A new dictionary minus the specified key-value pairs.
    """
    return {k: v for k, v in dictionary.items() if k not in keys}
ed405e96253757acb9c582970e7657d557ec958b
287,281
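A short check for the function above:

d = {'a': 1, 'b': 2, 'c': 3}
assert exclude_keys(d, ['b', 'z']) == {'a': 1, 'c': 3}  # unknown keys are ignored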
def dBm_to_watt(p_dBm):
    """Convert power in units of dBm into Watt."""
    return 1e-3 * 10 ** (p_dBm / 10)
cacfaae7f1d41a102f8c5beab3db452d31c7c1c9
568,260
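Two reference points for the conversion above: 0 dBm is 1 mW and 30 dBm is 1 W.

assert dBm_to_watt(0) == 1e-3
assert abs(dBm_to_watt(30) - 1.0) < 1e-12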
def make_modifier_resized(target_size):
    """Make a string designating a resize transformation.

    Note that the final image size may differ slightly from this size as it
    only reflects the size targeted.

    Args:
        target_size: Target size of rescaling in x,y,z.

    Returns:
        String designating the resize transformation.
    """
    return "resized({},{},{})".format(*target_size)
0c1b5a12f6d7888a44fe4e720f3d333b8d0906ee
97,672
def update_halley(yvals, y0):
    """Calculate the variable increment using Halley's method.

    Calculate the amount to increment the variable by in one iteration of
    Halley's method. The goal is to find the value of x for which y(x) = y0.
    `yvals` contains the values [y(x0), y'(x0), y''(x0), ...] of the function
    and its derivatives at the current estimate of x.

    The Halley update increment is

        dx0 = (y0 - y)/y'
        dx = dx0 / (1 + dx0*y''/(2*y')).

    Here, dx0 is the increment used by Newton's method. Halley's method
    iterates by adding x += dx at each step.

    Arguments:
        yvals (iterable of float or array): The value of y(x) and its
            derivatives at the current estimate of x. Must contain at least
            3 entries, for y, y' and y''.
        y0 (float or array): The target value of y.

    Returns:
        dx (float or array): The update increment for x.
    """
    dx0 = (y0 - yvals[0]) / yvals[1]
    dx = dx0 / (1 + dx0 * yvals[2] / (2 * yvals[1]))
    return dx
c2021d0c21bd8a345da6808756c207a27afefeff
664,986
from collections import OrderedDict


def times(*combined):
    """Generate a product of N sets of combinations.

    times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])

    Args:
        *combined: N lists of dictionaries that specify combinations.

    Returns:
        a list of dictionaries for each combination.

    Raises:
        ValueError: if some of the inputs have overlapping keys.
    """
    assert combined
    if len(combined) == 1:
        return combined[0]
    first = combined[0]
    rest_combined = times(*combined[1:])
    combined_results = []
    for a in first:
        for b in rest_combined:
            if set(a.keys()).intersection(set(b.keys())):
                raise ValueError("Keys need to not overlap: {} vs {}".format(
                    a.keys(), b.keys()))
            combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
    return combined_results
6a8475de9b35f4a931969e304880ebe657a3e6ee
65,644
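A small demonstration of the Cartesian combination above, with hypothetical option dicts (the docstring's combine helper is not defined here, so plain lists of dicts are used):

grid = times([{'a': 1}, {'a': 2}], [{'b': 3}])
assert [dict(g) for g in grid] == [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]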
def load_synapses_tsv_data(tsv_path):
    """Load synapse data from tsv.

    Args:
        tsv_path (str): path to the tsv synapses data file

    Returns:
        list of dicts containing the data for each synapse
    """
    synapses = []
    with open(tsv_path, "r", encoding="utf-8") as f:
        # first line is dimensions
        for line in f.readlines()[1:]:
            syn = {}
            items = line.strip().split("\t")
            syn["sid"] = int(items[0])
            syn["pre_cell_id"] = int(items[1])
            syn["sectionlist_id"] = int(items[2])
            syn["sectionlist_index"] = int(items[3])
            syn["seg_x"] = float(items[4])
            syn["synapse_type"] = int(items[5])
            syn["dep"] = float(items[6])
            syn["fac"] = float(items[7])
            syn["use"] = float(items[8])
            syn["tau_d"] = float(items[9])
            syn["delay"] = float(items[10])
            syn["weight"] = float(items[11])
            syn["Nrrp"] = float(items[12])
            syn["pre_mtype"] = int(items[13])
            synapses.append(syn)
    return synapses
097c6f591b6e68e3bd99f53a41a22b89d565e612
677,494
def convert_mib_to_bytes(size_in_mib: float) -> int:
    """Converts a size expressed in MiB to Bytes.

    Parameters
    ----------
    size_in_mib: float
        A file size in MiB.

    Returns
    -------
    int
        The size in Bytes equivalent to the passed size in MiB.
    """
    return round(size_in_mib * (1024 ** 2))
b4c4670ede6053775252ce1b7ec38a9485c7cd3e
328,495
import re


def count_occurrences(path):
    """
    Counts how many times the Huffington Post pattern appears and how many
    URLs lead to a PDF document.

    Keyword arguments:
    path -- path of the file containing the patterns to analyze.
    """
    # initialize the counters
    huff_count = 0
    pdf_count = 0
    # read the file line by line, incrementing the counters on each match
    with open(path, "r") as infile:
        for line in infile:
            if re.search(r"Huffington post\b", line, flags=re.IGNORECASE):
                huff_count += 1
            if re.search(r"https?:.*\.pdf/*$", line, flags=re.MULTILINE):
                pdf_count += 1
    print('The Huffington_post pattern appears %s times' % huff_count)
    print('The url_pdf pattern appears %s times' % pdf_count)
    return huff_count, pdf_count
a4c12364ac61fa9c721b98133c177eaecc36c674
58,833
import math


def AMS(s, b, br=10.0):
    """
    Approximate Median Significance, defined as:
        AMS = sqrt( 2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s } )
    where b_r = 10, b = background, s = signal, log is the natural logarithm.
    """
    radicand = 2 * ((s + b + br) * math.log(1.0 + s / (b + br)) - s)
    if radicand < 0:
        print('radicand is negative. Exiting')
        return -1
    else:
        return math.sqrt(radicand)
a8ff6697d0e3ffc83ce1b74b289c291c96e47dd5
631,998
def get_team_names(driver):
    """Gets the team names of all teams on the current driver page."""
    name_elements = driver.find_elements_by_class_name("name")
    team_names = [name.text for name in name_elements]
    return team_names
875f2693c762298d5d6520e35f6e9c0a6abcb02e
397,008
import re


def replace_quotes(string):
    """
    >>> test_string = '„Kyogre HBF Freiburg“'
    >>> replace_quotes(test_string)
    '"Kyogre HBF Freiburg"'
    >>> test_string = '“Kyogre HBF Freiburg“'
    >>> replace_quotes(test_string)
    '"Kyogre HBF Freiburg"'
    >>> test_string = '”Kyogre HBF Freiburg”'
    >>> replace_quotes(test_string)
    '"Kyogre HBF Freiburg"'
    """
    # replace („|“|\'|„|“|”|‘|’) with "
    pattern = re.compile(r'(„|“|\'|„|“|”|‘|’)')
    string = pattern.sub('"', string)
    return string
b50e1f6b6e4be6747703269d23a1888cebe9abfc
209,282
import re


def remove_citation(paragraph: str) -> str:
    """Remove all citations (numbers inside square brackets) in a paragraph."""
    return re.sub(r'\[\d+\]', '', paragraph)
dc88606e69187143d767215ddc098affdbd185d5
7,445
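A quick check of the citation stripper above:

assert remove_citation('Water boils at 100 °C.[1][12]') == 'Water boils at 100 °C.'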
def fast_multiply(matrix, value):
    """
    Fast in-place multiply of an array by a value.

    :param matrix: Array of values (e.g. a numpy array; must have a .shape)
    :param value: Multiplier
    :return: Multiplied array
    """
    for ix in range(0, matrix.shape[0]):
        matrix[ix] *= value
    return matrix
8a9eb29b1c6c0cc56b35f43f128e2595b45e1ff6
14,598
import typing


def dirOffset(d: typing.List[int]) -> int:
    """
    Returns the actual file-relative offset of the content pointed at by the
    tuple of uint16_ts `d` that represents the DirEntry in question.
    """
    offset = d[0] + ((d[1] & 0xFF) << 16) + (d[4] << 24)
    return (offset - 1) * 0x200
cebe7d577d2e22bc35dbc58190213a85aa023a39
483,537
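A worked example for the offset math above, with a hypothetical DirEntry (only indices 0, 1 and 4 are used): offset = 3, so the file-relative offset is (3 - 1) * 0x200 = 1024.

assert dirOffset([3, 0, 0, 0, 0]) == 1024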
def myrange_using_list(target):
    """Generate items in range of [0, target-1] and build a list."""
    num, num_list = 0, []
    while num < target:
        num_list.append(num)
        num += 1
    return num_list
73fc90d1040e38deb5aa1ab36a1434570fddba78
406,558
def GetSources(arc, target):
    """All source nodes for a specified arc pointing to a specified node."""
    sources = {}
    for triple in target.arcsIn:
        if (triple.arc == arc):
            sources[triple.source] = 1
    return sources.keys()
2e0363a466b8161e6ddaec6507fb326083506dc1
140,424
def population_size(n: int, k: int) -> int:
    """
    Returns the population at time 'n' given 'k' generation size.

    Fn = Fn-1 + (Fn-2 * k)
    """
    if n <= 2:
        # F1 = F2 = 1
        return 1
    return population_size(n - 1, k) + (population_size(n - 2, k) * k)
6961d92042dc62a4408ec0e0616838530ecf85d4
531,597
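The recurrence above is the classic mortality-free rabbit problem: with n=5 and litters of k=3, the population is F5 = F4 + 3*F3 = 7 + 3*4 = 19.

assert population_size(5, 3) == 19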
def df_mask_by_str_or_list(df, column, keys):
    """Find str match and make a mask of the dataframe.

    If multiple keys are fed, the mask is AND-combined across keys.
    """
    mask = None
    if isinstance(keys, str):
        keys = [keys]
    for key in keys:
        this_mask = df[column].str.find(key) >= 0
        mask = this_mask if mask is None else (mask & this_mask)
    return mask
6393f502aac4423b6ccb7b430f59b5cde220d5b4
271,946
from typing import Sequence
from typing import Tuple
from typing import List


def pad_string_sequences(seq: Sequence[Sequence[str]]) -> Tuple[List[List[str]], Sequence[int]]:
    """
    Like keras.preprocessing.sequence.pad_sequences but for strings,
    and it also returns seq_length.
    """
    seq_length = [len(item) for item in seq]
    maxlen = max(seq_length)
    result = []
    for i, item in enumerate(seq):
        result.append(list(item) + [''] * (maxlen - seq_length[i]))
    return result, seq_length
db8483ca16b1cecbd4b07168d1dd218e516b5950
97,605
def extract_title_md(md_file):
    """Return the first level-1 md title (without '#' and trailing newline)."""
    with open(md_file, "r") as f:
        line = ''
        while '# ' not in line:
            line = f.readline()
            if not line:  # EOF reached without finding a title
                return ''
    return line.lstrip('# ').rstrip('\n')
1d9ff01812c4936548672c1e1eeafdf129e2c22c
423,223
def find_field(schema, field_name):
    """Find a field in the schema by field name.

    :param schema: Schema instance (e.g. input_schema)
    :type schema: dict
    :param field_name: Field name
    :type field_name: string
    """
    for field in schema:
        if field['name'] == field_name:
            return field
7d0186682e353911479274c54f83cd56a033b60d
439,293
def numUniqueChars(word):
    """
    Determine how many letters of the alphabet are present in a given word.

    @param word An English word.
    @returns The number of unique letters in that word.
    """
    seen = ['\n']  # pre-seed so a trailing newline is never counted
    count = 0
    for c in word:
        if c not in seen:
            count += 1
            seen.append(c)
    return count
f3e416a0e2d3ebe3fc8cca442f7b96ce80993bad
520,092
def refold_vdop(vdop_art, v_nyq_vel, rth_position):
    """
    Refold Doppler velocity from the PHIDP folding position.

    Parameters:
    ===========
    vdop_art: array
        Doppler velocity
    v_nyq_vel: float
        Nyquist velocity.
    rth_position: list
        Folding position of PHIDP along axis 2 (length of rth_position is
        length of axis 1).

    Returns:
    ========
    new_vdop: array
        Properly folded Doppler velocity.
    """
    new_vdop = vdop_art
    for j in range(len(rth_position)):
        i = rth_position[j]
        if i == 0:
            continue
        else:
            new_vdop[j, i:] += v_nyq_vel
    pos = (vdop_art > v_nyq_vel)
    new_vdop[pos] = new_vdop[pos] - 2 * v_nyq_vel
    return new_vdop
51ed7372f69ac0bb34eddf47ba4a3e18e9741440
460,829
def extract_confidence(result):  # pragma: no cover
    """Extracts the confidence from a parsing result."""
    return result.get('intent', {}).get('confidence')
a437114d67338f58fe23619ffe9c4bcea73ad40f
453,813
import torch


def charbonnier_loss(pred: torch.Tensor,
                     target: torch.Tensor,
                     q: float = 0.2,
                     eps: float = 0.01) -> torch.Tensor:
    """Generalized Charbonnier loss function between output and ground truth.

    The loss function is

    .. math::
        loss = ((u - u_{gt})^2 + (v - v_{gt})^2 + eps)^q

    Generalized Charbonnier loss was used in LiteFlowNet when fine-tuning,
    with eps=0.01, q=0.2.

    Args:
        pred (torch.Tensor): output flow map from flow_estimator,
            shape (B, 2, H, W).
        target (torch.Tensor): ground truth flow map, shape (B, 2, H, W).
        q (float): the exponent in the Charbonnier loss.
        eps (float): small constant for numerical stability when fine-tuning
            the model. Defaults to 0.01.

    Returns:
        Tensor: loss map with the shape (B, H, W).
    """
    assert pred.shape == target.shape, \
        (f'pred shape {pred.shape} does not match target '
         f'shape {target.shape}.')
    diff = torch.add(pred, -target)
    loss_map = (torch.sum(diff * diff, dim=1) + eps) ** q  # shape (B, H, W)
    return loss_map
481de25b3d379f0c5f6c6f79f353166bc1bc1194
34,500
def is_unlimited(rate_limit):
    """
    Check whether a rate limit is None or unlimited (indicated by '-1').

    :param rate_limit: the rate limit to check
    :return: bool
    """
    return rate_limit is None or rate_limit == -1
8e19a8aff46713c6e1886b40bf005217166eda01
664,822
import requests


def get_doc_list(domain, app_name, collection='editions', verbose=True):
    """Retrieves a list of doc-uris stored in a dsebaseapp.

    :param domain: Domain hosting the dsebaseapp instance, e.g. "http://127.0.0.1:8080"
    :param app_name: The name of the dsebaseapp instance.
    :param collection: The name of the collection to process.
    :param verbose: Defaults to True and logs some basic information.
    :return: A list of absolute URLs
    """
    endpoint = "{}/exist/restxq/{}/api/collections/{}".format(domain, app_name, collection)
    r = requests.get(endpoint)
    if r.status_code == 200:
        if verbose:
            print('connection to: {} status: all good'.format(endpoint))
    else:
        print(
            "There is a problem with connection to {}, status code: {}".format(
                endpoint, r.status_code
            )
        )
        return None
    hits = r.json()['result']['meta']['hits']
    all_files = requests.get("{}?page[size]={}".format(endpoint, hits)).json()['data']
    files = ["{}{}".format(domain, x['links']['self']) for x in all_files]
    if verbose:
        print("{} documents found".format(len(files)))
    return files
3b30eeff97d2e8f62eddd509661b5622732434e1
440,556
def WyllieModel(Phi, Vpmat, Vpfl):
    """WYLLIE MODEL
    Wyllie's equation.
    Written by Dario Grana (August 2020)

    Parameters
    ----------
    Phi : float or array_like
        Porosity (unitless).
    Vpmat : float or array_like
        P-wave velocity of the solid phase (km/s).
    Vpfl : float or array_like
        P-wave velocity of the fluid phase (km/s).

    Returns
    -------
    Vp : float or array_like
        P-wave velocity of saturated rock (km/s).

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling:
    Wiley - Chapter 2.1
    """
    # Wyllie time-average equation
    Vp = 1 / ((1 - Phi) / Vpmat + Phi / Vpfl)
    return Vp
f6b04b10d3bc2b4c0c7cb34cac65ecc1d8d4dab4
243,513
def _merge_candidate_name(src, dest):
    """Returns the formatted name of a merge candidate branch."""
    return f"xxx-merge-candidate--{src}--{dest}"
a08b6d4b57385bc390e649448ce264cffd5a1ffa
7,950
def levenshtein_dynamic(s1: str, s2: str) -> int:
    """Return the minimum edit distance between strings s1 and s2.

    This function implements the Levenshtein distance algorithm using dynamic
    programming.

    Note: This function is not required by the Levenshtein automaton, but I
    felt it could be useful to illustrate the basic idea of the Levenshtein
    algorithm. It is the same function as the levenshtein function in
    helpers.py.
    """
    dp = list(range(0, len(s2) + 1))  # dp stands for dynamic programming
    # technically, len(dp) could be reduced to min(len(s1), len(s2)),
    # but it's not necessary.
    for i in range(len(s1)):
        for d in range(len(dp) - 1, 0, -1):
            j = d - 1
            dp[d] = min(dp[d] + 1, dp[d - 1] + (s1[i] != s2[j]))
        dp[0] = i + 1
        for d in range(1, len(dp)):
            dp[d] = min(dp[d], dp[d - 1] + 1)
        # print(dp)
    return dp[-1]
12662765867b853f0851ea1d63a3e4fa3d592844
206,644
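Standard test cases for the distance function above:

assert levenshtein_dynamic('kitten', 'sitting') == 3
assert levenshtein_dynamic('flaw', 'lawn') == 2
assert levenshtein_dynamic('', 'abc') == 3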
import io
from typing import Any

import yaml


def object_from_yaml_stream(stream: io.BytesIO) -> Any:
    """Create an object from a stream of YAML bytes."""
    return yaml.safe_load(stream)
d58e2350c2b23a625b5c8ed0faa1c15a03cf499d
491,519
def strip(s):
    """
    Strip whitespace from the beginning and end.

    :param s: input value
    :type s: any
    :return: processed value or None
    :rtype: str/None
    """
    if s is None:
        return None
    return str(s).strip()
309d7f05e28c5d094f0eae9f999f954ca510d849
501,343
def _first_or_none(array):
    """Pick the first item from `array`, or return `None` if there is none."""
    if not array:
        return None
    return array[0]
e8430cf316e12b530471f50f26d4f34376d31ce2
28,363
def check_exists(key, dict_to_check):
    """Returns the value of the key in dict_to_check if found, else returns
    False. If dict_to_check is None, returns False.

    :param key: the key to look for in dict_to_check
    :type key: str
    :param dict_to_check: the dictionary to look for the key in
    :type dict_to_check: dict
    :return: value of key in dict_to_check, else False
    """
    if dict_to_check is None:
        return False
    assert isinstance(dict_to_check, dict)
    return dict_to_check.get(key, False)
643969b9294936ef5e8c882b2d9e07f00eefac02
243,338
def convert_history(history):
    """
    Converts datatypes in a model history dictionary to builtin `float`.

    :param history: model history dictionary
    :return: history with values converted to float
    """
    return {k: list(map(float, v)) for k, v in history.items()}
8d72ee10b8777a98b1c256ec036c87f9983ed1b9
484,529
def is_sigma_algebra__brute(F, X=None):
    """Returns True if F is a sigma algebra on X.

    Parameters
    ----------
    F : set of frozensets
        The candidate sigma algebra.
    X : frozenset, None
        The universal set. If None, then X is taken to be the union of the
        sets in F.

    Returns
    -------
    issa : bool
        True if F is a sigma algebra and False if not.

    Notes
    -----
    This is a brute force check against the definition of a sigma algebra
    on a finite set. Its time complexity is O( len(F)**2 ).
    """
    # This works because it's not necessary to test countable unions if the
    # base set X is finite. One need only consider pairwise unions.
    if X is None:
        X = frozenset().union(*F)
    else:
        X = frozenset(X)

    for subset1 in F:
        if X.difference(subset1) not in F:
            return False
        for subset2 in F:
            if subset1.union(subset2) not in F:
                return False
    return True
138a72f3c6cd4c8b157f716337e36525a9d587b1
169,468
def levenshtein(s1, s2, allow_substring=False):
    """Return the Levenshtein distance between two strings.

    The Levenshtein distance (a.k.a. "edit distance") is the number of
    characters that need to be substituted, inserted or deleted to transform
    s1 into s2. Setting the `allow_substring` parameter to True allows s1 to
    be a substring of s2, so that, for example, "hello" and "hello there"
    would have a distance of zero.

    :param string s1: The first string
    :param string s2: The second string
    :param bool allow_substring: Whether to allow s1 to be a substring of s2
    :returns: Levenshtein distance.
    :rtype: int
    """
    len1, len2 = len(s1), len(s2)
    lev = []
    for i in range(len1 + 1):
        lev.append([0] * (len2 + 1))
    for i in range(len1 + 1):
        lev[i][0] = i
    for j in range(len2 + 1):
        lev[0][j] = 0 if allow_substring else j
    for i in range(len1):
        for j in range(len2):
            lev[i + 1][j + 1] = min(
                lev[i][j + 1] + 1,
                lev[i + 1][j] + 1,
                lev[i][j] + (s1[i] != s2[j])
            )
    return min(lev[len1]) if allow_substring else lev[len1][len2]
a92058eda3afd152ecb5cb4af04cc29b7ec19e21
473,958
def crop(im, r, c, target_r, target_c):
    """
    Crops an image.

    :param im: input image
    :param r: start row coord
    :param c: start column coord
    :param target_r: number of rows to keep (crop height)
    :param target_c: number of columns to keep (crop width)
    :return: cropped image
    """
    return im[r:r + target_r, c:c + target_c]
656876987a56a49bda1247e3086dae63eaf3eddb
322,609