content — string, 39 to 9.28k characters
sha1 — string, 40 characters
id — int64, 8 to 710k
def res_2_tuple(resolution):
    """ Converts a resolution map to a tuple """
    return resolution['h'], resolution['w']
b984ad152305e68e8c62114195eaf00daae19a28
176,753
def get_client_ip(request):
    """Extract the client IP from request headers.

    Args:
        request: django HttpRequest object.

    Returns:
        IP address in string format, from the HTTP_X_FORWARDED_FOR header
        if it exists, otherwise from the REMOTE_ADDR header.
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip
09c0a65f71f070ca3f927924e9d23bd6d3ddaa62
581,140
def flatten_config(config):
    """Take a dict with ':'-separated keys and values or tuples of values,
    flattening it to single key-value pairs.

    Example: flatten_config({'a:b': (1, 2), 'c': 3}) -> {'a': 1, 'b': 2, 'c': 3}.
    """
    new_config = {}
    for ks, vs in config.items():
        ks = ks.split(":")
        if len(ks) == 1:
            vs = (vs,)
        for k, v in zip(ks, vs):
            assert k not in new_config, f"duplicate key '{k}'"
            new_config[k] = v
    return new_config
50b4db33826c5a1c5e2b44baafeaad09299f5907
158,914
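A quick usage sketch for flatten_config above; the config keys are made up for illustration:

config = {'lr:momentum': (0.1, 0.9), 'epochs': 10}  # hypothetical config
flat = flatten_config(config)
print(flat)  # {'lr': 0.1, 'momentum': 0.9, 'epochs': 10}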
def list_contacts(client):
    """ List domain contacts. """
    return client.domain.contacts.all()
f8dd19f389567e51299329e1a495a38f08ddbac4
274,745
import gzip
import re


def search_page(path, pattern):
    """ Search an OpenAFS man page for a given token. """
    with gzip.open(path) as z:
        content = z.read().decode()
        content = re.sub(r'\\&', '', content)
        content = re.sub(r'\\f.', '', content)
        content = re.sub(r'\s+', ' ', content)
        m = re.search(pattern, content)
        if not m:
            raise Exception('Failed to find directory in %s.' % path)
        return m.group(1)
491effb6ae0a1af0d881edde2026434bd492d191
526,482
import torch


def center_of_mass(x, pytorch_grid=True):
    """ Center of mass layer

    Arguments
    ---------
    x : network output
    pytorch_grid : use PyTorch convention for grid (-1,1)

    Return
    ------
    C : center of masses for each chs
    """
    n_batch, chs, dim1, dim2, dim3 = x.shape
    eps = 1e-8
    if pytorch_grid:
        arange1 = torch.linspace(-1, 1, dim1).float().view(1, 1, -1).repeat(n_batch, chs, 1)
        arange2 = torch.linspace(-1, 1, dim2).float().view(1, 1, -1).repeat(n_batch, chs, 1)
        arange3 = torch.linspace(-1, 1, dim3).float().view(1, 1, -1).repeat(n_batch, chs, 1)
    else:
        arange1 = torch.arange(dim1).float().view(1, 1, -1).repeat(n_batch, chs, 1)
        arange2 = torch.arange(dim2).float().view(1, 1, -1).repeat(n_batch, chs, 1)
        arange3 = torch.arange(dim3).float().view(1, 1, -1).repeat(n_batch, chs, 1)
    if x.is_cuda:
        arange1, arange2, arange3 = arange1.cuda(), arange2.cuda(), arange3.cuda()

    m1 = x.sum((3, 4))             # mass along the dimN, shape [n_batch, chs, dimN]
    M1 = m1.sum(-1, True) + eps    # total mass along dimN
    m2 = x.sum((2, 4))
    M2 = m2.sum(-1, True) + eps
    m3 = x.sum((2, 3))
    M3 = m3.sum(-1, True) + eps

    c1 = (arange1 * m1).sum(-1, True) / M1  # center of mass along dimN, shape [n_batch, chs, 1]
    c2 = (arange2 * m2).sum(-1, True) / M2
    c3 = (arange3 * m3).sum(-1, True) / M3

    C = torch.cat([c3, c2, c1], -1)  # center of mass, shape [n_batch, chs, 3]
    return C.transpose(-2, -1)
bf45efc0c38a72e0fa93a07327eab6196c8fb4b3
318,456
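A minimal smoke test for center_of_mass above; the tensor shape is an illustrative assumption. A uniform mass distribution should have its center at the middle of the grid, which is 0 in the PyTorch (-1, 1) convention:

x = torch.ones(2, 4, 8, 8, 8)   # (batch, channels, D, H, W), hypothetical sizes
C = center_of_mass(x)
print(C.shape)        # torch.Size([2, 3, 4])
print(C.abs().max())  # ~0, since the mass is uniform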
def first(it):
    """Get the first element of an iterable."""
    return next(iter(it))
535f9028d96e0e78bc310b4a8e75e558c9217172
41,671
from pathlib import Path


def get_timestamp(log_path: Path) -> str:
    """
    Opens the provided log file and extracts the timestamp from the first line.

    Args:
        log_path: The Path object for the log file.

    Returns:
        The string representation of the timestamp in the format of
        YYYY-MM-DD-HH-MM-SS.
    """
    with log_path.open() as f:
        line = f.readline()
    timestamp = (
        line.split(',')[0]
        .replace(' ', '-')
        .replace(':', '-')
    )
    return timestamp
2afdbcd541c8bd337c7722fa4cfdd6a251ee1dcf
234,306
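To make the expected input concrete, here is the transformation get_timestamp applies; the log line itself is a hypothetical example consistent with the docstring:

line = "2021-03-05 12:30:45,123 INFO started"  # hypothetical first log line
print(line.split(',')[0].replace(' ', '-').replace(':', '-'))
# 2021-03-05-12-30-45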
def get_project_name(task_id, tasks_df, projects_df):
    """Return the project name from the task ID."""
    task = tasks_df.loc[int(task_id)]
    project = projects_df.loc[int(task.project_id)]
    return project['name']
747d6dd54b6a6cc03e3016f865f95e730c411431
414,178
def get_truncate_datetime(truncate_datetime):
    """ Validates the truncate_datetime value """
    if truncate_datetime not in {None, 'second', 'minute', 'hour', 'day'}:
        raise ValueError("truncate_datetime must be None, 'second', 'minute', 'hour' or 'day'")
    return truncate_datetime
31b753557ff4c9d58d789505a97384ca8f57e667
284,276
def _sf_quote(ast, env):
    """Evaluate the special form, `quote`."""
    return ast.second()
38a749f43505f12869599fb49b0bafcbbfb42184
423,531
import yaml
import json


def load_config_file(config_file: str, child_name="dockerConfiguration") -> dict:
    """
    Load OSDF configuration from a file -- currently only yaml/json are supported.

    :param config_file: path to config file (.yaml or .json).
    :param child_name: if present, return only that child node
    :return: config (all or specific child node)
    """
    with open(config_file, 'r') as fid:
        res = {}
        if config_file.endswith(".yaml"):
            res = yaml.safe_load(fid)
        elif config_file.endswith(".json") or config_file.endswith("json"):
            res = json.load(fid)
    return res.get(child_name, res) if child_name else res
2814bf1e67b79a4f48fd67564e00b0f822aedd64
670,608
import math


def rd(v, n=100.0):
    """Round down to the nearest multiple of n (100 by default)."""
    n = float(n)
    return math.floor(v / n) * int(n)
fe0be5c2e252ed1601a05126fbfe9812436384a0
136,304
def compute_sigmag(Sigma_sfr, k, ks_fit=1.4):
    """
    Gas surface density from the SFR surface density assuming modified
    Kennicutt-Schmidt relation.

    Sigma_sfr in Msun/yr/kpc^2
    k is the burstiness parameter
    """
    out = (((k * (10.0 ** -12)) ** -1) * Sigma_sfr) ** (1. / ks_fit)
    return out
fa94d762738e651145c7c60cef630efe1bc25c48
221,795
def qr_to_cube(p):
    """Convert axial coordinates to cube in q-type hexagonal grid."""
    q, r = p
    x, y, z = q, -q - r, r
    return x, y, z
462a4ad3536c9fe57964921a9b42d39802fd4fdd
503,312
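Cube coordinates for a hex grid always satisfy x + y + z == 0, which gives a quick check of qr_to_cube above:

x, y, z = qr_to_cube((2, -1))
print(x, y, z)         # 2 -1 -1
assert x + y + z == 0  # invariant of cube coordinates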
def getCode(sn):
    """
    Gets the code, which is the last 3-4 characters of all 2008+ serial
    numbers, depending on length.
    """
    return sn[8:]
ce5e18b58c8172852faac149351141df16520b04
121,676
import json


def make_json_response(status_code, json_object, extra_headers=None, add_status=False):
    """
    Helper function to serialize a JSON object and add the JSON content type header.
    """
    headers = {"Content-Type": 'application/json'}
    if extra_headers is not None:
        headers.update(extra_headers)
    if add_status:
        json_object['status'] = status_code
    return status_code, json.dumps(json_object), headers
d735682fed9a6c16e30c47cdfaac45b81cd4defc
331,963
def create_field_ratemap(ratemap, field_idx):
    """Returns a copy of input `ratemap`, where fields not True in `field_idx`
    are set to `0`.

    :param numpy.ndarray ratemap: shape (n_ybins, n_xbins)
    :param numpy.ndarray field_idx: boolean array same shape as ratemap,
        specifying which elements belong to the field.
    :return: field_ratemap
    :rtype: numpy.ndarray
    """
    field_ratemap = ratemap.copy()
    field_ratemap[~field_idx] = 0
    return field_ratemap
99dd90a2f0f55bbd459bcc2cdc6b7e6f38c18767
166,679
import click


def percentage_option(function):
    """Define the common percentage option"""
    function = click.option(
        "-p",
        "--percentage",
        type=click.FLOAT,
        required=True,
        help="Percentage you want to invest in.",
    )(function)
    return function
dc65a7cb4ed11590cd01a0bab084f6a80fcb49c9
507,194
def get_devices_dict(version, image=None, arch=None, feature=None):
    """Based on version and image, returns a dictionary containing the folder
    location and the patterns of the installation files for each device type.

    :param version: build version, e.g. 6.2.3-623
    :param image: optional, 'Autotest' or 'Restore', required for M3, M4, S3 (FMC and Sensor)
    :param arch: optional, device architecture, required for S3 (FMC and Sensor) - e.g x86_64
    :param feature: optional, whether the build is on a feature branch (e.g. MARIADB)
    :return: a dictionary
    """
    if feature is None:
        feature = ''
    else:
        feature = ".{}".format(feature)
    # regex patterns are raw strings to avoid invalid escape sequences in Python 3
    devices = {
        'kenton': {'patterns': [r'ftd-[\d\.-]+{}.pkg'.format(feature), r'ftd-boot-[\d.]+lfbff'],
                   'subdir': ['installers', 'installers/doNotRelease'],
                   },
        'saleen': {'patterns': [r'ftd-[\d\.-]+{}.pkg'.format(feature), r'ftd-boot-[\d.]+cdisk'],
                   'subdir': ['installers', 'installers/doNotRelease'],
                   },
        'elektra': {'patterns': [r'asasfr-sys-[\d.-]+.pkg', r'asasfr-5500x-boot-[\d.-]+img'],
                    'subdir': ['installers', 'installers/doNotRelease'],
                    },
        'm3': {'patterns': ['Sourcefire_Defense_Center_S3-{}{}-{}.iso'.format(version, feature, image),
                            'Sourcefire_Defense_Center-{}{}-{}.iso'.format(version, feature, image),
                            'Cisco_Firepower_Mgmt_Center-{}{}-{}.iso'.format(version, feature, image)],
               'subdir': ['iso', 'iso/doNotRelease'],
               },
        'm4': {'patterns': ['Sourcefire_Defense_Center_M4-{}{}-{}.iso'.format(version, feature, image),
                            'Cisco_Firepower_Mgmt_Center-{}{}-{}.iso'.format(version, feature, image),
                            'Sourcefire_Defense_Center-{}{}-{}.iso'.format(version, feature, image)],
               'subdir': ['iso', 'iso/doNotRelease'],
               },
        'm5': {'patterns': ['Sourcefire_Defense_Center-{}{}-{}.iso'.format(version, feature, image),
                            'Cisco_Firepower_Mgmt_Center-{}{}-{}.iso'.format(version, feature, image)],
               'subdir': ['iso', 'iso/doNotRelease'],
               },
        's3fmc': {'patterns': ['Sourcefire_Defense_Center_S3-{}{}-{}.iso'.format(version, feature, image),
                               'Cisco_Firepower_Mgmt_Center-{}{}-{}.iso'.format(version, feature, image)],
                  'subdir': ['iso', 'iso/doNotRelease'],
                  'boot_images': {'os/{}/boot'.format(arch): 'bzImage.*',
                                  'os/{}/ramdisks'.format(arch): 'usb-ramdisk*'}
                  },
        's3': {'patterns': ['Sourcefire_3D_Device_S3-{}{}-{}.iso'.format(version, feature, image),
                            'Cisco_Firepower_NGIPS_Appliance-{}{}-{}.iso'.format(version, feature, image)],
               'subdir': ['iso', 'iso/doNotRelease'],
               'boot_images': {'os/{}/boot'.format(arch): 'bzImage.*',
                               'os/{}/ramdisks'.format(arch): 'usb-ramdisk*'}
               },
        'kp': {'patterns': [r'cisco-ftd-fp2k[\d.-]+[a-zA-Z]{3}',
                            r'fxos-k8-fp2k-lfbff[\w.-]+[a-zA-Z]{3}',
                            r'fxos-k8-lfbff[\w.-]+[a-zA-Z]{3}'],
               'subdir': ['installers', 'installers/doNotRelease'],
               },
        'ssp': {'patterns': [r'cisco-ftd[\d.-]+[a-zA-Z]{3}.csp'],
                'subdir': ['installers', 'installers/doNotRelease'],
                }
    }
    return devices
104fb35f8abe4cbe2e6cb1297a3cc778d229d0ed
679,094
def avg_word_length_extractor(word_tokens):
    """avg_word_length

    Counts the average number of characters per word in the text: the total
    length of the concatenated words divided by the number of words.

    Known differences with Writeprints Static feature "average word length": None.

    Args:
        word_tokens: List of lists of token.text in spaCy doc instances.

    Returns:
        Average length of words in the document.
    """
    avg_word_length = [
        [sum(len(word) for word in word_token) / len(word_token)]
        for word_token in word_tokens
    ]
    label = ["avg_word_length"]
    return avg_word_length, label
22f53732051c1d185da81cc993d04eebe8cfc710
160,829
def flatten(a):
    """ Recursively flatten tuple, list and set in a list. """
    if isinstance(a, (tuple, list, set)):
        l = []
        for item in a:
            l.extend(flatten(item))
        return l
    else:
        return [a]
0d1bf6b28a879c23ce0256335d62a239d7b617b9
178,872
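A short usage sketch of flatten above on mixed nested containers:

print(flatten([1, (2, 3), [4, [5, 6]]]))  # [1, 2, 3, 4, 5, 6]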
def get_tree_tweet_edges(tree):
    """
    Input:
        - tree: recursive tree structure {tweet: "tweet_id", replies: [ .... ]}
    Output:
        - list of parent-child tweet_ids
    """
    parent_tweet_id = tree["tweet"]
    edges_to_children = []
    childrens_edges = []
    for reply in tree["replies"]:
        reply_tweet_id = reply["tweet"]
        # add an edge from the parent to the reply
        edges_to_children.append((parent_tweet_id, reply_tweet_id))
        # recursively get the edges of the child
        childrens_edges += get_tree_tweet_edges(reply)
    return edges_to_children + childrens_edges
87957f903d90ab22d1c4ac4b80d95df804eef93d
357,845
import torch


def compute_gcam_map(gradients, activations) -> torch.Tensor:
    """Take the mean of `gradients`, multiply by `activations`, sum it up and
    return a GradCAM feature map.
    """
    # Mean over the feature maps. If you don't use `keepdim`, it returns
    # a value of shape (1280) which isn't amenable to `*` with the activations
    gcam_weights = gradients.mean(dim=[1, 2], keepdim=True)  # (1280,7,7) --> (1280,1,1)
    gcam_map = (gcam_weights * activations)  # (1280,1,1) * (1280,7,7) --> (1280,7,7)
    gcam_map = gcam_map.sum(0)  # (1280,7,7) --> (7,7)
    return gcam_map
d73d6a947db44d443dbdbdaa2f12b6723cd9c3b3
551,549
def get_no_cyclic(mesh, elem_no_nan):
    """Compute non cyclic elements of the mesh."""
    d = mesh.x2[elem_no_nan].max(axis=1) - mesh.x2[elem_no_nan].min(axis=1)
    no_cyclic_elem = [i for (i, val) in enumerate(d) if val < 100]
    return no_cyclic_elem
561d12df715230d6995f47071608b69c4b93e459
251,848
def replace_spaces(text: str) -> str:
    """Replaces spaces with '+' in given text.

    :param text: The text to be formatted.
    :returns: Text with spaces replaced with '+'.
    """
    return text.replace(" ", "+")
f5024461aa2ead7fb05f50274c11b089467f9ec3
509,383
def RelationshipLengthProperty(relationship_name):
    """Return a property representing number of objects in relationship."""
    def getter(self):
        relationship = getattr(self._object, relationship_name)
        try:
            return relationship.countObjects()
        except Exception:
            return len(relationship())
    return property(getter)
8550a6374d5d7bc405352bbee11bb3b40684711c
638,044
def sparseVectorDotProduct(v1, v2):
    """
    Given two sparse vectors |v1| and |v2|, each represented as
    collections.defaultdict(float), return their dot product.
    You might find it useful to use sum() and a list comprehension.
    This function will be useful later for linear classifiers.
    """
    # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)
    result = 0
    for k1, val1 in v1.items():
        for k2, val2 in v2.items():
            if k1 == k2:
                result += val1 * val2
    return result
    # END_YOUR_CODE
3fa8a93fd6960a23c52672ccc0e1785e8442182c
228,417
from typing import Tuple
from typing import Any


def remove_duplicates(tuple_: Tuple[Any, ...]) -> Tuple[Any, ...]:
    """Remove duplicates in tuple `tuple_`, preserving first-occurrence order.

    Example:
        tuple_ = (3, 1, 2, 2, 1, 4)
        The returned tuple is: (3, 1, 2, 4)
    """
    return tuple(sorted(set(tuple_), key=tuple_.index))
cfcc57c2f936c63cf92c1c95d31fbfa11cc39d9a
350,713
def add_one(number: int) -> int:
    """Add one to 'number'.

    Args:
        number (int): integer to add one to.

    Returns:
        integer

    Example:
        >>> assert add_one(0) == 1
        >>> assert add_one(10) == 11
    """
    return number + 1
6b92fa75af2700530b0f41fbe7245d37cd937536
387,178
from typing import Dict
from typing import Any
from typing import List


def annotation_to_entities(annotation: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Flatten the annotation dict to a list of 'entities'."""
    entities = []
    for paragraph in annotation['paragraphs']:
        paragraph_id = len(entities)
        paragraph['type'] = 3  # 3 for paragraph
        paragraph['parent_id'] = -1
        entities.append(paragraph)
        for line in paragraph['lines']:
            line_id = len(entities)
            line['type'] = 2  # 2 for line
            line['parent_id'] = paragraph_id
            entities.append(line)
            for word in line['words']:
                word['type'] = 1  # 1 for word
                word['parent_id'] = line_id
                entities.append(word)
    return entities
0f95827abc721d035979cfa50a8734fd096d31c6
589,535
def clamp(value: float, min_value: float, max_value: float) -> float:
    """Ensures the **value** is contained within bounds (a minimum and a maximum).

    :param value: The value to *clamp*
    :param min_value: The lower bound
    :param max_value: The upper bound
    :type value: float
    :type min_value: float
    :type max_value: float
    :return: The clamped value
    :rtype: float
    """
    return max(min_value, min(value, max_value))
a980a30f9efa5412c4c20372494cc33257468e8b
418,975
def glyphRecordsToGlyphNames(glyphRecords):
    """
    >>> glyphList = ["a", "b"]
    >>> glyphRecords = glyphNamesToGlyphRecords(glyphList)
    >>> glyphRecordsToGlyphNames(glyphRecords)
    ['a', 'b']
    """
    return [record.glyphName for record in glyphRecords]
1b4f73ca5d285e8e5f132ccdf7b87f409ea04651
367,192
def B(value):
    """Returns 1 if value is truthy, 0 otherwise."""
    return 1 if value else 0
ce788b5fa6aff71379dd3ca326e2e4e2e5da950f
657,979
def _get_team_membership_csv_headers(course):
    """
    Get headers for the team membership csv:
    ['user', 'mode', <teamset_id_1>, ..., <teamset_id_n>]
    """
    headers = ['user', 'mode']
    for teamset in sorted(course.teams_configuration.teamsets, key=lambda ts: ts.teamset_id):
        headers.append(teamset.teamset_id)
    return headers
6565202b295cd530933a53209c02ba5da59074cd
119,700
def ismember(ui, username, userlist):
    """Check if username is a member of userlist.

    If userlist has a single '*' member, all users are considered members.
    Can be overridden by extensions to provide more complex authorization
    schemes.
    """
    return userlist == [b'*'] or username in userlist
5c968c778e7991e631cf66146a692c1b41b5dd2a
334,123
def get_gcp_zones(compute, project):
    """
    Get all zones in GCP (needs compute engine)
    """
    zones = []
    details = compute.zones().list(project=str(project)).execute()
    # dict.has_key() was removed in Python 3; use the `in` operator instead
    if 'items' in details:
        for item in details['items']:
            zones.append(str(item['name']))
    return zones
042a67f6b8a51ca4022f435adb205b5e919b0351
29,323
def kelvin_to_celsius(temp):
    """ From Kelvin (K) to Celsius (ºC) """
    return temp - 273.15
dc63cad132e3e2ae09500a34cb164b97d41341c8
527,474
def lmParamToPoint(a, c):
    """
    Return the coordinates of a landmark from its line parameters.

    Wall landmarks are characterized by the point corresponding to the
    intersection of the wall line and its perpendicular passing through the
    origin (0, 0). The wall line is characterized by a vector (a, c) such
    that its equation is given by y = ax + c.
    """
    xp = float(-c * a / (1 + a ** 2))
    yp = float(c / (1 + a ** 2))
    return [xp, yp]
6b98613216f1287ed9b25f1345ea0a18aa0fc90b
27,847
def get_partial_dict(prefix, dictionary, container_type=dict,
                     ignore_missing=False, pop_keys=False):
    """Given a dictionary and a prefix, return a Bunch, with just items that
    start with prefix.

    The returned dictionary will have 'prefix.' stripped so::

        get_partial_dict('prefix', {'prefix.xyz':1, 'prefix.zyx':2, 'xy':3})

    would return::

        {'xyz':1,'zyx':2}
    """
    match = prefix + "."
    n = len(match)
    new_dict = container_type(((key[n:], dictionary[key])
                               for key in dictionary if key.startswith(match)))
    if pop_keys:
        for key in list(dictionary.keys()):
            if key.startswith(match):
                dictionary.pop(key, None)
    if new_dict:
        return new_dict
    else:
        if ignore_missing:
            return {}
        raise AttributeError(prefix)
6cdf5ad9254c3e879942f8d91506dbb93390ef22
105,581
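A usage sketch for get_partial_dict above, pulling a namespaced sub-config out of a flat settings dict (the keys are made up):

settings = {'db.host': 'localhost', 'db.port': 5432, 'debug': True}
print(get_partial_dict('db', settings))  # {'host': 'localhost', 'port': 5432}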
def _complex_matrix_multiplication(input_tensor, other_tensor, mult_func):
    """
    Perform a matrix multiplication, helper function for complex_bmm and complex_mm.

    Parameters
    ----------
    input_tensor : torch.Tensor
    other_tensor : torch.Tensor
    mult_func : Callable
        Multiplication function e.g. torch.bmm or torch.mm

    Returns
    -------
    torch.Tensor
    """
    if not input_tensor.is_complex() or not other_tensor.is_complex():
        raise ValueError("Both input_tensor and other_tensor have to be complex-valued torch tensors.")
    output = (
        mult_func(input_tensor.real, other_tensor.real)
        - mult_func(input_tensor.imag, other_tensor.imag)
        + 1j * mult_func(input_tensor.real, other_tensor.imag)
        + 1j * mult_func(input_tensor.imag, other_tensor.real)
    )
    return output
1d748d6ca54db00fd0385e11f82b1c435a8609f6
396,003
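A sanity check for the helper above, assuming a PyTorch version with complex tensor support: the four real multiplications should reproduce the direct complex product.

import torch

a = torch.randn(3, 4, dtype=torch.complex64)
b = torch.randn(4, 5, dtype=torch.complex64)
out = _complex_matrix_multiplication(a, b, torch.mm)
assert torch.allclose(out, a @ b, atol=1e-5)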
def cast_ulonglong(value):
    """ Cast value to 64bit integer """
    value = value & 0xffffffffffffffff
    return value
50cd3a11763cedafdd4c1004eab24e3d4f0bfb3c
492,230
import itertools


def calc_permutations(num_dice):
    # ----------- The result of this function is used in all other functions ----------- #
    """ calculates all the different permutations for a certain number of dice """
    hex_options = [1, 2, 3, 4, 5, 6]

    # One liner:
    # permutations = [r for r in itertools.product(*(num_dice * [hex_options]))]

    ## One liner broken down
    # Create a list of hex_options lists of length num_dice
    all_dice = num_dice * [hex_options]
    # Create the cartesian product using itertools
    cart_prod = itertools.product(*all_dice)
    # Populate a list from the cartesian product iterator
    permutations = [r for r in cart_prod]
    return permutations
bbd7130fb2ad1f4f979b0b341add56a2eb3c3a3c
133,489
def get_capabilities(conn):
    """
    Returns a string which is the XML of the hypervisor's capabilities

    :param conn: libvirt Connection object
    :return: (str) of capabilities in XML format
    """
    return conn.getCapabilities()
ffe9cc18d84b88183a0f0ba4658cee1f98383386
113,112
def check_columns(filename):
    """
    Check the columns in an input file for numerical values.

    This routine takes as input a file name. It reads in the lines from the
    file and determines the number of columns plus the index values of the
    numerical columns. It is assumed that the first column without '#', '|'
    or '\\' is the template for all the lines, so only the first "data" line
    is split up and checked for numerical values in the columns. The indices
    are then used with numpy.loadtxt.

    Parameters
    ----------
    filename : the name of the file with regular columns to check

    Returns
    -------
    inds : a list of the numerical columns in the file
    ncols : the total number of columns in the file

    If an error occurs, inds and ncols are returned as None.
    """
    try:
        infile = open(filename, 'r')
        lines = infile.readlines()
        infile.close()
        ncols = 0
        inds = []
        for line in lines:
            if ncols == 0:
                line = line.strip('\n')
                if ('#' in line[0:1]) | ('\\' in line[0:1]) | \
                        ('|' in line[0:1]) | ('x_or_RA' in line):
                    pass
                else:
                    values = line.split('#')
                    subline = values[0]
                    values = subline.split('|')
                    subline = values[0]
                    values = subline.split('\\')
                    subline = values[0]
                    values = subline.split()
                    ncols = len(values)
                    for loop in range(ncols):
                        try:
                            float(values[loop])
                            inds.append(loop)
                        except ValueError:
                            pass
        return inds, ncols
    except Exception:
        return None, None
6c74d62bea20cd186884fa6f8f1f156b4d90ac40
276,767
def paths_are_not_structured(paths):
    """
    Returns True if the list of paths is not yet file system structured,
    i.e. if not all paths start with the root path (contribution label).
    """
    if not paths:
        return False
    root = paths[0][: paths[0].find('/', 1)]
    for path in reversed(paths):
        if not path.startswith(root):
            return True
    return False
f37e368ea63cf651af00bfcc19fb9c7957138c51
622,419
import sqlite3


def get_pdb_chain_subset(db, gene, use_cutoffs=False, res_cutoff=2.8, rfac_cutoff=.3):
    """
    Return a list of tuples of [pdb, chain] of the particular gene
    """
    db.row_factory = sqlite3.Row
    c = db.cursor()
    if not use_cutoffs:
        c.execute("SELECT DISTINCT PDB, original_chain FROM cdr_data WHERE gene =?", (gene,))
    else:
        c.execute("SELECT DISTINCT PDB, original_chain FROM cdr_data WHERE gene = ? and resolution <= ? and rfactor <= ?",
                  [gene, res_cutoff, rfac_cutoff])
    rows = c.fetchall()
    entries = []
    for row in rows:
        entries.append([row['PDB'], row['original_chain']])
    return entries
ef3d3c9dc3105f4ef024112cf561fe05200be8bc
239,872
from typing import Iterable


def to_list(item_or_list):
    """
    Convert a single item, a tuple, a generator or anything else to a list.

    :param item_or_list: single item or iterable to convert
    :return: a list
    """
    if isinstance(item_or_list, list):
        return item_or_list
    elif isinstance(item_or_list, (str, bytes)):
        return [item_or_list]
    elif isinstance(item_or_list, Iterable):
        return list(item_or_list)
    else:
        return [item_or_list]
fe81e6e142120d2247c548e16dfd85634280dfe9
598,353
def optional(converter):
    """
    A converter that allows an attribute to be optional. An optional attribute
    is one which can be set to ``None``.

    :param callable converter: the converter that is used for non-``None`` values.

    .. versionadded:: 17.1.0
    """
    def optional_converter(val):
        if val is None:
            return None
        return converter(val)

    return optional_converter
128042c7a95bb91c665ab6ac0f6771e4a72632ed
701,366
def unitTransformed(unit, transform, freq):
    """
    :param unit: a data series' unit type
    :param transform: what transformation was applied to the data series
    :param freq: the frequency of the data series
    :return: a string representing the transformed unit
    """
    if unit == "Index":
        unit_transformed = "Index"
    elif transform == "chg":
        if "%" in unit:
            unit_transformed = "Ppt. change"
        else:
            unit_transformed = "Level change"
    elif "pch" == transform and freq == "m":
        unit_transformed = "MoM %"
    elif "pca" == transform and freq == "q":
        unit_transformed = "QoQ AR %"
    else:
        unit_transformed = unit + " [{}]".format(transform)
    return unit_transformed
4ec984fcd60780884cee7f07c9d294c6491d0fe2
159,157
def _handle_docker_port(port):
    """Translates a Docker-Compose style port to a Kubernetes servicePort"""
    kube_port = {}
    try:
        kube_port["port"] = port.split(":")[1]
        kube_port["targetPort"] = port.split(":")[0]
    except IndexError:
        kube_port["port"] = port.split(":")[0]
    return kube_port
19fd48a4ab87d0eff61bc198f96985280470c1fb
328,677
def lift(x):
    """
    Lift an object of a quotient ring `R/I` to `R`.

    EXAMPLES:

    We lift an integer modulo `3`::

        sage: Mod(2,3).lift()
        2

    We lift an element of a quotient polynomial ring::

        sage: R.<x> = QQ['x']
        sage: S.<xmod> = R.quo(x^2 + 1)
        sage: lift(xmod-7)
        x - 7
    """
    try:
        return x.lift()
    except AttributeError:
        raise ArithmeticError("no lift defined.")
0eb345764280709ceae00cb323a4a5b23beb49b6
101,853
def project_08_largest_product(count):
    """
    Problem 8: Find the largest product of n numbers in a hardcoded series.

    Args:
        count (int): The number of adjacent numbers to determine the product for.
    """
    def product(sequence):
        if 0 in sequence:
            return 0
        else:
            product = 1
            for term in sequence:
                product = int(int(product) * int(term))
            return product

    series = '73167176531330624919225119674426574742355349194934' \
             '96983520312774506326239578318016984801869478851843' \
             '85861560789112949495459501737958331952853208805511' \
             '12540698747158523863050715693290963295227443043557' \
             '66896648950445244523161731856403098711121722383113' \
             '62229893423380308135336276614282806444486645238749' \
             '30358907296290491560440772390713810515859307960866' \
             '70172427121883998797908792274921901699720888093776' \
             '65727333001053367881220235421809751254540594752243' \
             '52584907711670556013604839586446706324415722155397' \
             '53697817977846174064955149290862569321978468622482' \
             '83972241375657056057490261407972968652414535100474' \
             '82166370484403199890008895243450658541227588666881' \
             '16427171479924442928230863465674813919123162824586' \
             '17866458359124566529476545682848912883142607690042' \
             '24219022671055626321111109370544217506941658960408' \
             '07198403850962455444362981230987879927244284909188' \
             '84580156166097919133875499200524063689912560717606' \
             '05886116467109405077541002256983155200055935729725' \
             '71636269561882670428252483600823257530420752963450'

    max_terms = list(map(int, series[0:count]))
    max_product = product(max_terms)
    # the upper bound must be len(series) - count + 1 so the final window is included
    for start_index in range(1, len(series) - count + 1):
        terms = list(map(int, series[start_index:start_index + count]))
        term_product = product(terms)
        if term_product > max_product:
            max_terms = terms
            max_product = term_product
    return max_product
49d7e5d5d2b90bc22b07d9af7863b8367d16501d
30,262
def div(a, b):
    """Helper function for division

    :param a: first parameter
    :type a: int
    :param b: second parameter
    :type b: int
    :return: integer division result
    :rtype: int
    """
    return int(a / b)
030354e96f120c19c95dd0e7ff71f15ac6d99047
253,481
def make_url_path_regex(*path):
    """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
    path = [x.strip("/") for x in path if x]  # Filter out falsy components.
    return r"^/%s/?$" % "/".join(path)
b7031c8d94ac4ae3b290c32f504cd2264196e15a
518,303
import torch


def masked_log_softmax(vector, mask):
    """
    ``torch.nn.functional.log_softmax(vector)`` does not work if some elements
    of ``vector`` should be masked. This performs a log_softmax on just the
    non-masked portions of ``vector``. Passing ``None`` in for the mask is
    also acceptable; you'll just get a regular log_softmax.

    We assume that both ``vector`` and ``mask`` (if given) have shape
    ``(batch_size, vector_dim)``.

    In the case that the input vector is completely masked, this function
    returns an array of ``0.0``. You should be masking the result of whatever
    computation comes out of this in that case, anyway, so it shouldn't matter.
    """
    if mask is not None:
        vector = vector + mask.log()
    # an explicit dim avoids PyTorch's implicit-dimension deprecation warning
    return torch.nn.functional.log_softmax(vector, dim=-1)
16f0ad8ba47fa6bc9971d135ba8494d1e2c9a914
409,504
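A small sketch of masked_log_softmax above: masked positions pick up log(0) = -inf before the softmax, so they end up with (near-)zero probability.

vector = torch.tensor([[1.0, 2.0, 3.0]])
mask = torch.tensor([[1.0, 1.0, 0.0]])
print(masked_log_softmax(vector, mask).exp())
# tensor([[0.2689, 0.7311, 0.0000]])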
from typing import List


def read_file_as_lines(filename: str) -> List[str]:
    """Reads in filename as a list of lines.

    Args:
        filename: The path of the filename to read in.

    Returns:
        A list of strings.
    """
    with open(filename) as infile:
        file_lines = infile.readlines()
    return file_lines
566ec2251d3d34ebccf355be5d1c471247262409
371,333
from typing import List
import re


def parse_active_site_data_line(line: str) -> List[str]:
    """
    Parse an active site data line.

    Args:
        line (str): a line from the active site data file.

    Returns:
        List[str]: a list containing identifiers and the sequence.
    """
    identifiers, sequence = re.split(r",\s+", line.strip(">\n"))
    return identifiers.split() + [sequence]
275c33c94a7b6442d2b209abec1ac70ff494a96e
42,160
def request_get_home_information(userId: str):
    """Personal home page information.

    Args:
        userId (str): user ID

    Returns:
        Dict: the wrapped request payload
    """
    return {
        "operationName": "visionProfile",
        "query": "query visionProfile($userId: String) { visionProfile(userId: $userId) { result hostName "
                 "userProfile { ownerCount { fan photo follow photo_public "
                 "__typename } profile { gender user_name user_id headurl "
                 " user_text user_profile_bg_url __typename } isFollowing __typename "
                 "} __typename }} ",
        "variables": {
            "userId": userId
        }
    }
63a03c2a06c564438dc7bd5168b0838e3530d79d
222,690
def MMAX_POSITION_FROM_ID(ID):
    """ Extract the position from a MMAX ID """
    return int(str(ID).split('_')[-1])
9b0556868b3185204caec88992809eb231a97c9d
623,207
from pathlib import Path
from typing import List
from typing import Tuple
from typing import Dict
import yaml


def find_configs(path: Path) -> List[Tuple[Dict, Path]]:
    """Returns the parsed content and paths of qaboard.yaml files that should
    be loaded for a (sub)project at the `path`.

    Returns a list of (config, path) tuples - the root qaboard.yaml is first
    and the subproject's is last.
    """
    configsxpaths = []
    # We need a full path to iterate on the parents
    path = path.resolve()
    # We look for qaboard.yaml configuration files in the path folder and its parents
    parents = [path, *list(path.parents)]
    for parent in parents:
        qatools_config_path = parent / 'qaboard.yaml'
        if not qatools_config_path.exists():
            qatools_config_path = parent / 'qatools.yaml'  # backward compatibility
        if not qatools_config_path.exists():
            continue
        with qatools_config_path.open('r') as f:
            qatools_config = yaml.load(f, Loader=yaml.SafeLoader)
        if not qatools_config:  # support empty files that just mark subprojects
            qatools_config = {}
        configsxpaths.append((qatools_config, qatools_config_path))
        if qatools_config.get('root'):
            break
    configsxpaths.reverse()
    return configsxpaths
e7007eff5e986933f082c530351dc7e9fda5e27a
69,225
def is_slug(string):
    """ Function to test if a URL slug is valid """
    return all([s in '0123456789-abcdefghijklmnopqrstuvwxyz' for s in string])
e81a54afd9c4b4cbc789e610f7fa70e7d8e73668
123,374
def query_merge(query: dict, **kwargs: dict) -> dict:
    """
    Merge a dictionary and key word arguments into a single dictionary.
    Used to merge a MongoDB query and key word arguments into a single query.

    :param query: The dictionary to merge.
    :param kwargs: The key word arguments to merge.
    :return: A dictionary merging query and kwargs.
    """
    if query is None:
        return kwargs
    else:
        return {**query, **kwargs}
5a53047cdc2983225815506203f9134de371c0c0
658,019
def keras_variable_names(model):
    """List all variable names in a Keras Model."""
    return [x.name for x in model.variables]
e4347ad831035b87a8b7c543512b5133c72045f1
392,422
def cal_sort_key(cal):
    """
    Sort key for the list of calendars: primary calendar first, then other
    selected calendars, then unselected calendars.
    (" " sorts before "X", and tuples are compared piecewise)

    :param cal: a calendar
    :return: the sort key tuple for the calendar
    """
    if cal["selected"]:
        selected_key = " "
    else:
        selected_key = "X"
    if cal["primary"]:
        primary_key = " "
    else:
        primary_key = "X"
    return primary_key, selected_key, cal["summary"]
5e695202cc5efaf7aa0e15adffcd8141e68b63c8
218,571
def qnn_dense_legalize(attrs, inputs, types):
    """Default legalization is None."""
    return None
7ef41d7ef028e368ce7b7457864e6cdc5f606339
599,783
def _sparse_gradient(vol, positions):
    """Gradient of a 3D volume at the provided `positions`.

    For SIFT we only need the gradient at specific positions and do not need
    the gradient at the edge positions, so we can just use this simple
    implementation instead of numpy.gradient.
    """
    p0 = positions[..., 0]
    p1 = positions[..., 1]
    p2 = positions[..., 2]
    g0 = vol[p0 + 1, p1, p2] - vol[p0 - 1, p1, p2]
    g0 *= 0.5
    g1 = vol[p0, p1 + 1, p2] - vol[p0, p1 - 1, p2]
    g1 *= 0.5
    g2 = vol[p0, p1, p2 + 1] - vol[p0, p1, p2 - 1]
    g2 *= 0.5
    return g0, g1, g2
6676014543066dbc75e1a9e069b959cc30e5bd50
451,072
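A quick numpy check of _sparse_gradient above on a linear ramp, where the central difference is exact; the volume and positions are illustrative:

import numpy as np

xs, ys, zs = np.meshgrid(np.arange(8), np.arange(8), np.arange(8), indexing='ij')
vol = 2.0 * xs                                # gradient along axis 0 is exactly 2
positions = np.array([[3, 4, 5], [2, 2, 2]])  # interior points only
g0, g1, g2 = _sparse_gradient(vol, positions)
print(g0)  # [2. 2.]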
def count_occurrences(text: str) -> dict:
    """
    Counts the number of times that each different character appears in the text.

    :param text: the text to be coded
    :return: a dictionary with each character of the text as a key and the
        number of appearances of this character in the text as the value
    """
    occ = dict()
    for char in text:
        if char not in occ:
            occ[char] = 0
        occ[char] = occ[char] + 1
    return occ
dcfcb0b17bd16a9f499218cf6f0d721f78dc862c
147,017
def get_linestyle(experiment):
    """Select linestyle depending on experiment."""
    if experiment == 'linear combination: AA, GHG':
        linestyle = 'dotted'
    else:
        linestyle = '-'
    return linestyle
42d4c7d74c6eb313113d05661bb9cbaafbf738c5
59,510
from typing import List
from typing import Any
import json


def retrieve_msg(identifier: str, msg_q) -> List[Any]:
    """Retrieve one msg from msg_q. If the msg does not fit identifier,
    put the msg back in the queue.

    :param identifier: A unique string to differentiate the recipient of the
        received message.
    :param msg_q: A queue to receive msg from rpi_out
    :return: A json-loaded object (or empty list) from the received message.
    """
    msg = msg_q.get()
    msg_list = json.loads(msg)
    # if the received message is not what we want
    if msg_list[0] != identifier:
        msg_q.put(msg)  # put the message back
        msg_list = []   # reset msg_list
    return msg_list
b944d9e150b8dc7dd3355640e7f5f9129ce919a0
460,560
def look_and_say(digits):
    """Describes a digit sequence in a look-and-say manner."""
    desc = [1, digits[0]]
    for digit in digits[1:]:
        if desc[-1] == digit:
            desc[-2] += 1
        else:
            desc.extend([1, digit])
    return desc
2e998da0268cda8a3a4cbcc1f5ef0d140a94e6fb
212,416
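A one-step example of look_and_say above: [1, 1, 2] reads as "two 1s, one 2".

print(look_and_say([1, 1, 2]))  # [2, 1, 1, 2]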
def send_sns_mobile_push_notification_to_device(
    device, notification_type, text, data, title, badge=None
):
    """
    Method that sends out a mobile push notification to a specific device.

    :param device: device to send the notification to.
    :param notification_type: type of notification to be sent
    :param text: text to be included in the push notification
    :param data: data to be included in the push notification
    :param title: title to be included in the push notification
    :param badge: badge number to be included in the push notification
    :return: response from SNS
    """
    return device.send(
        notification_type=notification_type,
        text=text,
        data=data,
        title=title,
        badge=badge,
    )
f33604afe865e5257100ba1010dc2de72510cb76
451,001
def freeze(model):
    """
    Freezes all the parameters in the model.

    Returns
    -------
    Model
        the model itself
    """
    for param in model.parameters():
        param.requires_grad = False
    model.training = False
    return model
70afe94a83eaf1203e099d47ecc53209d70ba658
666,334
def read(filepath, readfunc, treant):
    """Read data from a treant

    Args:
        filepath: the filepath to read from
        readfunc: the read callback
        treant: the treant to read from

    Returns:
        the data
    """
    return readfunc(treant[filepath].abspath)
24e94b244dacd603158a9d779a167133cdd2af50
683,890
def baseline_calc_hmean(SSP1, SSP2, BSL_range, TAT):
    """ Calculates measured baseline lengths (harmonic mean sound speed).

    It needs:
    SSP1 ... sound speed at beacon 1 in metres per second
    SSP2 ... sound speed at beacon 2 in metres per second
    BSL_range ... measured traveltime in milliseconds
    TAT ... turn around time in milliseconds

    It returns:
    baseline length in metres
    """
    return ((2 * SSP1 * SSP2) / (SSP2 + SSP1)) * (((BSL_range - TAT) / 2) / 1000)
2de458102142307993546c4d19cea7520aed5632
654,009
def get_num_words(data_frame, tokens_col):
    """Get the maximum token found within a column.

    Args:
        data_frame: The frame from which the max value should be found.
        tokens_col: The string column name to be returned.

    Returns:
        Maximum token found within the given column.
    """
    return max(data_frame[tokens_col].apply(lambda x: max(x) if len(x) > 0 else 0))
2754cc6760e003fd587a611cc8690b28cd5e6a5d
468,095
def split_strip(text, delimiters):
    """
    Split the data into a 2D list and strip out whitespace. Returns a 2D list
    of data and an integer representing the number of rows in this data.
    """
    text = text.strip()
    data = delimiters.row_border.split(text)
    rows = len(data)
    for i in range(rows):
        data[i] = data[i].strip()
        data[i] = delimiters.cell_border.split(data[i])
    return data, rows
01cc58b8b012f290f3c84ed1705f0a2552e32ba9
239,106
import torch


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th
    value. This is used when we convert FC layers to equivalent Convolutional
    layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor;
        None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(
                dim=d,
                index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor
389d2e6c9c04bbc2cefa9d16393c9fbb68f543ee
163,508
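A shape-level sketch of decimate above: keeping every 2nd row and every 3rd column of a 4x6 tensor leaves a 2x2 tensor.

t = torch.arange(24).reshape(4, 6)
print(decimate(t, [2, 3]).shape)  # torch.Size([2, 2])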
def are_all_equal(tiles):
    """ Checks if all tiles are the same. """
    return len(set(tiles)) <= 1
45be6a153b94769c59ac260a4a8ef37f0b5433b5
480,852
def _ExtractResNetThroughput(output):
    """Extract throughput from Horovod output.

    Args:
        output: Horovod output

    Returns:
        A tuple of:
            Average throughput in images per second (float)
            Unit of the throughput metric (str)
    """
    # Start from the last line and iterate backwards.
    avg_throughput = 0
    for line in output.splitlines()[::-1]:
        if 'train_throughput' in line:
            split_line = line.split()
            avg_throughput = float(split_line[-1])
            break
    return round(avg_throughput, 1), 'images/second'
671d745b0f73e9a84fa9a8b55f45054c711329c0
697,256
import re


def get_pattern_cols(cols, pos_pattern=None, neg_pattern=None):
    """Get column names from a list that match ``pos_pattern``, but do not
    match ``neg_pattern``. If a column name matches both ``pos_pattern`` and
    ``neg_pattern``, it is excluded.

    Parameters
    ----------
    cols : `List` ['str']
        Usually column names of a DataFrame.
    pos_pattern : regular expression
        If column name matches this pattern, it is included in the output.
    neg_pattern : regular expression
        If column name matches this pattern, it is excluded from the output.

    Returns
    -------
    pattern_cols : `List` ['str']
        List of column names that match the pattern.
    """
    if pos_pattern is None:
        pos_pattern_cols = []
    else:
        pos_regex = re.compile(pos_pattern)
        pos_pattern_cols = [col for col in cols if pos_regex.findall(col)]
    if neg_pattern is None:
        neg_pattern_cols = []
    else:
        neg_regex = re.compile(neg_pattern)
        neg_pattern_cols = [col for col in cols if neg_regex.findall(col)]
    pattern_cols = [col for col in cols
                    if col in pos_pattern_cols and col not in neg_pattern_cols]
    return pattern_cols
78a306eb749e76b8fa96ba746bea3d884bb7c6dd
343,673
def ask_yes_no(question: str) -> bool:
    """
    Asks for a valid yes/no answer.

    :return: bool
    """
    while True:  # ask until return
        answer = input(question + " [yes, y, 1; no, n, 0] ")
        if answer.lower() == "yes" or answer.lower() == "y" or answer == "1":
            return True
        elif answer.lower() == "no" or answer.lower() == "n" or answer == "0":
            return False
        else:
            print("invalid answer")
9b39c0f00b47f25689e44f789c69935b54a09e88
444,697
def facility_name(hutch):
    """Return the facility name for an instrument"""
    if hutch in [
        'dia', 'mfx', 'mec', 'cxi', 'xcs', 'xpp', 'sxr', 'amo',
        'DIA', 'MFX', 'MEC', 'CXI', 'XCS', 'XPP', 'SXR', 'AMO',
    ]:
        return '{}_Instrument'.format(hutch.upper())
    return '{}_Instrument'.format(hutch.lower())
6a27a873ce8d498e15ac2459520b8366a16039c9
185,894
def __compare(x, y):
    """
    Parameters
    ----------
    x : comparable
    y : comparable

    Returns
    -------
    result : int
        Returns 1 if x is larger than y, -1 if x is smaller than y,
        0 if x equals y.
    """
    if x > y:
        return 1
    elif x < y:
        return -1
    else:
        return 0
1fbd814c96607adcf5f04d9c2597c121790cea13
173,333
def get_cloudformation_template(cfn_client, stack_name):
    """
    Returns the template body of a CloudFormation stack.
    """
    response = cfn_client.get_template(StackName=stack_name)
    return response["TemplateBody"]
37d87f3c4d85e138cb58cd70ef84ce2f3abd2ee0
603,155
def fullsize_stats(f, bpred):
    """Summarizes a binary prediction file that has one row per tile.

    Calculates four items for a full-size file:
    1. is_empty (all tiles are empty)
    2. has_objects
    3. has_elephants (at least one tile has an elephant)
    4. is_uncertain; i.e. the file is a mix of empty and uncertain tiles
       but contains no other objects.
    """
    fmask = bpred[(bpred['filename'] == f)].index
    tilerows = bpred.loc[fmask]
    n_tiles = len(tilerows)
    n_empty = len(tilerows[tilerows['empty'] == 1])
    n_elephants = len(tilerows[tilerows['elephant'] == 1])
    non_empty_mask = tilerows[tilerows['empty'] == 0].index
    nonemptyrows = tilerows.loc[non_empty_mask]
    n_uncertain = sum((nonemptyrows.iloc[:, 3:].apply(sum, axis=1).values) == 0)
    n_objectrows = sum(nonemptyrows.iloc[:, 4:].apply(sum, axis=1).values)
    is_empty = (True if n_tiles == n_empty else False)
    has_elephants = (True if n_elephants > 0 else False)
    is_uncertain = (True if (n_tiles - n_empty == n_uncertain) & (n_uncertain > 0) else False)
    has_objects = (True if n_objectrows > 0 else False)
    return (is_empty, has_objects, has_elephants, is_uncertain)
080828c982336115edbca5b525557b398eaaab65
381,528
from typing import Union
from typing import Any


def cast2lowercase(input: Union[list, tuple, str]) -> Any:
    """Cast input into lowercase.

    Example:
        >>> cast2lowercase('Hello World')
        'hello world'
        >>> cast2lowercase(['Hello', 'World'])
        ['hello', 'world']
    """
    inputs = []
    outputs = []
    if isinstance(input, str):
        inputs = [input]
    else:
        inputs = input  # type: ignore
    for _input in inputs:
        outputs.append(_input.lower())
    if isinstance(input, str):
        return outputs[0]
    elif isinstance(input, tuple):
        return tuple(outputs)
    else:
        return outputs
397d3887c67ce26613b87cfe0aa762cafe080e2f
619,355
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit,
                                  timeout_in_us, err_count, sct, sc):
    """Add error injection

    Args:
        name: Name of the operating NVMe controller
        opc: Opcode of the NVMe command
        cmd_type: Type of NVMe command. Valid values are: admin, io
        do_not_submit: Do not submit commands to the controller
        timeout_in_us: Wait specified microseconds when do_not_submit is true
        err_count: Number of matching NVMe commands to inject errors
        sct: NVMe status code type
        sc: NVMe status code

    Returns:
        True on success, RPC error otherwise.
    """
    params = {'name': name, 'opc': opc, 'cmd_type': cmd_type}
    if do_not_submit:
        params['do_not_submit'] = do_not_submit
    if timeout_in_us:
        params['timeout_in_us'] = timeout_in_us
    if err_count:
        params['err_count'] = err_count
    if sct:
        params['sct'] = sct
    if sc:
        params['sc'] = sc
    return client.call('bdev_nvme_add_error_injection', params)
3833256e71f47a49eef2643bf8c244308795a0b1
708,108
def _count_datacenters(grouped_networks):
    """Count the number of datacenters in each group of networks.

    Returns:
        generator of tuples: the first element is the group key, while the
        second element is the number of datacenters in that group.
    """
    return ((key, len(set(n['datacenter_id'] for n in group)))
            for key, group in grouped_networks.items())
c97d68574d661ac3b7cff1dc3c31ac81a7f2a1e3
94,194
import typing
import itertools


def concat(xss: typing.Iterable[typing.Iterable[typing.Any]]
           ) -> typing.List[typing.Any]:
    """
    Concatenates a list of lists.
    """
    return list(itertools.chain.from_iterable(xs for xs in xss))
3d22d4bd5639499772063c4abb5dd67b9c670193
408,205
def trimmed_mean(numpy_arr, lperc=0, hperc=0):
    """Get a trimmed mean value from an array, with low and high percentages ignored."""
    alen = len(numpy_arr)
    # integer arithmetic: float slice indices raise TypeError on Python 3
    lo = alen // 100 * lperc
    hi = alen - alen // 100 * hperc
    return numpy_arr[lo:hi].mean()
ce55788da409c152f03db4f5b1bbe5943e0a3ce0
342,393
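A quick check of trimmed_mean above (after the integer-index fix): trimming 10% from each end of 0..99 averages over 10..89.

import numpy as np

arr = np.arange(100)
print(trimmed_mean(arr, lperc=10, hperc=10))  # 49.5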
def _Contains(i, j, areas, lens, cls):
    """Return True if path i contains the majority of vertices of path j.

    Args:
        i: index of supposed containing path
        j: index of supposed contained path
        areas: list of floats - areas of all the paths
        lens: list of ints - lengths of each of the paths
        cls: dict - maps pairs to result of _ClassifyPathPairs

    Returns:
        bool - True if path i contains at least 55% of j's vertices
    """
    if i == j:
        return False
    (jinsidei, joni) = cls[(i, j)]
    if jinsidei == 0 or joni == lens[j] or \
            float(jinsidei) / float(lens[j]) < 0.55:
        return False
    else:
        (insidej, _) = cls[(j, i)]
        if float(insidej) / float(lens[i]) > 0.55:
            return areas[i] > areas[j]  # tie breaker
        else:
            return True
a88cb7a16b7b856cdcd5dc30991cc1c1efa4387b
52,690
def get_distance(sigma_phi_1, sigma_phi_2, mean_phi_1, mean_phi_2, phi_1, phi_2):
    """
    Returns the "distance" that an object has relative to a specific region
    in terms of phi_1 and phi_2, taking the standard deviation into account.

    Arguments:
        sigma_phi_1 {float} - Standard deviation in phi_1 axis.
        sigma_phi_2 {float} - Standard deviation in phi_2 axis.
        mean_phi_1 {float} - Mean or center of the region in phi_1 axis.
        mean_phi_2 {float} - Mean or center of the region in phi_2 axis.
        phi_1 {float} - Center of the object in phi_1 axis.
        phi_2 {float} - Center of the object in phi_2 axis.

    Returns:
        Float -- Distance between the center of the object and the center of the region.
    """
    return ((phi_1 - mean_phi_1) / sigma_phi_1) ** 2 + ((phi_2 - mean_phi_2) / sigma_phi_2) ** 2
    # return (phi_1 - mean_phi_1)**2 + (phi_2 - mean_phi_2)**2
57ca7844e90d91d187782656586b37d328ab84cf
565,048
def sparse(x0, rho, gamma):
    """
    Proximal operator for the l1 norm (induces sparsity)

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step
    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)
    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """
    lmbda = float(gamma) / rho
    return (x0 - lmbda) * (x0 >= lmbda) + (x0 + lmbda) * (x0 <= -lmbda)
bac67eca84cc666c93c4db06aa2dd39c95b053fe
378,359
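The proximal operator above is soft-thresholding; a quick numpy sketch with lambda = gamma / rho = 0.5, so values in [-0.5, 0.5] are zeroed and the rest shrink toward zero by 0.5:

import numpy as np

x0 = np.array([-2.0, -0.3, 0.0, 0.4, 1.5])
print(sparse(x0, rho=2.0, gamma=1.0))  # [-1.5  0.   0.   0.   1. ]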
def lte_prod_rate(P, eta):
    """
    Low temperature electrolysis energy requirements and production rate.

    Parameters:
    -----------
    P: thermal power [MW]
    eta: thermal-to-electric conversion efficiency

    Returns:
    --------
    pr: production rate [kg/h]
    see: specific energy [kWh(th)/kg-H2]
    """
    see = 60  # kWh(e)/kg-H2
    see /= eta
    pr = P / see * 1e3
    return pr, see
4f1addbabb5399145f3f9b679cc5df205fc2d39c
506,775
def get_rb_file_as_string(fid):
    """Read Rainbow File Contents in data_string

    Parameters
    ----------
    fid : object
        File handle of Data File

    Returns
    -------
    data_string : str
        File Contents as data_string
    """
    try:
        data_string = fid.read()
    except Exception:
        raise IOError("Could not read from file handle")
    return data_string
2c87b47d2214f8b15fd6d60a8cc4eb1a9108c11b
612,931
import re


def clean_text_round1(text):
    """
    Clean up text data.

    Make text lowercase, remove text in square brackets, remove punctuation,
    remove digits in general, remove urls, remove emails and remove ""
    characters.

    unidecode = If there is a non ASCII character, turn it into ASCII
    readable --> not used because it hinders the Stemming.
    """
    text = text.lower()
    text = re.sub(r'\[.*?\]', '', text)
    # r'[%s]' % re.escape(string.punctuation) removes accents from words,
    # which hinders the Stemming process.
    # text = re.sub(r'[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'[!,:\-;\.\?\(\)]', '', text)
    text = ''.join(i for i in text if not i.isdigit())
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', text)
    text = re.sub(r'http\S+', '', text)
    text = re.sub(r'[‘’“”…]', '', text)
    text = re.sub('\n', '', text)
    # unidecode(text) removes accents from words, which hinders the Stemming
    # process, so it was not done.
    # text = unidecode(text)
    return text
a790f9c01df7956748ed505d6490864edcb001e7
448,703
def detect_faces(image, cascade, scale_factor, min_neighbours):
    """
    Detect faces visible on the image.

    :param cascade: cascade object used for detection
    :param image: analyzed image
    :param scale_factor: subsequent detections scaling coefficient
    :param min_neighbours: minimum detection neighbours
    :return: detected face rectangles
    """
    return cascade.detectMultiScale(image, scale_factor, min_neighbours)
973d08e53c8ba58efab72855b52ba012f88c08c5
634,845
def convert_seconds_to_human_readable_form(seconds: int) -> str:
    """Convert seconds to human readable time format, e.g. 02:30

    **Keyword arguments:**
    - seconds (int) -- Seconds to convert

    **Returns:**
    Formatted string
    """
    if seconds <= 0:
        return "00:00"
    minutes = int(seconds / 60)
    remainder = seconds % 60
    minutes_formatted = str(minutes) if minutes >= 10 else "0" + str(minutes)
    seconds_formatted = str(remainder) if remainder >= 10 else "0" + str(remainder)
    return f"{minutes_formatted}:{seconds_formatted}"
4a722da69e64f0a7e62781708c127a31a6dfcdbd
296,130
def recurse_while_none(element):
    """
    Traverse the ``element`` until a non-None text is found.

    :param element: element to traverse until get a non-None text.
    :type element: pyquery.PyQuery
    :returns: the first non-None value found
    :rtype: str
    """
    if element.text is None:
        return recurse_while_none(element.getchildren()[0])
    return element.text
54faa684fcbece10d546f6f5514cbd84c584aeda
442,283