content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def get_rgb_channel(image, channel):
    """Extract a single color channel from an RGB image array.

    ``channel`` is enum-like: its ``.value`` attribute supplies the
    channel index along the last axis.
    """
    channel_index = channel.value
    return image[:, :, channel_index]
35e7be9b99fdf4b4677819e4cc78a2123b915821
115,721
def temperature(cell):
    """Return the temperature in degrees Celsius for integer index ``cell``.

    Raises KeyError for indices with no recorded temperature.
    """
    lookup = {
        0: 21.0,
    }
    return lookup[cell]
2f56aa797fddad3ee079affb87e3da6c5a4d5a37
139,297
def generate_method(method_name):
    """Build a proxy method for a Thrift service RPC.

    The returned function forwards the named RPC to ``self.async_thrift``
    via the TChannelSyncClient's threadloop, converting the call into a
    ``concurrent.futures`` submission.

    :param method_name: name of the RPC method being called.
    :return: a method that invokes the RPC using TChannelSyncClient.
    """

    def call(self, *args, **kwargs):
        """Forward the RPC to TChannelSyncClient.

        :return concurrent.futures.Future:
        """
        loop = self.threadloop
        if not loop.is_ready():
            loop.start()
        target = getattr(self.async_thrift, method_name)
        return loop.submit(target, *args, **kwargs)

    return call
08ea4a3d62f034add07c45219da6d96ea3795e8d
679,842
def build_run_cmd(raw_cmd, start_date=None, end_date=None, database=None):
    """Replace placeholder inputs in the model command with given values.

    Parameters
    ----------
    raw_cmd : str
        Raw command, which should contain the placeholders <start_date>,
        <end_date> and <database>.
    start_date : str or datetime.datetime, optional
        Dataset start date to pass to the command, by default None.
    end_date : str or datetime.datetime, optional
        Dataset end date to pass to the command, by default None.
    database : str, optional
        Name of the database to pass to the command, by default None.

    Returns
    -------
    str
        Command with every placeholder replaced by its input value.

    Raises
    ------
    ValueError
        If a placeholder is present but its value is None, or if raw_cmd
        contains none of the placeholders.

    Fix: the original set ``no_placeholders_found = False`` even when a
    placeholder was absent from raw_cmd, so the "no placeholders"
    ValueError could never fire; now the flag only flips when a
    placeholder is actually found.
    """
    placeholders = {
        "<start_date>": start_date,
        "<end_date>": end_date,
        "<database>": database,
    }
    found_any = False
    for key, value in placeholders.items():
        if key in raw_cmd:
            if value is None:
                raise ValueError(f"No value given for {key}")
            found_any = True
            raw_cmd = raw_cmd.replace(key, str(value))
    if not found_any:
        raise ValueError(
            "Command doesn't include any of the possible placeholders: "
            f"{list(placeholders.keys())}"
        )
    return raw_cmd
009c837ea9b355b3ec135577c6aff5593ffaa879
59,327
from typing import Any


def none_to_default(field: Any, default: Any) -> Any:
    """Convert None values into default values.

    :param field: the original value that may be None.
    :param default: the new, default, value.
    :return: ``default`` when ``field`` is None, otherwise ``field``.
    """
    if field is None:
        return default
    return field
894d71c2cc89b02dc14fd7ddcd3a949bdc336692
3,505
def _swish_shape(op): """Shape helper function for swish and _swish_grad function below.""" return [op.inputs[0].shape]
fa48ec4a1197347dca465cb17a3a61e05df2bb4b
373,044
def factorial(n):
    """Calculate the factorial n!.

    :param n: non-negative integer input.
    :return: n! (1 when n is 0 or 1).
    :raises ValueError: if ``n`` is negative — the original silently
        returned 1 for negative inputs, which is mathematically wrong.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    fact = 1
    for i in range(2, n + 1):
        fact *= i
    return fact
4e99cc2924f5622f426c7f8114adf181ceb5e884
642,970
def source(bot, trigger):
    """Reply with a link to this bot's custom modules' source code."""
    repo = "https://github.com/cottongin/sopel-modules"
    message = f"You can find the source to my custom modules here: {repo}"
    return bot.say(message)
e50c3c1200e7932dec8e6e2878d98043d824d03c
614,505
def _clean(txt): """Replace all whitespace with a single space.""" return " ".join(txt.split()).strip()
497f7e7a2c460aaa6d920009848c1d02a882c3c6
216,195
import math


def stackoverflow_normpdf(x, mean, sd):
    """Normal probability density of ``x`` under N(mean, sd**2).

    An alternative implementation from https://stackoverflow.com/a/12413491,
    using the standard Gaussian PDF formula:
    http://en.wikipedia.org/wiki/Normal_distribution#Probability_density_function
    """
    variance = float(sd) ** 2
    denominator = (2 * math.pi * variance) ** .5
    numerator = math.exp(-((float(x) - float(mean)) ** 2) / (2 * variance))
    return numerator / denominator
1d94ae3de427d9de4a5cc63d83fdd28d2ead4433
183,854
import socket


def tcp_port_reachable(addr, port, timeout=5):
    """Return True if a TCP connection to ``addr:port`` succeeds.

    :param addr: host to connect to.
    :param port: TCP port number.
    :param timeout: seconds to wait for connect() (default 5).
    :return: True on success, False on any socket failure.

    Fixes over the original: the socket is now always closed (it was
    leaked on both paths), and only ``OSError`` — the base of all socket
    errors — is caught, instead of a bare ``except`` that also swallowed
    KeyboardInterrupt and SystemExit.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((addr, port))
        s.shutdown(socket.SHUT_RDWR)
        return True
    except OSError:
        return False
    finally:
        s.close()
22c530cdbccf6c19ffe60b5e1904e797f7d059ea
15,930
def generate_custom_field_resolver(name, resolver_name):
    """Generate a resolver for one custom field of a DjangoObjectType.

    Args:
        name (str): custom-field key looked up in ``self.cf``.
        resolver_name (str): name to give the resolver function, as
            declared in the DjangoObjectType.

    Returns:
        function: resolver returning ``self.cf[name]`` or None.
    """

    def _resolver(self, info, **kwargs):
        # Missing fields resolve to None rather than raising.
        return self.cf.get(name, None)

    _resolver.__name__ = resolver_name
    return _resolver
5558469a9fa3cbdd01e3ed57a72f75a986b9dd09
112,030
def get_firing_time(pwm_period=0.0015, sample_size=7):
    """Return the laser firing time: ``sample_size * pwm_period``."""
    firing_time = sample_size * pwm_period
    return firing_time
5055acda872918843ab8e19f56e3b80a5ed926de
479,860
def _build_schema_resource(fields): """Generate a resource fragment for a schema. Args: fields [Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`]): schema to be dumped Returns: (Sequence[dict]) mappings describing the schema of the supplied fields. """ return [field.to_api_repr() for field in fields]
1f39153aec88f98f6c53812f3bc8ec861294403b
614,231
def transform_table(table):
    """Map a contingency table to a point on a two-dimensional plane.

    The distance of the point to the origin shows the suitability of
    the table for separating cases and controls.
    """
    # Yes, this unpacking order (a, b, d, c) is correct. Please refer to
    # our paper for more details.
    a, b, d, c = table
    cases = a + b
    controls = c + d
    return (a - b) / cases, (c - d) / controls
90b6f74c96c9149ce6d9abb96665e83e9c5722d3
580,446
def trello_updates(new, old):
    """Parse out the updates from a Trello payload.

    When e.g. a list is moved, Trello's action.data node contains the
    current node plus an 'old' dict holding the previous values of the
    changed fields, e.g. old == {"pos": 262143} and new["pos"] == 131071
    yields {"pos": (262143, 131071)}.

    Args:
        new: dict, the complete node in its current state.
        old: dict, the 'old' node against which to compare.

    Returns:
        dict mapping each changed field to a 2-tuple (old_value,
        new_value). If any old key is missing from ``new``, every field
        maps to (old_value, None) — preserved from the original.

    Fix: ``dict.iteritems()`` is Python 2 only and raises AttributeError
    on Python 3; replaced with ``items()``.
    """
    try:
        return {k: (v, new[k]) for k, v in old.items()}
    except KeyError:
        return {k: (v, None) for k, v in old.items()}
829e0369cf3274b83a1050c2dc75cbf1e964a4eb
192,452
def rosenbrock(xx):
    """Rosenbrock N-D objective function, a common optimization test.

    Arguments:
        xx: 1D numpy array, coordinates at which to evaluate the N-D
            Rosenbrock test function.

    References:
        + Rosenbrock, H. H. (1960). An Automatic Method for finding the
          Greatest or Least Value of a Function. The Computer Journal,
          3, 175-84.
        + https://en.wikipedia.org/wiki/Rosenbrock_function
    """
    val = 0
    for left, right in zip(xx, xx[1:]):
        t1 = right - left * left
        t2 = 1 - left
        val += 100 * t1 * t1 + t2 * t2
    return val
f00ac742503619d2664936f65ed6158bc1ed0ec5
411,286
import torch


def linf_for(diff):
    """Sum of L-infinity distances over a mini-batch, via a for loop.

    :param diff: a mini-batch tensor, iterated along dim 0.
    :return: sum of the max-abs value of each data point in the
        mini-batch ``diff`` (integer 0 for an empty batch).

    Fix: the per-sample maximum was stored in a variable named ``max``,
    shadowing the builtin; renamed for clarity and safety.
    """
    linf_sum = 0
    for sample in diff:
        sample_linf = torch.max(torch.abs(sample))
        linf_sum += sample_linf
    return linf_sum
1040e5bf0209e48c6e9bb06c0a8507077a5aa9a4
472,646
def circle_line_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):
    """Find the points at which a circle intersects a line-segment.

    This can happen at 0, 1, or 2 points.

    :param circle_center: The (x, y) location of the circle center.
    :param circle_radius: The radius of the circle.
    :param pt1: The (x, y) location of the first point of the segment.
    :param pt2: The (x, y) location of the second point of the segment.
    :param full_line: True to find intersections along the full line;
        False returns only intersections within the segment.
    :param tangent_tol: Numerical tolerance at which we decide the two
        intersections are close enough to consider the line a tangent.
    :return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2
        intersection points.

    Note: We follow http://mathworld.wolfram.com/Circle-LineIntersection.html

    Fix: the source had been corrupted by a line wrap splitting the
    function mid-comment; reconstructed with proper formatting.
    """
    (p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center
    # Work in coordinates translated so the circle sits at the origin.
    (x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)
    dx, dy = (x2 - x1), (y2 - y1)
    dr = (dx ** 2 + dy ** 2) ** .5
    big_d = x1 * y2 - x2 * y1
    discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2

    if discriminant < 0:  # No intersection between circle and line
        return []
    # There may be 0, 1, or 2 intersections with the segment.
    # The sign ordering makes sure the order along the segment is correct.
    intersections = [
        (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant ** .5) / dr ** 2,
         cy + (-big_d * dx + sign * abs(dy) * discriminant ** .5) / dr ** 2)
        for sign in ((1, -1) if dy < 0 else (-1, 1))]
    if not full_line:
        # Filter out intersections that do not fall within the segment.
        fraction_along_segment = [
            (xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy
            for xi, yi in intersections]
        intersections = [pt for pt, frac in zip(intersections, fraction_along_segment)
                         if 0 <= frac <= 1]
    # If the line is tangent to the circle, return just one point (as
    # both intersections have the same location).
    if len(intersections) == 2 and abs(discriminant) <= tangent_tol:
        return [intersections[0]]
    return intersections
bf25e107fc56b4b7bed277c5100cf09448e4af6d
118,419
def double_me(x):
    """Return twice the input value."""
    return 2 * x
664baae6cac1f953c8c9304c05f90df3b1f3c6fd
409,006
def parse_singleline_row(line, columns):
    """Parse a single-line row from a "net use" table.

    ``line`` must be a single-line row from the output of ``NET USE``.
    While NET USE may represent a single row on multiple lines, ``line``
    must be a whole row on a single line. ``columns`` must be a list of
    NetUseColumn objects that correctly parse the string.

    Returns a dict mapping standardized column names to column values.
    """
    parsed = {}
    for column in columns:
        parsed[column.name] = column.extract(line)
    return parsed
52a24b3c6c00c89a44311a5dfdaf02e3fc6026a6
674,908
def strip_byte_order_mark(text):
    """Return ``text`` with a leading byte order mark (BOM) removed.

    Round-trips through UTF-8 and decodes with ``utf-8-sig``, which
    strips a leading BOM; if the text cannot be encoded (e.g. lone
    surrogates), it is returned unchanged.
    """
    try:
        encoded = text.encode('utf-8')
    except UnicodeError:
        return text
    return encoded.decode('utf-8-sig')
9f7734cd9b07312ab35fff4473bf8acdb6991718
82,179
def parse_iam_report_row(row):
    """Select the interesting columns from an IAM credentials report row.

    The full report header is: user, arn, user_creation_time,
    password_enabled, password_last_used, password_last_changed,
    password_next_rotation, mfa_active, access_key_1_active,
    access_key_1_last_rotated, access_key_1_last_used_date,
    access_key_1_last_used_region, access_key_1_last_used_service,
    access_key_2_active, access_key_2_last_rotated,
    access_key_2_last_used_date, access_key_2_last_used_region,
    access_key_2_last_used_service, cert_1_active, cert_1_last_rotated,
    cert_2_active, cert_2_last_rotated.

    Kept columns: user, arn, password_enabled, password_last_used,
    access_key_1_active, access_key_1_last_used_date,
    access_key_2_active, access_key_2_last_used_date.
    """
    wanted_indices = (0, 1, 3, 4, 8, 10, 13, 15)
    return [row[i] for i in wanted_indices]
12927b22d716c59513a3527f4233f1022d1174c6
264,436
def maybe_scream(text, do_scream=False):
    """Return ``text`` in caps lock if ``do_scream`` is true.

    Args:
        text (str): Some input text.
        do_scream (bool): Whether to scream or not.

    Returns:
        str: Upper-cased when screaming, otherwise unchanged.
    """
    return text.upper() if do_scream else text
19d071cf439adda771c785fd4ade265cf2b864e0
563,049
import json


def reject(status_code, **kwargs):
    """Build a bad-request (rejection) response.

    :param int status_code: Rejection status code.
    :param dict kwargs: Rejection body, serialized as JSON; with no
        kwargs the body is an empty string.
    :return dict: Rejection response with body, headers and statusCode.
    """
    if kwargs:
        body = json.dumps(kwargs)
    else:
        body = ''
    headers = {
        'content-length': len(body),
        'content-type': 'application/json; charset=utf-8',
    }
    return {'body': body, 'headers': headers, 'statusCode': status_code}
e68113ed9c92d0c082ed4c146ca2132d3d52bc40
628,174
def byte_length(n):
    """Return the number of bytes needed to represent the int ``n``.

    Zero needs zero bytes; otherwise the bit length is rounded up to a
    whole number of bytes.
    """
    bits = n.bit_length()
    return (bits + 7) // 8
5d4b258d8c9bcccbdd4f0739458ce91cbd8825fa
628,829
def diagnose(message, command, status, stdout, stderr):
    """Construct a detailed failure message from a command invocation."""

    def _stream(label, text):
        # Missing or blank streams render as ' [none]'.
        if text is None or not text.rstrip('\n'):
            return label + ' [none]\n'
        return label + '\n' + text.rstrip('\n') + '\n'

    parts = [message + '\n', 'COMMAND: %s\n' % ' '.join(command)]
    if status == -1:
        # -1 is the sentinel this codebase uses for a timed-out command.
        parts.append('EXIT STATUS: %d (command timed out)\n' % status)
    else:
        parts.append('EXIT STATUS: %d\n' % status)
    parts.append(_stream('STANDARD OUTPUT:', stdout))
    parts.append(_stream('STANDARD ERROR:', stderr))
    return ''.join(parts)
9e39fd130a7d80aae7e7c1dcb0e5d9942719b165
81,429
from urllib.parse import unquote


def build_response(req, resource_type, data):
    """Create the standard, endpoint-uniform response for a request.

    :param req: Flask object containing the incoming request.
    :param resource_type: string describing the endpoint.
    :param data: dictionary of data to return.
    :return: output dictionary for the request.
    """
    return {
        'method': req.method,
        'url': unquote(req.url),
        'url_root': unquote(req.url_root),
        'path': req.path,
        'query_string': req.query_string.decode('utf-8'),
        'resource_type': resource_type,
        'data': data,
    }
68992d13c62e5f1c646553d120e02298dbc381ad
433,314
import re


def wslpath(winpath):
    """Convert a Windows path (or an argument containing one) to WSL form.

    E.g. ``c:\\Users`` -> ``/mnt/c/Users``. Also supports paths in the
    form ``\\wsl$\\(distro)\\users\\...``.

    :param winpath: Command line argument which may (or may not) contain
        a Windows path. It is assumed to be either of the form
        <windows path> or --<arg>=<windows path>. (--arg <path> and
        -a <path> need no handling since those parse as separate
        entities.)
    :return: If ``winpath`` matches a Windows path, the converted
        argument (including the --<arg>= portion). Otherwise returns
        ``winpath`` unchanged.

    Fix: the drive-letter character class was ``[a-zA-z]``, which also
    matched the punctuation between 'Z' and 'a' (``[ \\ ] ^ _ ` ``);
    corrected to ``[a-zA-Z]``.
    """
    match = re.match(r"^(--[\w-]+=)?\\\\wsl\$[\\\/][^\\^\/]+(.*)$", winpath)
    if match:
        arg, path = match.group(1, 2)
        if arg is None:
            arg = ""
        return arg + path.replace("\\", "/")

    match = re.match(r"^(--[\w-]+=)?([a-zA-Z]):(.+)$", winpath)
    if match:
        arg, drive, path = match.group(1, 2, 3)
        if arg is None:
            arg = ""
        return arg + "/mnt/" + drive.lower() + path.replace("\\", "/")

    return winpath
d5c25db526d61c95efbec3b0311ff758b0ad0eb1
500,562
import textwrap


def dedent(docstring):
    """Dedent a Python docstring, treating the first line specially.

    Uses textwrap.dedent() on everything after the first line, which is
    assumed to carry no indentation (the usual docstring convention).
    """
    lines = docstring.splitlines()
    body = textwrap.dedent('\n'.join(lines[1:]))
    return '\n'.join((lines[0], body))
c80bcfa0a143da8cb708168dbcda9dca87a0c24b
296,411
def _normal(cb, cs): """The normal blend mode""" return cs
292cc1c3d69432ae8dc7c5336c4056a0f1d30758
512,814
import json
import hashlib


def default_observation(case_submitter_id, project_id, date, observation_type, line):
    """Create a minimal observation record.

    The submitter_id embeds a sha256 checksum of the compact JSON form
    of ``line`` so identical source rows map to the same observation.
    """
    serialized = json.dumps(line, separators=(',', ':'))
    digest = hashlib.sha256(serialized.encode('utf-8')).hexdigest()
    submitter_id = '{}-{}-{}'.format(case_submitter_id, observation_type, digest)
    return {
        "type": "observation",
        "cases": {"submitter_id": case_submitter_id},
        "submitter_id": submitter_id,
        "project_id": project_id,
        "date": date,
        "observation_type": observation_type,
    }
64fea2110cb27bd1cd82e7e34fb3826cb3ab5707
572,035
def _excel2num(x): """ Convert Excel column name like 'AB' to 0-based column index. Parameters ---------- x : str The Excel column name to convert to a 0-based column index. Returns ------- num : int The column index corresponding to the name. Raises ------ ValueError Part of the Excel column name was invalid. """ index = 0 for c in x.upper().strip(): cp = ord(c) if cp < ord("A") or cp > ord("Z"): raise ValueError(f"Invalid column name: {x}") index = index * 26 + cp - ord("A") + 1 return index - 1
0e568c2d93cf84a9ceb87c99f9c5b0944656580a
170,010
def _get_reduce_out_dim(keep_dims, x_dim, x_ndim, batch_axis): """get out_dim for reduce* operation.""" if keep_dims: out_dim = x_dim else: out_dim = 0 for i in range(x_ndim): if i == x_dim: break if i in batch_axis: continue else: out_dim += 1 return out_dim
972534195cfaafa6308a580a952aab6403901f8a
356,105
def _header(settings): """ Writes the Latex header using the settings file. The header includes all packages and defines all tikz styles. Returns: header (string): Header of the Latex document. """ packages = ("\\documentclass{standalone}\n\\usepackage[margin=1in]" "{geometry}\n\\usepackage[hang,small,bf]{caption}\n" "\\usepackage{tikz}\n" "\\usepackage{braket}\n\\usetikzlibrary{backgrounds,shadows." "blur,fit,decorations.pathreplacing,shapes}\n\n") init = ("\\begin{document}\n" "\\begin{tikzpicture}[scale=0.8, transform shape]\n\n") gate_style = ("\\tikzstyle{basicshadow}=[blur shadow={shadow blur steps=8," " shadow xshift=0.7pt, shadow yshift=-0.7pt, shadow scale=" "1.02}]") if not (settings['gate_shadow'] or settings['control']['shadow']): gate_style = "" gate_style += "\\tikzstyle{basic}=[draw,fill=white," if settings['gate_shadow']: gate_style += "basicshadow" gate_style += "]\n" gate_style += ("\\tikzstyle{operator}=[basic,minimum size=1.5em]\n" "\\tikzstyle{phase}=[fill=black,shape=circle," + "minimum size={}".format(settings['control']['size']) + "cm,inner sep=0pt,outer sep=0pt,draw=black" ) if settings['control']['shadow']: gate_style += ",basicshadow" gate_style += ("]\n\\tikzstyle{none}=[inner sep=0pt,outer sep=-.5pt," "minimum height=0.5cm+1pt]\n" "\\tikzstyle{measure}=[operator,inner sep=0pt,minimum " + "height={}cm, minimum width={}cm]\n".format( settings['gates']['MeasureGate']['height'], settings['gates']['MeasureGate']['width']) + "\\tikzstyle{xstyle}=[circle,basic,minimum height=") x_gate_radius = min(settings['gates']['XGate']['height'], settings['gates']['XGate']['width']) gate_style += ("{x_rad}cm,minimum width={x_rad}cm,inner sep=-1pt," "{linestyle}]\n" ).format(x_rad=x_gate_radius, linestyle=settings['lines']['style']) if settings['gate_shadow']: gate_style += ("\\tikzset{\nshadowed/.style={preaction={transform " "canvas={shift={(0.5pt,-0.5pt)}}, draw=gray, opacity=" "0.4}},\n}\n") gate_style += "\\tikzstyle{swapstyle}=[" gate_style += "inner 
sep=-1pt, outer sep=-1pt, minimum width=0pt]\n" edge_style = ("\\tikzstyle{edgestyle}=[" + settings['lines']['style'] + "]\n") return packages + init + gate_style + edge_style
7342481765bc894c539b39e24b24fd0b6addd56d
397,538
def demo_portfolios(initial, rebal_time, start, end, _user_parameters,
                    stock_only=True, bond_only=False, mix=False):
    """Create a list of demo portfolios with varying asset weights.

    Args:
        initial: amount invested on first day.
        rebal_time: portfolio rebalancing frequency (in days).
        start, end: start and end dates.
        _user_parameters: unused (kept for interface compatibility).
        stock_only: whether to use stock-only portfolios.
        bond_only: whether to use bond-only portfolios.
        mix: whether to use stock-bond mix portfolios.

    Returns:
        A list of portfolio dicts.

    Fix: the source had been corrupted by a line wrap splitting a string
    literal ("U.S. / Treasury bonds"); reconstructed with formatting.
    """
    # pylint: disable=invalid-name, too-many-locals, redefined-outer-name
    # Single-letter loop names are readable here and all locals are needed.
    splits = {}

    # Large-medium-small capitalization U.S. stock splits.
    if stock_only:
        for x in range(0, 11, 2):
            for y in range(0, 11 - x, 2):
                z = 10 - x - y
                label = "Stock LMS %d-%d-%d" % (x, y, z)
                splits[label] = {
                    'U.S. large-cap stocks (Wilshire index)': x / 10.,
                    'U.S. mid-cap stocks (Wilshire index)': y / 10.,
                    'U.S. small-cap stocks (Wilshire index)': z / 10.
                }

    # Short and medium term U.S. Treasury bond splits.
    if bond_only:
        for t in range(0, 11, 2):
            for u in range(0, 11 - t, 2):
                for v in range(0, 11 - t - u, 2):
                    w = 10 - t - u - v
                    label = "Bond split %d-%d-%d-%d" % (t, u, v, w)
                    splits[label] = {
                        'U.S. Treasury bonds, 0-1 year (S&P index)': w / 10.,
                        'U.S. Treasury bonds, 1-3 year (S&P index)': v / 10.,
                        'U.S. Treasury bonds, 3-5 year (S&P index)': u / 10.,
                        'U.S. Treasury bonds, 5-7 year (S&P index)': t / 10.
                    }

    # Stock (3-tier) and total bond market mixed portfolios.
    if mix:
        for p in range(0, 11, 2):
            for q in range(0, 11 - p, 2):
                for r in range(0, 11 - p - q, 2):
                    s = 10 - p - q - r
                    label = "Stock LMS - Total bond %d-%d-%d-%d" % (p, q, r, s)
                    splits[label] = {
                        'U.S. large-cap stocks (S&P 500 index)': p / 10.,
                        'U.S. mid-cap stocks (Wilshire index)': q / 10.,
                        'U.S. small-cap stocks (Wilshire index)': r / 10.,
                        'U.S. Treasury bonds, total market (S&P index)': s / 10.
                    }

    portfolios = [{
        'name': label,
        'id': label,
        'input': {
            'Initial investment': initial,
            'Investment classes': ivc,
            'Rebalancing frequency (days)': rebal_time,
            'Start date': start,
            'End date': end
        }
    } for label, ivc in splits.items()]
    return portfolios
6d9ddf757f9c3306e2958ad51eb83fba846bc0c0
250,417
def s3_url(config):
    """Generate the s3 url where the models will be saved.

    Parameters
    ----------
    config : CfgNode
        Model configuration; uses ``config.checkpoint.s3_path`` (its
        leading ``s3://`` scheme is dropped) and ``config.name``.

    Returns
    -------
    url : str
        String containing the URL pointing to the s3 bucket.
    """
    bucket = config.checkpoint.s3_path[5:]  # strip the 's3://' prefix
    return 'https://s3.console.aws.amazon.com/s3/buckets/{}/{}'.format(
        bucket, config.name)
32ac0a8722d38315e9394dc5c6c40bd670f223e3
527,602
def literal_symbol(literal):
    """The symbol in this literal (without the negation).

    >>> literal_symbol(P)
    P
    >>> literal_symbol(~P)
    P
    """
    return literal.args[0] if literal.op == '~' else literal
a7326a9080f796e5fd0eedd8299e3ea764395724
265,276
import torch


def scopes_from_offsets(offsets):
    """Compute scopes (for segmented operations) from an offsets array.

    Parameters
    ----------
    offsets: torch.Tensor
        Tensor of length ``n_segments + 1`` holding the offsets of each
        segment.

    Returns
    -------
    torch.Tensor
        A tensor of shape ``[n_segments, 2]`` representing the segment
        specification in the form ``(start, length)``.
    """
    starts = offsets[:-1]
    lengths = offsets[1:] - starts
    return torch.stack((starts, lengths), dim=-1)
eac85627adf4337b105b52b7f4bff7f1c5a57153
546,196
def read_str_num(string, sep=None):
    """Return a list of floats pulled from a string.

    Parameters
    ----------
    string : str
        String to be parsed.
    sep : str, optional
        Delimiter (default is None, meaning consecutive whitespace).

    Returns
    -------
    list of float
        Floats separated by ``sep`` in ``string``.
    """
    return [float(token) for token in string.split(sep)]
bf48ad82dd9e69404d3feb23db30b902eb8b2a55
313,684
def sum_of_squares(mfrlist):
    """Add up the squares of all elements in the (map/reduce) list."""
    squared = mfrlist.map(lambda v: v ** 2)
    return squared.reduce(lambda acc, v: acc + v, 0)
6742efbc6784dfd8706d589a6debad9297bfe0b8
358,454
def rivers_with_station(stations):
    """Create a sorted list of the distinct rivers with stations on them.

    :param stations: iterable of station objects exposing ``.river``.
    :return: alphabetically sorted list of unique river names.

    Improvements: a set comprehension replaces the O(n^2)
    list-membership loop, and ``sorted()`` replaces the awkward
    if/pass/else plus in-place sort — the result is identical.
    """
    return sorted({station.river for station in stations})
cd515483aa56d2c9fec767234b22d0993bca3725
62,971
def add_user_to_group(service, group_key, user_email, role):
    """Add user to a Google Group.

    :param service: Authenticated directory service object.
    :param group_key: Unique identifier of the group (string, email, or id).
    :param user_email: Email of the user.
    :param role: Role of the member.
    :return: Results of the query.
    """
    member = {"email": user_email, "role": role}
    request = service.members().insert(groupKey=group_key, body=member)
    return request.execute()
dff44d21674195ca0418fec84a2c26cbab1e3b06
493,105
def broadcast_chunks(*chunkss):
    """Construct a chunks tuple that broadcasts many chunks tuples.

    >>> a = ((5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((5, 5),)

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((1,), (5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((3, 3,), (5, 5),)
    >>> broadcast_chunks(a, b)
    Traceback (most recent call last):
        ...
    ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
    """
    if not chunkss:
        return ()
    if len(chunkss) == 1:
        return chunkss[0]
    ndim = max(len(c) for c in chunkss)
    # Left-pad shorter chunk tuples with unit dimensions, numpy-style.
    padded = [((1,),) * (ndim - len(c)) + c for c in chunkss]
    result = []
    for dim in range(ndim):
        candidates = [c[dim] for c in padded]
        if all(c == (1,) for c in candidates):
            survivors = candidates
        else:
            # Unit dims broadcast away against any non-unit chunking.
            survivors = [c for c in candidates if c != (1,)]
        if len(set(survivors)) != 1:
            raise ValueError("Chunks do not align: %s" % str(survivors))
        result.append(survivors[0])
    return tuple(result)
dfd3ba4a183a30c389930294799af52079383e20
273,771
import textwrap


def normalize(string: str) -> str:
    """Normalize whitespace.

    Replaces tabs, strips leading and trailing blank lines, dedents, and
    removes trailing whitespace from each line of the result.
    """
    lines = string.replace("\t", " ").split("\n")
    # Drop blank lines from the top and the bottom.
    while lines and not lines[0].strip():
        lines.pop(0)
    while lines and not lines[-1].strip():
        lines.pop()
    cleaned = "\n".join(line.rstrip() for line in lines)
    return textwrap.dedent(cleaned)
b999f200b59f9d0418000d32e2b7577d35347b8c
70,473
def test_module(client, args):
    """Integration self-test: 'ok' means the service connection works.

    Args:
        client: PrismaCloudAdmin client.
        args: PrismaCloudAdmin arguments passed (unused).

    Returns:
        ('ok', None, None) when the request returns 200 or 400 (a 400
        still proves the service is reachable); raises otherwise.

    Fix: removed a stray no-op ``args`` expression statement left over
    in the original body.
    """
    res = client.http_request(method='GET', url_suffix='user/id')
    if res.status_code in (200, 400):
        return 'ok', None, None
    raise Exception(f"{res.status_code} - {res.text}")
7c98e690cfd0faba3131d02cf577cc13e2259f01
404,253
from uuid import UUID


def validate_uuid4(uuid_string):
    """Validate that a UUID string is in fact a valid uuid4.

    The uuid module does the actual checking. It is vital that the
    ``version`` kwarg be passed to UUID(), otherwise any 32-character
    hex string is considered valid. UUID() also silently *converts*
    hex that is valid but not a proper uuid4 into one, so we compare
    the canonical hex back against the input to catch that case.
    """
    try:
        parsed = UUID(uuid_string, version=4)
    except ValueError:
        # Not a valid hex representation of a UUID at all.
        return False
    return parsed.hex == uuid_string.replace('-', '')
a009854c8633e29d72b661c7e5db032ac420a9d6
434,240
import logging


def setup_logging(name, handlers=None, level=None):
    """Set up logging for a named logger.

    Gets and initializes a named logger, ensuring it at least has a
    ``logging.NullHandler`` attached.

    :param str name: Name of the logger.
    :param list handlers: ``logging.Handler`` objects to attach.
    :param int level: Log level to set the logger at.
    :returns: A ``logging.Logger`` usable to emit log messages.
    """
    log = logging.getLogger(name)
    # Guarantee at least one handler so emitting never warns.
    if not handlers and not log.handlers:
        log.addHandler(logging.NullHandler())
    for handler in handlers or []:
        log.addHandler(handler)
    if level:
        log.setLevel(level)
    return log
6ecfad0087f1ba7d7e883a00b59f1870002b86f5
296,666
import re
import string


def run(text: str, mode: str) -> str:
    """Remove the numbers and/or punctuation from a text.

    :param text: a text to process
    :param mode: the mode of processing
        - n: remove numbers
        - p: remove punctuation
        - np: remove numbers and punctuation
    :return: the processed text
    :raises ValueError: on any other mode
    """
    if mode not in ("n", "p", "np"):
        raise ValueError(f"Unsupported mode: {mode}")
    if "p" in mode:
        text = text.translate(str.maketrans("", "", string.punctuation))
    if "n" in mode:
        text = re.sub(r"[0-9]+", "", text)
    return text
9f2c4c1dc3860b5adae786acf8efab7a6b85b293
615,452
def strip_space(str):
    """Strip whitespace from both ends of the string.

    NOTE(review): the parameter keeps its historical name ``str``
    (shadowing the builtin) so keyword callers stay compatible.

    Fix: a single ``.strip()`` replaces the redundant
    ``.lstrip().rstrip()`` chain — the results are identical.
    """
    return str.strip()
e797b868e09fe7e3efa74bc67f8ce0a8962c0091
153,327
from typing import Union from typing import Dict import json def _json_loads(s: Union[str, bytes]) -> Dict: """Like json.loads(), but handle str or bytes. This is needed because an HTTP response's read() method returns bytes on Python 3.5, and json.load doesn't handle bytes. """ if isinstance(s, bytes): s = s.decode('utf-8') return json.loads(s)
c220f95198088062f8703d46d6d3ae2216c8d70e
613,427
def preprocess_histograms_2(hists_orig, two_tail_fraction=0.05):
    """Preprocess MNI region histograms for a single cbv volume.

    hists_orig: pandas.core.frame.DataFrame containing all MNI region
    histograms for a single cbv volume.

    Preprocessing:
        1. Remove rows containing only 0s.

    Returns: pandas.core.frame.DataFrame (a preprocessed copy).
    """
    hists = hists_orig.copy()
    # 1. keep only rows with at least one non-zero bin
    nonzero_rows = ~(hists == 0).all(axis=1)
    return hists.loc[nonzero_rows]
cde88c22d64ade01519e36e60ca7363c47f2463e
481,710
def split_org_repo(in_str):
    """Split an "org/repo" string into its org and repo parts.

    If the repo is not provided, None is returned for it.

    Returns a pair (org, repo).
    """
    org, _, remainder = in_str.partition('/')
    repo = remainder if remainder else None
    return org, repo
7e4fb5e5e0886acae030b4b1e3ce44a4913f36e1
68,549
import six def _is_utf_8(txt): """ Check a string is utf-8 encoded :param bytes txt: utf-8 string :return: Whether the string\ is utf-8 encoded or not :rtype: bool """ assert isinstance(txt, six.binary_type) try: _ = six.text_type(txt, 'utf-8') except (TypeError, UnicodeEncodeError): return False else: return True
d81cd8ac66cc83a9245115945e56d18167f2fb07
385,126
def get_category(annotation):
    """Extract the first category name from an annotation.

    Parameters
    ----------
    - annotation : Kili annotation (dict-like)

    Returns the name of the first category, or None when the
    'categories' or 'name' key is absent.
    """
    try:
        first_category = annotation['categories'][0]
        return first_category['name']
    except KeyError:
        return None
c027f6a20a09ae7d9333ea266f1e6add1fe0e557
198,106
def quotes_inner(quoted: str) -> str: """ For a string containing a quoted part returns the inner part """ left_quote = quoted.find('"') right_quote = quoted.rfind('"') if right_quote < 0: right_quote = len(quoted) return quoted[left_quote + 1:right_quote]
737718431d353e3ac9bd21fd625a61557b423d68
681,114
def protected_property(prop_name: str, attr_name: str):
    """Make a read-only property ``prop_name`` that proxies ``attr_name``."""

    def getter(self):
        return getattr(self, attr_name)

    getter.__name__ = prop_name
    return property(getter)
d76c39a2d37864c755cbd7f1715e435a9a3facca
434,742
def crop(image, pt1, pt2=None):
    """Crop ``image`` between top-left ``pt1`` and bottom-right ``pt2``.

    Both corners are (x, y) pairs; ``pt2`` may be omitted by passing
    both corners packed together as ``pt1 = (top_left, bottom_right)``.

    Fix: the sentinel test was ``pt2 == None``; replaced with the
    identity check ``pt2 is None`` — equality can misfire (or raise)
    when pt2 is an array-like that overloads ``__eq__``.
    """
    if pt2 is None:
        pt2 = pt1[1]
        pt1 = pt1[0]
    # NumPy-style indexing: rows are y, columns are x.
    cropped = image[pt1[1]:pt2[1], pt1[0]:pt2[0]]
    return cropped
1eaeedcf380aed89d2e5814cad01e3fd1998fb5f
500,488
def exp_str(dictionary, key):
    """Return the exponent suffix for ``dictionary[key]``.

    An empty string when the exponent is 1, otherwise '**<exponent>'.
    """
    exponent = dictionary[key]
    return '' if exponent == 1 else '**' + str(exponent)
8bbf356ec7ebb255f0e7d313c1601d6b77bfdc39
412,347
def check_and_get_directory(files):
    """
    Check if all provided files have the same directory and return it.

    :param files: A list of path-like files to check and get directory from.
    :return: Base directory of the files.
    :raise: ValueError if *files* is empty; RuntimeError if the files do
        not share one base directory.
    """
    if not files:
        raise ValueError('Files must not be empty!')
    # Iterate instead of recursing per file so a long list cannot hit the
    # interpreter recursion limit.
    head, *tail = files
    parent = head.parent
    for candidate in tail:
        if candidate.parent != parent:
            raise RuntimeError('Files do not have the same directory: {} != {}'.format(parent, candidate.parent))
    return parent
2e13d63fccaf18213c9b9108869774b15bdc801a
27,561
def get_mts(virt_eths):
    """
    Parse the machine type/model/serial from the IVM output.

    :param virt_eths: A dictionary with adapter data from IVM; each value is
        a sequence whose first element is a description string and whose
        second element is a '-'-separated identifier.
    :returns: A string in the form "type.model.serial", or None if no
        l-lan adapter entry is present.
    """
    # Iterate items directly instead of keys + repeated dict indexing.
    for _key, data in virt_eths.items():
        if 'Virtual I/O Ethernet Adapter (l-lan)' in data[0]:
            return data[1].split('-')[0]
    return None
f1c2aa4f294171b4680f87fd4420fc58a93be325
136,889
from typing import Iterable


def make_even(iterable_1: Iterable, iterable_2: Iterable) -> tuple[list, list]:
    """Duplicate elements of the shorter iterable (favouring earlier ones)
    until both sequences have the same length.

    Examples
    --------
    >>> make_even([1, 2], [3, 4, 5, 6])
    ([1, 1, 2, 2], [3, 4, 5, 6])
    """
    first, second = list(iterable_1), list(iterable_2)
    target = max(len(first), len(second))

    def stretch(seq: list) -> list:
        # Spread seq's indices evenly over `target` slots.
        return [seq[(i * len(seq)) // target] for i in range(target)]

    return stretch(first), stretch(second)
b43992153a18b1d2216073697c079249257aac8b
480,813
def implicant2bnet(partial_state):
    """Convert a partial state dictionary to a BNet conjunction string.

    Negated variables come first, e.g. {'A':1,'B':0} gives '!B & A'.

    Parameters
    ----------
    partial_state : partial state dictionary
        Partial state to convert.

    Returns
    -------
    str
        BNET representation of the partial state.
    """
    negated = ['!' + name for name, val in partial_state.items() if not val]
    asserted = [name for name, val in partial_state.items() if val]
    return ' & '.join(negated + asserted)
81fbc4c43510871a191656b247bf5f003a0938d5
223,172
from functools import reduce
import operator


def factorial(n):
    """Return n! computed iteratively (1 for n < 2, including negatives)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
a58e0aad4e3a8baf06bbd1a6929a3aab2ab4e66e
698,685
import socket


def findFreePort(interface="127.0.0.1", family=socket.AF_INET, type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface,
    then release the socket and return the address which was allocated.

    @param interface: The local address to try to bind the port on.
    @type interface: C{str}
    @param type: The socket type which will use the resulting port.

    @return: A two-tuple of address and port, like that returned by
        L{socket.getsockname}.
    """
    address = socket.getaddrinfo(interface, 0)[0][4]
    probe = socket.socket(family, type)
    try:
        probe.bind(address)
        if family != socket.AF_INET6:
            return probe.getsockname()
        # For IPv6 normalise the host via getnameinfo so scope ids etc.
        # come back in numeric form.
        sockname = probe.getsockname()
        host = socket.getnameinfo(
            sockname, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
        )[0]
        return (host, sockname[1])
    finally:
        probe.close()
37cb499eb388250ee31195ec5008b0ea7c9e6469
621,154
from pathlib import Path


def read_settings_file(current_test_path: Path, test_id: str):
    """Read settings for the given test.

    Each non-blank line of ``<test_id>-settings.txt`` is ``key: value``.
    Keys and values are stripped of surrounding whitespace, and only the
    first ':' separates key from value, so values may themselves contain
    ':' (the old code crashed on such lines and kept the trailing newline
    in every value).
    """
    settings_file = current_test_path / f'{test_id}-settings.txt'
    settings = {}
    with open(settings_file) as f:
        for line in f:
            if line.strip():
                key, val = line.split(':', 1)
                settings[key.strip()] = val.strip()
    return settings
91156aa96fa5d0beb55d6c0d213ce058a5cfe7e7
429,687
import re


def split_into_sentences(s):
    """Split text into a list of sentences.

    Whitespace runs are collapsed to single spaces, then '.', '?' and '!'
    are treated as sentence terminators. A trailing terminator yields a
    final empty string (str.split semantics).
    """
    s = re.sub(r"\s+", " ", s)
    # [.?!] -- the old class was written r"[\\.\\?\\!]", which also (and
    # unintentionally) treated a literal backslash as a terminator.
    s = re.sub(r"[.?!]", "\n", s)
    return s.split("\n")
cf7f26f8ac69d717f87cd00c3caab8f2983b14af
459,219
from configparser import ConfigParser
from typing import Dict


def entry_points(text: str, text_source="entry-points") -> Dict[str, dict]:
    """Given the contents of entry-points file,
    process it into a 2-level dictionary (``dict[str, dict[str, str]]``).
    The first level keys are entry-point groups, the second level keys are
    entry-point names, and the second level values are references to objects
    (that correspond to the entry-point value).
    """
    # ConfigParser was referenced but never imported before, so every call
    # raised NameError; the import above fixes that.
    parser = ConfigParser(default_section=None, delimiters=("=",))  # type: ignore
    parser.optionxform = str  # case sensitive
    parser.read_string(text, text_source)
    groups = {k: dict(v.items()) for k, v in parser.items()}
    groups.pop(parser.default_section, None)
    return groups
0b507930695a2c986be66f9a4eb00ae4f82e1f9f
185,729
import re


def package_name_regex(s):
    """Argparse ``type=`` helper: compile *s* as a regular expression.

    Raises ValueError (which argparse reports as an invalid value) when
    *s* is not a valid pattern.
    """
    try:
        pattern = re.compile(s)
    except re.error as exc:
        raise ValueError from exc
    return pattern
63f022066e86abd2e4c9b7795c18057ad36f2b0f
252,901
def gab_oris_shared_U(gab_letters, gab_oris):
    """
    gab_oris_shared_U(gab_letters, gab_oris)

    Return, per Gabor letter, the orientations shared with U frames,
    ordered together (U letters get the shared orientations shifted
    by -90 degrees).

    Required args:
        - gab_letters (str): Gabor letters for which to retrieve orientations
        - gab_oris (list) : Gabor orientations that can be included

    Returns:
        - new_oris (list): list of orientations for each Gabor letter
    """
    shared_oris = [90, 135]  # from ABCD reference
    new_oris = []
    for letter in gab_letters:
        is_U = (letter.upper() == "U")
        if is_U and len(letter) > 1:
            raise NotImplementedError("Cannot return shared orientations if "
                "U Gabors are grouped with other Gabor letters.")
        offset = 90 if is_U else 0
        new_oris.append(
            [ori - offset for ori in shared_oris if ori in gab_oris]
        )
    return new_oris
97fe5e62b08fe77a1b78a07f8a4465f3529e348d
204,964
def get_class_name_from_config_file(path):
    """Return the path segment immediately following '/csv/'."""
    after_csv = path.split('/csv/')[1]
    return after_csv.split('/')[0]
e41aedce347d2b6ad01b7ca426689492889cd40a
272,737
def get_bucket_config(config, bucket_name):
    """
    Pull the matching bucket config from the application config by
    name/alias. When several buckets share the reference name, the LAST
    one wins (preserving the original loop's semantics).

    Args:
        config(dict)
        bucket_name(string): bucket name or bucket reference name

    Returns:
        dict | None: config for bucket or None if not found
    """
    matches = [b for b in config["buckets"] if b["referenceName"] == bucket_name]
    return matches[-1] if matches else None
fd742cd7c7484c51b3bb7a15003528834046fea7
680,383
def bool_from_env_string(string: str) -> bool:
    """Convert a string received from an environment variable into a bool.

    'true', 'TRUE', 'TrUe', 1, '1' = True
    Everything else is False.

    :param string: The string to convert to a bool.
    """
    lowered = str(string).lower()
    if lowered == 'false' or str(string) == '':
        return False
    if lowered == 'true':
        return True
    try:
        # Narrow except: int() raises TypeError/ValueError for non-numeric
        # input; the old bare `except` could also hide unrelated errors.
        return int(string) == 1
    except (TypeError, ValueError):
        return False
adade99e2be55215fa22d56506ac47e651c11829
589,813
from typing import List


def read_umls_file_headers(meta_path: str, filename: str) -> List[str]:
    """
    Read the file descriptor MRFILES.RRF from a UMLS release and get column
    headers (names) for the given file.

    MRFILES.RRF file format: a pipe-separated values
    Useful columns:
        column 0: name of one of the files in the META directory
        column 2: column names of that file

    Args:
        meta_path: path to the META directory of an UMLS release
        filename: name of the file to get its column headers

    Returns:
        a list of column names

    Raises:
        ValueError: if *filename* is not listed in MRFILES.RRF
    """
    file_descriptors = f"{meta_path}/MRFILES.RRF"  # to get column names
    with open(file_descriptors) as fin:
        for line in fin:
            splits = line.split("|")
            found_filename = splits[0]
            # ugly hack: every row ends with an empty column
            column_names = (splits[2] + ",").split(",")
            # NOTE(review): substring match kept from the original -- confirm
            # whether exact equality was intended here.
            if found_filename in filename:
                return column_names
    # `assert False` was used before: it vanishes under `python -O` and its
    # message omitted the filename; raise a real, informative error instead.
    raise ValueError(f"Couldn't find column names for file {filename}")
788bca5a94e2f7c40f09bc1804d7c2e1e31f2c2b
42,257
import re def _fixup_cc_list(cc_value): """Fix up cc list separators and remove duplicates.""" # Copied from trac.ticket.model cclist = [] for cc in re.split(r'[;,\s]+', cc_value): if cc and cc not in cclist: cclist.append(cc) return ', '.join(cclist)
5d2ac464cddd4e24995633c666c2822b155cbabb
575,689
def interval(mean, std, z):
    """Return the symmetric interval mean ± z·std as a (low, high) tuple."""
    half_width = std * z
    return mean - half_width, mean + half_width
72b4d14f624f80ad3bc2194e269193463ba9387c
146,370
import attr


def get_type(class_, attr_name):
    """Return the declared type of attribute *attr_name* on attrs class *class_*."""
    field = getattr(attr.fields(class_), attr_name)
    return field.type
aebb322db69a3ab700346398c6c2963a4f85051e
484,890
def get_perfdata(label, value, uom, warn, crit, min, max):
    """Returns 'label'=value[UOM];[warn];[crit];[min];[max] """
    # Note: `min`/`max` shadow builtins but are part of the public
    # signature, so they are kept.
    parts = ["'{}'={}".format(label, value)]
    if uom is not None:
        parts.append(uom)
    for threshold in (warn, crit, min, max):
        parts.append(';')
        if threshold is not None:
            parts.append(str(threshold))
    parts.append(' ')
    return ''.join(parts)
a1911bf827ef3111a2ca0a5e5cfdd727c98b09b1
657,509
def convert_metric(metric):
    """
    Convert a metric name to a fixed name that can be used in
    various scripts.

    Args:
        metric (str): input metric

    Returns:
        metric (str): output metric
    """
    aliases = {
        "prc_auc": "pr_auc",
        "prc-auc": "pr_auc",
        "auc": "roc_auc",
        "roc-auc": "roc_auc",
    }
    return aliases.get(metric, metric)
353d67d66d82aa7e3a44b3b57653b67fb21c823a
460,248
import wave def _GetWaveDurationSecs(wav_path): """Gets the duration in secs of the WAV file.""" wav = wave.open(wav_path) try: return wav.getnframes() / (wav.getnchannels() * wav.getframerate()) finally: wav.close()
3d8de2d392c66efe86304ddd7b2fb89e2aa5c0e6
225,945
def _calculate_x_bounds(params_data, padding): """Calculate the lower and upper ends of the x-axis for each group. Args: params_data (df): see _combine_params_data padding (float): the x_range is extended on each side by x_padding times the range of the data Returns: x_min (Series): The index are the parameter groups. The values are the left bound of the x-axis for this parameter group x_max (Series): Same as x_min but for right bound """ raw_min = ( params_data.groupby("group")[["conf_int_lower", "value"]].min().min(axis=1) ) raw_max = ( params_data.groupby("group")[["conf_int_upper", "value"]].max().max(axis=1) ) white_space = (raw_max - raw_min).clip(1e-50) * padding x_min = raw_min - white_space x_max = raw_max + white_space x_min.name = "x_min" x_max.name = "x_max" return x_min, x_max
4e00f44d684cf1fde6b7af621076735e0d1aa229
145,316
def get_followers(db, uid, timestamp=None):
    """
    Yield all follower ids for the user with the given uid.

    If *timestamp* is given, it is a dictionary of criteria applied to
    the "date" field of the follow documents.
    """
    match = {'follows': uid}
    if timestamp:
        match['date'] = timestamp
    pipeline = [
        {'$match': match},
        {'$group': {'_id': '$id'}},
    ]
    cursor = db.follow.aggregate(pipeline, allowDiskUse=True)
    return (doc['_id'] for doc in cursor)
2064162bd4d04f9cbc4f509169142146187a62e4
324,606
def force_console_input(
    query: str,
    allowable,
    onfail: str = "Input not recognised, please try again.\n",
    case_sensitive=False,
):
    """Prompt on the console until the user submits an accepted answer.

    Args:
        query (str): The query to issue the user with.
        allowable (str or container): The options the user may submit.
            For a string, any substring is accepted; for containers,
            any element.
        onfail (str): Prompt shown after a rejected answer.
        case_sensitive (bool): When False, answers are lower-cased
            before checking (and returned lower-cased).

    Returns:
        The accepted input.

    Raises:
        ValueError: If *allowable* is empty.
        IOError: If the user submits "quit" or "exit".
    """
    if not allowable:
        raise ValueError("At least one entry must be allowable.")
    answer = input(query)
    while True:
        checked = answer if case_sensitive else answer.lower()
        if checked in ("quit", "exit"):
            raise IOError("Exit command received.")
        if checked in allowable:
            return checked
        answer = input(onfail)
d43e98f77a14f0045117738900128bf779cba173
627,605
def bool_to_returncode(success):
    """Map *success* to a process return code: 0 on success, 1 on failure.

    Also prints 'Success.' or 'Failed.' accordingly.
    """
    print('Success.' if success else 'Failed.')
    return 0 if success else 1
4effd1340bdf46b91a2fcadd3379d78eb788d282
400,426
def set_whole_node_entry(entry_information):
    """Fill in whole-node defaults on a white-list entry.

    Args:
        entry_information (dict): a dictionary of entry information from
            white list file

    Returns:
        dict: the same dictionary, with whole-node defaults filled in
    """
    # submit_attrs defaults are only applied when the entry already
    # defines a submit_attrs section (matching the original behavior).
    if "submit_attrs" in entry_information:
        submit_attrs = entry_information["submit_attrs"]
        submit_attrs.setdefault("+xcount", None)
        submit_attrs.setdefault("+maxMemory", None)
    # attrs section is always ensured; setdefault replaces the repeated
    # membership checks without changing any outcome.
    attrs = entry_information.setdefault("attrs", {})
    attrs.setdefault("GLIDEIN_CPUS", {"value": "auto"})
    attrs.setdefault("GLIDEIN_ESTIMATED_CPUS", {"value": 32})
    attrs.setdefault("GLIDEIN_MaxMemMBs", {"type": "string", "value": ""})
    attrs.setdefault("GLIDEIN_MaxMemMBs_Estimate", {"value": "True"})
    return entry_information
650068343620696f5e16126cb08fb156219d23ec
623,188
def poly5(x, a5, a4, a3, a2, a1, a0, export=False):
    """Polynom 5th degree for fitting.

    :param x: parameter
    :param a5: coeff
    :param a4: coeff
    :param a3: coeff
    :param a2: coeff
    :param a1: coeff
    :param a0: coeff
    :param export: pass 'Mathematica' to get the expression as text
    :returns: polynomial value (or its textual form when exporting)
    """
    if export == 'Mathematica':
        return f'((((({a5}*{x} + {a4})*{x} + {a3})*{x} + {a2})*{x} + {a1})*{x} + {a0})'
    # Horner evaluation, identical to the nested-parentheses form.
    result = a5
    for coeff in (a4, a3, a2, a1, a0):
        result = result * x + coeff
    return result
607929a352fda57200e8624759e1e510ad5cc173
518,974
def decode_var_len_uint8(br):
    """Decodes a number in the range [0..255], by reading 1 - 11 bits."""
    if not br.read_bits(1):
        return 0
    nbits = br.read_bits(3)
    if nbits == 0:
        return 1
    return (1 << nbits) + br.read_bits(nbits)
9ff653a3781d8b48e2b971661dffba0d3a2ec6ff
58,758
def findPattern(genome, pattern):
    """Return all start indexes (overlaps included) where *pattern*
    occurs in *genome*."""
    window = len(pattern)
    return [i for i in range(len(genome) - window + 1)
            if genome[i:i + window] == pattern]
30cfdaab9c5b12115804a522e91f98488c4419d7
427,024
def _copy_days_used(orig: dict[str, list[int]]) -> dict[str, list[int]]: """ Return a deep copy of the given dictionary mapping club codes to a 0-indexed list of counts of instances per day for that club. """ new = {} for (key, inner) in orig.items(): new[key] = inner[:] return new
1ec82f714f471bf80d46d3d862d08c4a28fc5448
486,582
def _invert_targets_pairs(targets_pairs, label_encoder): """ Given a list of targets pairs of the form 'target1+target2', revert back to the original labeling by using `label_encoder.inverse_transform` Parameters ---------- targets_pairs : list or array-like of str label_encoder : fitted LabelEncoder Returns ------- targets_pairs : list of str the inversed targets_pairs """ t1t2 = [l.split('+') for l in targets_pairs] t1, t2 = zip(*t1t2) t1 = label_encoder.inverse_transform([int(t) for t in t1]) t2 = label_encoder.inverse_transform([int(t) for t in t2]) return ['+'.join([str(tt1), str(tt2)]) for tt1, tt2 in zip(t1, t2)]
3ed6ddfab45ebb3486efa6a54e9decda87e1a956
100,829
import torch


def padding_mask(x_lens):
    """
    Transform sample lengths into a binary validity mask.

    inputs:
    - x_lens: length of each sample in minibatch. # tensor # (batch_size, )

    outputs:
    - mask: 1-0 binary mask. 1 means valid and 0 means invalid.
      # tensor # (batch_size, longest_time_step, 1)
    """
    batch_size = len(x_lens)
    longest = max(x_lens)
    mask = torch.zeros(batch_size, longest, 1)
    for row, valid_len in enumerate(x_lens):
        mask[row, :valid_len] = 1.
    return mask
ea9e3c06d61f5d9b19795a59dbb2956e8bdb4385
36,033
def hello_get(name: str):
    """ Example of passing parameters via GET. """
    greeting = f"Hello {name}!"
    return {"message": greeting}
42de133d75806507ee65a2df955fc733d8e284cb
420,309
def getParamsOfModel(model):
    """
    Return the parameters of a fitted estimator via ``get_params``.

    Parameters
    ----------
    model: estimator exposing ``get_params`` (e.g. RandomForestClassifier)

    Returns
    -------
    dict
        mapping of parameter names to values
    """
    return model.get_params()
c7c828973477d8fd7803f22313b60face7f1a804
248,531
def parent_map(xml):
    """Creates the parent map dictionary, a map from child to parent in the XML hierarchy.

    The map is built once and cached on the element itself as ``xml.pm``;
    subsequent calls return the cached dict.

    NOTE(review): the caching only works if *xml* permits arbitrary
    attribute assignment -- the C-accelerated stdlib ``ElementTree.Element``
    (and lxml elements) do not, so this presumably targets a custom element
    class. Confirm against the callers.
    """
    try:
        # Fast path: a previous call already attached the map.
        return xml.pm
    except AttributeError:
        # Iterating a parent element yields its direct children, so this
        # comprehension visits every (child, parent) edge exactly once.
        xml.pm = {c:p for p in xml.iter() for c in p}
        return xml.pm
68e3a207488b06c7b71c397057ea50985358092c
219,034
def norm(vector):
    """Scale *vector* to unit length.

    A zero-length vector maps to [0, ...] (integer zeros), matching the
    original behaviour.
    """
    length = sum(component ** 2 for component in vector) ** (1 / 2)
    if length == 0:
        return [0 for _ in vector]
    return [component / length for component in vector]
a8501638fcc00b45789556d672e4e685b02ba606
151,969
def mask_hash(hash, show=6, char="*"):
    """
    Return the given hash with only the first ``show`` characters visible.
    The rest are masked with ``char`` for security reasons.
    """
    visible, hidden = hash[:show], hash[show:]
    return visible + char * len(hidden)
4662079131f7a0751a7dc55dfe53df6bdcc79d5d
218,811
def max_power_rule(mod, g, tmp):
    """
    **Constraint Name**: GenCommitCap_Max_Power_Constraint

    **Enforced Over**: GEN_COMMIT_CAP_OPR_TMPS

    Power plus upward services cannot exceed capacity.
    """
    # Pyomo constraint rule: returns a relational *expression* (not a bool)
    # for project g at timepoint tmp, all terms in MW.
    return mod.GenCommitCap_Provide_Power_MW[g, tmp] \
        + mod.GenCommitCap_Upwards_Reserves_MW[g, tmp] \
        <= mod.Commit_Capacity_MW[g, tmp]
58553519d8a7c6c772333ddf4a1b06121c23f69e
378,082
def readMailTextFile(filename):
    """Return the entire contents of *filename* as a single string."""
    with open(filename) as fh:
        return fh.read()
81a90a3ee01889b57c7944aa2b3a7831417ffe96
145,649
def _apply_jump(board, move): """Apply a jump move to a given board. Args: board: board string move: move tuple Returns: Board string of new position. """ assert (board[move[0]].lower() == 'b') board_list = list(board) board_list[move[-1]] = board_list[move[0]] if move[-1] > 27: # King the piece if necessary. board_list[move[-1]] = board_list[move[-1]].upper() board_list[move[0]] = '-' # remove the jumped checkers for i in move[1::2]: board_list[i] = '-' return ''.join(board_list)
2bcf732f88809a6942b53777e465dec9404cf4ee
599,791
def testhasperm(context, model, action): """ Returns True iif the user have the specified permission over the model. For 'model', we accept either a Model class, or a string formatted as "app_label.model_name". Sample usage: {% testhasperm model 'view' as can_view_objects %} {% if not can_view_objects %} <h2>Sorry, you have no permission to view these objects</h2> {% endif %} """ user = context['request'].user if isinstance(model, str): app_label, model_name = model.split('.') else: app_label = model._meta.app_label model_name = model._meta.model_name required_permission = '%s.%s_%s' % (app_label, action, model_name) return user.is_authenticated and user.has_perm(required_permission)
e1d2202feb5be1b2fc2a9042ec2fb829469b6499
164,131