content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def expected_response(y: np.ndarray, w: np.ndarray, policy: np.ndarray,
                      mu: Optional[np.ndarray] = None, ps: Optional[np.ndarray] = None) -> float:
    """Estimate expected response.

    Parameters
    ----------
    y: array-like of shape = (n_samples)
        Observed target values.

    w: array-like of shape = (n_samples)
        Treatment assignment variables.

    policy: array-like of shape = (n_samples)
        Estimated treatment policy.

    mu: array-like of shape = (n_samples, n_trts), optional
        Estimated potential outcomes.

    ps: array-like of shape = (n_samples, n_trts), optional
        Estimated propensity scores.

    Returns
    -------
    expected_response: float
        Estimated expected_response.
    """
    mu = np.zeros((w.shape[0], np.unique(w).shape[0])) if mu is None else mu
    ps = pd.get_dummies(w).mean(axis=0).values if ps is None else ps
    indicator = np.array(w == policy, dtype=int)
    expected_response = np.mean(mu[np.arange(w.shape[0]), policy]
                                + (y - mu[np.arange(w.shape[0]), policy]) * indicator / ps[w])
    return expected_response
5,350,200
def get_type_for_field(field: Field) -> type:
    """
    For optional fields, the field type_ is a :class:`typing.Union`, of ``NoneType`` and
    the actual type. Here we extract the "actual" type from a Union with None
    """
    if not field.sub_fields:
        return field.type_
    for f in field.sub_fields:
        if f.type_ != type(None):  # noqa
            return f.type_
    raise Exception(f"No type found for field: {field}")
5,350,201
def read_program_data(program: List[str]) -> int: """Read program data from port computer system. Args: program (List[str]): the program code containing masks and memory Returns: int: sum of all values in memory """ memory = defaultdict(int) for line in program: if line.startswith('mask'): _, mask = line.split(' = ') ones = remove_leading_zeroes( [1 if c == '1' else 0 for c in mask] ) floating = remove_leading_zeroes( [1 if c == 'X' else 0 for c in mask] ) mask_len = 36 # This is hard-coded currently and may change # if this problem is used in a new context. else: address, value = [int(n) for n in MEM_RE.match(line).groups()] address = [int(a) for a in bin(address)[2:]] if len(address) < mask_len: address = add_leading_zeroes(address, mask_len - len(address)) try: if 1 in ones: address = mask_values(ones, address, 1) except TypeError: pass if 1 in floating: addresses = mask_floating(floating, address) for address in addresses: address = int(''.join([str(a) for a in address]), base=2) memory[address] = value return sum(memory.values())
5,350,202
def get_mgr_worker_msg(comm, status=None):
    """Get message to worker from manager.
    """
    status = status or MPI.Status()
    comm.probe(source=0, tag=MPI.ANY_TAG, status=status)
    tag = status.Get_tag()
    if tag in [STOP_TAG, PERSIS_STOP]:
        return tag, None, None
    Work = comm.recv(buf=None, source=0, tag=MPI.ANY_TAG, status=status)
    calc_in = comm.recv(buf=None, source=0)
    return tag, Work, calc_in
5,350,203
def retry(exceptions, tries=4, delay=3, backoff=2, logger=None):
    """
    Retry calling the decorated function using an exponential backoff.

    Args:
        exceptions: The exception to check. May be a tuple of
            exceptions to check.
        tries: Number of times to try (not retry) before giving up.
        delay: Initial delay between retries in seconds.
        backoff: Backoff multiplier (e.g. value of 2 will double the delay
            each retry).
        logger: Logger to use. If None, print.
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except exceptions as e:
                    msg = '{}, Retrying in {} seconds...'.format(e, mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
5,350,204
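A minimal usage sketch for the `retry` decorator above, assuming the decorator and its `time`/`functools.wraps` imports are available; the flaky function and failure rate are invented for illustration.

import random

@retry(ValueError, tries=3, delay=1, backoff=2)
def flaky_fetch():
    # Fails randomly to demonstrate the retry/backoff behaviour.
    if random.random() < 0.7:
        raise ValueError("transient failure")
    return "ok"

# Retries up to 3 attempts, doubling the delay each time; may still raise
# ValueError if every attempt fails.
print(flaky_fetch())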
def install_pip(module): """Method installs python module via pip Args: module (str): python module Returns: none """ modtok = module.split('>=') if ('>=' in module) else module.split('==') module_name = modtok[0] module_version = modtok[1] if (len(modtok) > 1) else None pip_path = 'pip' if ('pip' not in environ) else '$pip' if (module_version != None and Utils.module_exists(module_name)): if Utils.module_version_ok(module_version, Utils.module_version(module_name)): print('Module {0} already installed with version {1}'.format(module_name,Utils.module_version(module_name))) else: print ('Upgrading module {0} to version {1}'.format(module_name,module_version)) cmd = '{0} install --upgrade "{1}"'.format(pip_path, module) result, _, err = shell_exec(cmd, True) if result != 0: print('Failed to install {0}, hydratk installation failed.'.format(module)) print(err) exit(-1) else: print ('Installing module {0}'.format(module)) cmd = '{0} install "{1}"'.format(pip_path, module) print(cmd) result, _, err = shell_exec(cmd, True) if result != 0: print('Failed to install {0}, hydratk installation failed.'.format(module)) print(err) exit(-1)
5,350,205
def test_resources_init():
    """
    Test initialization of the resources module.

    Expected results: resources.paths should have the argument directory and
    the directory which resources.py lives in
    """
    with TemporaryDirectory() as tmp_dir:
        resources = Resources([tmp_dir])
        assert resources.paths
        assert resources.paths[0] == tmp_dir
        assert resources.paths[1] == os.path.dirname(statick_tool.resources.__file__)
5,350,206
def list_effects(ctx: Context, param: Option, value: str) -> None:
    """
    List the names and descriptions of the effects currently available then exit.

    :param ctx: see callbacks for click options
    :param param: see callbacks for click options
    :param value: see callbacks for click options
    :return: No meaningful return
    """
    if not value or ctx.resilient_parsing:
        return
    click.echo(EFFECT_CACHE.create_list_effects_display())
    ctx.exit()
5,350,207
def load_dictionary(dicttimestamp, server='postgres-cns-myaura'): """ Load dictionary from database Args: dicttimestamp (string): the version of dictionary (ex: 20210131) server (string): the server name in db_config.ini Returns: tuple (termdictparser, pandas.DataFrame): A TermDictParser and a pandas dataframe containing the dictionary. """ print('--- Loading {server:s} dictionary ({dicttimestamp:s}) ---'.format(server=server, dicttimestamp=dicttimestamp)) # if 'postgres' in server: engine = db.connectToPostgreSQL(server=server) tablename = 'dictionaries.dict_%s' % (dicttimestamp) sql = """ SELECT d.id, COALESCE(d.id_parent,d.id) AS id_parent, d.dictionary, d.token, COALESCE(p.token, d.token) as parent, d.type, d.source, d.id_original, COALESCE(p.id_original, d.id_original) as id_original_parent FROM %s d LEFT JOIN %s p ON d.id_parent = p.id WHERE d.enabled > 0""" % (tablename, tablename) elif 'mysql' in server: engine = db.connectToMySQL(server=server) tablename = 'dict_%s' % (dicttimestamp) sql = """ SELECT d.id, IFNULL(d.id_parent,d.id) AS id_parent, d.dictionary, d.token, IFNULL(p.token, d.token) as parent, d.type, d.source, d.id_original, IFNULL(p.id_original, d.id_original) as id_original_parent FROM %s d LEFT JOIN %s p ON d.id_parent = p.id WHERE d.enabled = True""" % (tablename, tablename) else: raise TypeError("Invalid server name. The name of the server must contain either a 'mysql' or 'postgress' string.") df = pd.read_sql(sql, engine, index_col='id') return df
5,350,208
def _test():
    """ Test """
    print("rdbutils")
    cm = connect("sqlite3", ":memory:")

    # Create the table
    with cm as cursor:
        cursor.execute("CREATE TABLE `t_test`(`id` INTEGER PRIMARY KEY AUTOINCREMENT, `value` TEXT)")

    # Insert data
    with cm as cursor:
        cursor.execute("INSERT INTO `t_test`(`value`) VALUES(?)", "name")
        cursor.execute("INSERT INTO `t_test`(`value`) VALUES(?)", "namae")
        cursor.execute("INSERT INTO `t_test`(`value`) VALUES(?)", "nomen")
        # Try an SQL injection
        cursor.execute("INSERT INTO `t_test`(`value`) VALUES(?)", "1);DELETE FROM `t_test`;--")

    # Check the data
    with cm as cursor:
        cursor.execute("SELECT * FROM `t_test` ORDER BY `id`")
        for row in cursor:
            print(row)

    print("OK")
5,350,209
def part1(data):
    """
    >>> part1(((20, 30), (-10, -5)))
    45
    >>> part1(INPUT)
    13203
    """
    target_x, target_y = data
    best = None
    for dx in range(1, max(target_x) + 1):
        for dy in range(0, - min(target_y) + 1):
            hit_target, height = trajectory(target_x, target_y, dx, dy)
            if hit_target:
                if best is None:
                    best = height
                else:
                    best = max(best, height)
    return best
5,350,210
def get_neighbors(p, exclude_p=True, shape=None, nNeighbors=1, get_indices=False, direction=None, get_mask=False): """Determine pixel coordinates of neighboring pixels. Includes also all pixels that neighbor diagonally. Parameters ---------- p : tuple Gives the coordinates (y, x) of the central pixel exclude_p : boolean Whether or not to exclude the pixel with position p from the resulting list. shape : tuple Describes the dimensions of the total array (NAXIS2, NAXIS1). Returns ------- neighbors: numpy.ndarray Contains all pixel coordinates of the neighboring pixels [[y1, x1], [y2, x2], ...] Adapted from: https://stackoverflow.com/questions/34905274/how-to-find-the-neighbors-of-a-cell-in-an-ndarray """ ndim = len(p) n = nNeighbors*2 + 1 # generate an (m, ndims) array containing all combinations of 0, 1, 2 offset_idx = np.indices((n,) * ndim).reshape(ndim, -1).T # use these to index into np.array([-1, 0, 1]) to get offsets lst = list(range(-(nNeighbors), nNeighbors + 1)) offsets = np.r_[lst].take(offset_idx) if direction == 'horizontal': indices = np.where(offsets[:, 0] == 0) elif direction == 'vertical': indices = np.where(offsets[:, 1] == 0) elif direction == 'diagonal_ul': indices = np.where(offsets[:, 0] == offsets[:, 1]) elif direction == 'diagonal_ur': indices = np.where(offsets[:, 0] == -offsets[:, 1]) if direction is not None: offsets = offsets[indices] # optional: exclude offsets of 0, 0, ..., 0 (i.e. p itself) if exclude_p: offsets = offsets[np.any(offsets, 1)] neighbours = p + offsets # apply offsets to p # optional: exclude out-of-bounds indices if shape is not None: valid = np.all((neighbours < np.array(shape)) & (neighbours >= 0), axis=1) neighbours = neighbours[valid] if get_mask: return valid if get_indices: indices_neighbours = np.array([]) for neighbour in neighbours: indices_neighbours = np.append( indices_neighbours, np.ravel_multi_index(neighbour, shape)).astype('int') return indices_neighbours return neighbours
5,350,211
def _update_wgrad_clipped(learning_rate, loss, w1, w2):
    """same as above, clamped in unit sphere"""
    for k in range(w1.size):
        grad = loss * w2[k]
        w1[k] = w1[k] - learning_rate * grad
        if w1[k] < -1.:
            w1[k] = -1.
        elif w1[k] > 1.:
            w1[k] = 1.
5,350,212
def make_coordinate_grid(spatial_size, type):
    """
    Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
    """
    h, w = spatial_size
    x = torch.arange(w).type(type)
    y = torch.arange(h).type(type)

    x = (2 * (x / (w - 1)) - 1)
    y = (2 * (y / (h - 1)) - 1)

    yy = y.view(-1, 1).repeat(1, w)
    xx = x.view(1, -1).repeat(h, 1)

    meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)

    return meshed
5,350,213
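A small usage sketch for `make_coordinate_grid` above; the spatial size and dtype are arbitrary choices for illustration, assuming `torch` is available.

import torch

grid = make_coordinate_grid((4, 5), torch.float32)
print(grid.shape)    # torch.Size([4, 5, 2]) -- per pixel (x, y) coordinates
print(grid[0, 0])    # tensor([-1., -1.]): top-left corner
print(grid[-1, -1])  # tensor([1., 1.]): bottom-right corner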
def add_wrong_column(data_frame):
    """
    Adds wrong column to dataframe

    :params dataframe data_frame:
    :returns dataframe:
    """
    new_df = data_frame.copy()
    new_df['Ducks'] = 0
    return new_df
5,350,214
def p2l(X, Y, D, tol, inputTransform): """ Computes the Procrustean point-line registration between X and Y+nD with anisotropic Scaling, where X is a mxn matrix, m is typically 3 Y is a mxn matrix denoting line origin, same dimension as X D is a mxn normalized matrix denoting line direction R is a mxm rotation matrix, A is a mxm diagonal scaling matrix, and t is a mx1 translation vector Q is a mxn fiducial on line that is closest to X after registration fre is the fiducial localization error based on the Majorization Principle """ [m,n] = X.shape err = np.Infinity E_old = 1000000 * np.ones((m,n)) e = np.ones((1,n)) # intialization Q = Y # normalize the line orientation just in case Dir = D/np.linalg.norm(D, ord=2,axis=0,keepdims=True) while err > tol: [R, t, A] = AOPA_Major(X, Q, tol) E = Q-np.matmul(R,np.matmul(A,X))-np.matmul(t,e) # project point to line Q = Y+Dir*np.tile(np.einsum('ij,ij->j',np.matmul(R,np.matmul(A,X))+np.matmul(t,e)-Y,Dir),(m,1)) err = np.linalg.norm(E-E_old) E_old = E E = Q - np.matmul(R, np.matmul(A,X)) - np.matmul(t,e) # calculate fiducial registration error fre = np.sum(np.linalg.norm(E,ord=2,axis=0,keepdims=True))/X.shape[1] lps2ras = np.diag([-1, -1, 1, 1]) data = np.eye(4) data[0:3, 3] = t.T data[:3, :3] = np.dot(R, A) data = np.dot(data, lps2ras) transform_matrix = vtk.vtkMatrix4x4() dimensions = len(data) - 1 for row in range(dimensions): for col in range(dimensions + 1): transform_matrix.SetElement(row, col, data[(row, col)]) inputTransform.SetMatrixTransformToParent(transform_matrix) return [R,t,A,Q,fre, inputTransform]
5,350,215
def test_federal_account_insert(): """Test federal account creation from underlying TAS records.""" mommy.make( TreasuryAppropriationAccount, agency_id='abc', main_account_code='7777', account_title='Fancy slipper fund' ) mommy.make( TreasuryAppropriationAccount, agency_id='abc', main_account_code='7777', account_title='Fancy boot fund', ending_period_of_availability='2016') mommy.make( TreasuryAppropriationAccount, agency_id='abc', main_account_code='7777', account_title='Fancy flower fund', ending_period_of_availability='2017' ) mommy.make( TreasuryAppropriationAccount, agency_id='abc', main_account_code='7777', account_title='Fancy cat fund', ending_period_of_availability='' ) # run the federal account insert process and check results insert_federal_accounts() federal_accounts = FederalAccount.objects.all() # only 1 record per unique agency/main account TAS combo assert federal_accounts.count() == 1 # federal account title should match title of the tas w/ latest EPOA # (TAS with an EPOA considered before TAS records with a blank/NULL EPOA) fa = federal_accounts[0] assert fa.account_title == 'Fancy flower fund' # federal_account foreign key on TAS records should = id of the federal # account we just created distinct_fa = TreasuryAppropriationAccount.objects.values('federal_account').distinct() assert distinct_fa.count() == 1 assert distinct_fa[0]['federal_account'] == fa.id
5,350,216
def config_database(db_name):
    """
    Create a database in sqlite3

    :param db_name: The name of the file for the database
    :return: A database object and its connection object
    """
    db = Database()
    connection = db.create_connection(db_name)
    db.create_table(connection)
    return db, connection
5,350,217
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    # The original body was missing; the standard OpenCV call is assumed here.
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
5,350,218
def parse_main_argument(argument, export_folder): """Function parsing the main_argument argument. Returns a dataframe containing the search terms (or the urls if main_argument is a youtube file.""" # File or string if Path(argument).is_file(): is_file = True argument_file_content = open(argument).read() # File of urls or search terms is_spotify = ( "spotify" in argument_file_content and argument_file_content.startswith("http") ) is_deezer = ( "deezer" in argument_file_content and argument_file_content.startswith("http") ) is_youtube = ( "youtu" in argument_file_content and argument_file_content.startswith("http") ) else: is_file = False is_spotify = "spotify" in argument is_deezer = "deezer" in argument # would be equivalent to argument youtube_url, doesn't exist is_youtube = False if is_spotify: if is_file: terms = extract_terms_from_file(argument) df = get_spotify_songs(terms) logger.info("Reading file containing spotify urls at %s.", argument) else: terms = extract_terms_from_arg(argument) df = get_spotify_songs(terms) logger.info("Reading spotify urls %s.", argument) elif is_deezer: if is_file: terms = extract_terms_from_file(argument) df = get_deezer_songs(terms) logger.info("Reading file containing deezer urls at %s.", argument) else: terms = extract_terms_from_arg(argument) df = get_deezer_songs(terms) logger.info("Reading deezer urls %s.", argument) elif is_youtube: if is_file: df = pd.read_csv(argument, sep="\t", header=None, names=["url"]) logger.info("Reading file containing youtube urls at %s.", argument) else: if is_file: df = pd.read_csv(argument, sep="\t", header=None, names=["title"]) logger.info("Reading file containing search terms at %s.", argument) else: df = pd.DataFrame( [x.strip() for x in argument.split(",")], columns=["title"] ) logger.info("Reading search terms %s.", argument) return df
5,350,219
def validate_auth_header(headers): """Validate and decode auth token in request headers. This helper function is used in each of the below wrappers, and is responsible to validate the format of the `Authorization` header where the Lowball token is supposed to reside. Requirements for successful validation: 1. The current app must have a working auth database 2. The `Authorization` header __must__ be present in the headers 3. That header value __must__ be of the format `Bearer <token>`. The header value is split on the space character, and if the header value is properly formatted, this should result in a data structure that looks like ["Bearer", "<token>"]. If after splitting the header value on the space, the length of the resulting structure is not __exactly__ two, then the header is considered improperly formatted. 4. The token must be able to be decoded by the `Authentication.decode_token` method 5. The token cannot be expired. 6. The token must match a token that is in the application authentication database __exactly__ :param headers: Headers from request made to Lowball application :type headers: werkzeug.Headers :return: decoded token data :rtype: Token """ if current_app.auth_db is None: raise NoAuthenticationDatabaseException if "Authorization" not in headers: raise NoAuthHeaderException auth_header = headers["Authorization"].split(" ") if len(auth_header) < 2 or auth_header[0] != "Bearer": raise InvalidAuthHeaderException token = auth_header[1] decoded = current_app.authenticator.decode_token(token) g.client_data = decoded if datetime.datetime.utcnow() > decoded.expiration: raise ExpiredTokenException database_token = current_app.auth_db.lookup_token(decoded.token_id) if database_token != decoded: raise InvalidTokenException return decoded
5,350,220
def shared_random_seed():
    """All workers must call this function, otherwise it will deadlock.
    """
    seed = np.random.randint(2 ** 31)
    all_seeds = all_gather(seed)
    return all_seeds[0]
5,350,221
def _version(base): """Get a chronological version from git or PKG-INFO Args: base (dict): state Returns: str: Chronological version "yyyymmdd.hhmmss" str: git sha if available """ v1 = _version_from_pkg_info(base) v2, sha = _version_from_git(base) if v1: if v2: return (v1, None) if float(v1) > float(v2) else (v2, sha) return v1, None if v2: return v2, sha raise ValueError('Must have a git repo or an source distribution')
5,350,222
def test_reset_workflow( redis, session, reset_workflow, museum_object_factory, museum_package_factory, museum_packages_dir): """ Reset workflow and ensure dangling packages are removed """ # Objects A and B will be reset, object C will remain object_a = museum_object_factory(id=10) package_a = museum_package_factory(downloaded=True, museum_object=object_a) object_a.latest_package = package_a object_b = museum_object_factory(id=20) package_b = museum_package_factory( downloaded=True, packaged=True, museum_object=object_b ) object_b.latest_package = package_b object_c = museum_object_factory(id=30) package_c = museum_package_factory( downloaded=True, packaged=True, uploaded=True, museum_object=object_c ) object_c.latest_package = package_c (museum_packages_dir / "10" / "sip").mkdir(parents=True) (museum_packages_dir / "20" / "sip").mkdir(parents=True) (museum_packages_dir / "30" / "sip").mkdir(parents=True) session.commit() # 2 objects were reset result = reset_workflow(["--perform-reset"]) assert "Found 2 dangling objects" in result.stdout object_a = session.query(MuseumObject).get(10) object_b = session.query(MuseumObject).get(20) object_c = session.query(MuseumObject).get(30) assert not object_a.latest_package assert not object_b.latest_package assert object_c.latest_package # Package directories were deleted assert not (museum_packages_dir / "10").is_dir() assert not (museum_packages_dir / "20").is_dir() assert (museum_packages_dir / "30").is_dir()
5,350,223
def paint_flag_iceland():
    """http://www.crwflags.com/fotw/flags/is.html"""
    f = FlagPainter(18 / 25)
    colors = [(0, 0, 204), (255, 255, 255), (255, 0, 0)]
    f.background(colors[0])
    f.draw_horizontal_band((7 / 18, 11 / 18), colors[1])
    f.draw_vertical_band((7 / 25, 11 / 25), colors[1])
    f.draw_horizontal_band((8 / 18, 10 / 18), colors[2])
    f.draw_vertical_band((8 / 25, 10 / 25), colors[2])
    f.save('iceland')
5,350,224
def run_adaptive_redundancy(host_num, coder_log_conf): """Run network application for multi-hop topology :param host_num (int): Number of hosts :param profile (int): To be tested profile :param coder_log_conf (dict): Configs for logs of coders """ net = Containernet(controller=RemoteController, link=TCLink, autoStaticArp=True) mgr = VNFManager(net) hosts = create_topology(net, host_num) try: info("*** Starting network\n") net.start() # MARK: Use static ARP to avoid ping losses # info("*** Ping all to update ARP tables of each host\n") # net.pingAll() info("*** Adding OpenFlow rules\n") add_ovs_flows(net, host_num) info("*** Disable Checksum offloading\n") disable_cksum_offload(host_num) info("*** Deploy coders\n") coders = deploy_coders(mgr, hosts) # Wait for coders to be ready info("*** Starting Ryu controller\n") c0 = net.get("c0") makeTerm(c0, cmd="ryu-manager adaptive_rlnc_sdn_controller.py ; read") time.sleep(3) info("*** Run Iperf\n") run_iperf_test(hosts[0], hosts[-1], "udp", 30) print_coders_log(coders, coder_log_conf) remove_coders(mgr, coders) info("*** Emulation stops...") except Exception as e: error("*** Emulation has errors:") error(e) finally: info("*** Stopping network\n") net.stop() mgr.stop()
5,350,225
def average_link_distance_segment(D, stop=-1, qmax=1, verbose=0):
    """
    Average link clustering based on a pairwise distance matrix.

    Parameters
    ----------
    D: a (n,n) distance matrix between some items
    stop=-1: stopping criterion, i.e. distance threshold at which
             further merges are forbidden.
             By default, all merges are performed
    qmax=1: the number of desired clusters (in the limit of stop)
    verbose=0: verbosity level

    Returns
    -------
    u: a labelling of the graph vertices according to the criterion
    cost: the cost of each merge step during the clustering procedure

    Note
    ----
    this method has not been optimized
    """
    n = D.shape[0]
    if D.shape[1] != n:
        raise ValueError("non-square distance matrix")
    if stop == -1:
        stop = np.infty

    t = average_link_distance(D, verbose)
    if verbose:
        t.plot()

    u1 = np.zeros(n, np.int)
    u2 = np.zeros(n, np.int)
    if stop >= 0:
        u1 = t.partition(stop)
    if qmax > 0:
        u2 = t.split(qmax)

    if u1.max() < u2.max():
        u = u2
    else:
        u = u1

    cost = t.get_height()
    cost = cost[t.isleaf() == False]
    return u, cost
5,350,226
def _process_get_set_Operand(column, reply):
    """Process reply for functions zGetOperand and zSetOperand"""
    rs = reply.rstrip()
    if column == 1:
        # ensure that it is a string ... as it is supposed to return the operand
        if isinstance(_regressLiteralType(rs), str):
            return str(rs)
        else:
            return -1
    elif column in (2, 3):
        # if there is a comment, it will be in column 2
        # return int(float(rs))
        return _regressLiteralType(rs)
    else:
        return float(rs)
5,350,227
def test_dict_similar_keys():
    """
    unpackb() similar keys

    This was a regression in 3.4.2 caused by using
    the implementation in wy instead of wyhash.
    """
    obj = {"cf_status_firefox67": "---", "cf_status_firefox57": "verified"}
    assert ormsgpack.unpackb(ormsgpack.packb(obj)) == obj
5,350,228
def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Multi-dimensional Laplace filter using a provided second derivative function. Args: input (cupy.ndarray): The input array. derivative2 (callable): Function or other callable with the following signature that is called once per axis:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an ``int`` from ``0`` to the number of dimensions, and ``mode``, ``cval``, ``extra_arguments``, ``extra_keywords`` are the values given to this function. output (cupy.ndarray, dtype or None): The array in which to place the output. Default is is same dtype as the input. mode (str): The array borders are handled according to the given mode (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, ``'wrap'``). Default is ``'reflect'``. cval (scalar): Value to fill past edges of input if mode is ``'constant'``. Default is ``0.0``. extra_arguments (sequence, optional): Sequence of extra positional arguments to pass to ``derivative2``. extra_keywords (dict, optional): dict of extra keyword arguments to pass ``derivative2``. Returns: cupy.ndarray: The result of the filtering. .. seealso:: :func:`scipy.ndimage.generic_laplace` .. note:: When the output data type is integral (or when no output is provided and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. """ if extra_keywords is None: extra_keywords = {} ndim = input.ndim modes = _util._fix_sequence_arg(mode, ndim, 'mode', _util._check_mode) output = _util._get_output(output, input) if ndim == 0: output[...] = input return output derivative2(input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords) if ndim > 1: tmp = _util._get_output(output.dtype, input) for i in range(1, ndim): derivative2(input, i, tmp, modes[i], cval, *extra_arguments, **extra_keywords) output += tmp return output
5,350,229
def find_zones_by_tld(graph, tpd, groups, mongo_connector): """ Technically, a "tld" is ".org" or ".com". However, tld library that I use considers TLDs to be "example.org". This code just rolls with that. For the provided third-party-domain, find the zones that are associated with that tpd. """ tpds_collection = mongo_connector.get_tpds_connection() tpds_results = tpds_collection.find({'tld': tpd}) for result in tpds_results: for zone in result['zones']: zone_g_index = add_to_list(zone['zone'], groups) # A space is added because sometimes the tpd is the same as the target graph.add_node(zone['zone'], data_type="zone", type=zone_g_index, depends=[tpd + " "], dependedOnBy=[], docs="") graph.add_edge(tpd + " ", zone['zone'], value=2) for entry in zone['records']: graph.add_node(entry['host'], data_type="domain", type=zone_g_index, depends=[zone['zone']], dependedOnBy=[entry['target']], docs="") graph.add_node(entry['target'], data_type="domain", type=zone_g_index, depends=[entry['host']], dependedOnBy=[], docs="") graph.add_edge(zone['zone'], entry['host'], value=1) graph.add_edge(entry['host'], entry['target'], value=1)
5,350,230
def as_keras_metric(method):
    """
    from https://stackoverflow.com/questions/43076609/how-to-calculate-precision-and-recall-in-keras
    """
    import functools

    @functools.wraps(method)
    def wrapper(self, args, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        value, update_op = method(self, args, **kwargs)
        tf.keras.backend.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper
5,350,231
def plot_gms_spectra( gms_result: gms.GMSResult, save_file: Path = None, ): """Plot of the pSA values of the realisations and selected ground motions and the median, 16th, and 84th percentile of the GCIM Parameters ---------- gms_result: gms.GMSResult save_file: Path, optional """ ( gcim_df, realisations_df, selected_gms_df, ) = _prepare_gms_spectra(gms_result) plt.figure(figsize=(20, 9)) for label, cur_gcim in gcim_df.iloc[:, 1:].iterrows(): plt.plot( cur_gcim.index, cur_gcim.values, color="red", linestyle="solid" if label == "median" else "dashdot", label=GCIM_LABEL[label], ) for cur_ix, cur_rel in realisations_df.iloc[:, 1:].iterrows(): plt.plot( cur_rel.index.values, cur_rel.values, color="blue", linestyle="solid", label="Realisations" if cur_ix == 0 else None, linewidth=0.4, ) for cur_ix, cur_rel in selected_gms_df.iloc[:, 1:].iterrows(): plt.plot( cur_rel.index.values, cur_rel.values, color="black", linestyle="solid", label="Selected Ground Motions" if cur_ix == 0 else None, linewidth=0.4, ) plt.xscale("log") plt.yscale("log") plt.xlabel("Period, T (s)") plt.ylabel("Spectral acceleration, SA (g)") plt.title("Pseudo acceleration response spectra") plt.legend() if save_file is not None: plt.savefig(save_file) plt.close() else: plt.show()
5,350,232
def main():
    """
    TODO:
    """
    read_dictionary()
    boggle = []
    for i in range(4):
        words = input(str(i+1) + ' row of letters:')
        words = words.split()
        row = []
        for letter in words:
            if letter.islower():
                letter = letter.upper()
            if len(letter) != 1:
                print('Illegal input')
                break
            else:
                row.append(letter)
        boggle.append(row)
    found = []
    for x in range(4):
        for y in range(4):
            game(boggle, x, y, "", [], found)
    print("There are " + str(len(found)) + " words in total.")
5,350,233
def do_get_video_capture_job(port_output_name: str = 'RAW') -> str: """ Function for configure the image retrieval job from video camera. :param port_output_name: name you want to use for raw image in the application :return: output image port name """ output_raw_port_name = transform_port_name_lvl(name=port_output_name, lvl=PYRAMID_LEVEL.LEVEL_0) output_raw_port_size = transform_port_size_lvl(lvl=PYRAMID_LEVEL.LEVEL_0, rgb=True) input_port_list = None main_func_list = [output_raw_port_name] output_port_list = [(output_raw_port_name, output_raw_port_size, 'B', True)] job_name = job_name_create(action='Get image camera video frame') d = create_dictionary_element(job_module='get_image', job_name=job_name, input_ports=input_port_list, init_func_name='init_func', init_func_param=None, main_func_name='main_func_video_camera', main_func_param=main_func_list, output_ports=output_port_list) jobs_dict.append(d) return port_output_name
5,350,234
def test_interact_functions():
    """Do the helper functions in the interact module run without syntax error?"""
    import bokeh
    from ..interact import (prepare_tpf_datasource, prepare_lightcurve_datasource,
                            get_lightcurve_y_limits, make_lightcurve_figure_elements,
                            make_tpf_figure_elements, show_interact_widget)
    tpf = TessTargetPixelFile(example_tpf)
    mask = tpf.flux[0, :, :] == tpf.flux[0, :, :]
    tpf_source = prepare_tpf_datasource(tpf, aperture_mask=mask)
    lc = tpf.to_lightcurve(aperture_mask=mask)
    lc_source = prepare_lightcurve_datasource(lc)
    get_lightcurve_y_limits(lc_source)
    make_lightcurve_figure_elements(lc, lc_source)
    make_tpf_figure_elements(tpf, tpf_source)
    show_interact_widget(tpf)
5,350,235
def sync_one(src: str, dst: str, *, dry_run: bool) -> Optional[date]: """ From the snapshots that are present in src and missing in dst, pick the one that is closest to an existing snapshot in dst, and sync it. Returns the snapshot synced, or none if src and dst are already in sync. """ src_subvols = list_subvolumes(src) dst_subvols = list_subvolumes(dst) missing_subvols = src_subvols - dst_subvols if len(missing_subvols) == 0: return None # We will sync the *latest* missing subvolume first. The rationale behind # this is that data is mostly append-only, and that we prefer fragmenting # early snapshots over later snapshots. There is no advantage in rebuilding # a file that changed over time in the same order, it will only be # fragmented in the later snapshots. Rather, we can sync the final (or at # least latest) version, and rebuild the past versions backwards. sync_date = max(missing_subvols) num_days, base_date = hausdorff_distance(sync_date, dst_subvols) base_dir = base_date.isoformat() sync_dir = sync_date.isoformat() print(f'Syncing {sync_dir}, using {base_dir} as base.') # Create a writeable snapshot of the base subvolume. cmd = [ 'btrfs', 'subvolume', 'snapshot', os.path.join(dst, base_dir), os.path.join(dst, sync_dir), ] run(cmd, dry_run=dry_run) print('Waiting for sync of snapshot.') # Previously I used "btrfs subvolume sync" instead of "filesystem sync", # but that sync process reliably got stuck in an endless ioctl loop where # it would call clock_nanosleep to sleep for a second and then a # BTRFS_IOC_TREE_SEARCH ioctl, over and over again. A filesystem sync is # less buggy. cmd_sync = [ 'btrfs', 'filesystem', 'sync', os.path.join(dst, sync_dir), ] run(cmd_sync, dry_run=dry_run) cmd = [ 'target/release/reflink-diff', 'dry-run' if dry_run else 'apply', os.path.join(src, base_dir), os.path.join(src, sync_dir), os.path.join(dst, base_dir), os.path.join(dst, sync_dir), ] subprocess.run(cmd, check=True) # Sync into it. # Would be nice to use reflink support once that gets mainstream. # https://bugzilla.samba.org/show_bug.cgi?id=10170 cmd = [ 'rsync', '-a', '--delete-delay', '--inplace', '--preallocate', '--no-whole-file', '--fuzzy', '--info=copy,del,name1,progress2,stats2', os.path.join(src, sync_dir) + '/', os.path.join(dst, sync_dir), ] run(cmd, dry_run=dry_run) # Once that is done, make the snapshot readonly. cmd = [ 'btrfs', 'property', 'set', '-t', 'subvol', os.path.join(dst, sync_dir), 'ro', 'true', ] run(cmd, dry_run=dry_run) run(cmd_sync, dry_run=dry_run) return sync_date
5,350,236
def version(ctx: click.Context, project_path: Path, strict: bool) -> None: """Calculate next version from Git history. Given a Git repository, this command will find the latest version tag and calculate the next version using the Conventional Commits (CC) specification. Calculated version will be printed out to STDOUT. By default non-CC commits are allowed. Use the `--strict` flag.""" project_config = ProjectConfig.from_path(project_path) git = Git(project_config.path) try: next_version, changelog = run(project_config, git, strict) except Exception as e: ctx.fail(e) formatted_next_version = next_version.format(config=project_config) echo("Next version:", formatted_next_version) formatted_changelog = changelog.format( changelog_type=ChangeLogTypeEnum.git_commit, format_type=project_config.changelog_format_type_git, ) echo("Changelog:\n", formatted_changelog) echo("Printing next version to stdout") print(formatted_next_version)
5,350,237
def vgg8_S(*args, **kwargs):
    """VGG 8-layer model (configuration "YS")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['YS']), final_filter=256, **kwargs)
    return model
5,350,238
def createWords(word_str): """Cn Mandarin sentence to Cn Mandarin Words list""" pre_func = IO.readList(r'docs/pre_punctuation.txt')[1:] lat_func = IO.readList(r'docs/post_punctuation.txt')[1:] en_letters = IO.readList(r'docs/special_English_letters.txt')[1:] words = [] j = 0 tmp_word = '' while j < len(word_str): find_pre_func = 0 while j < len(word_str) and word_str[j] in pre_func: tmp_word += word_str[j] find_pre_func = 1 j += 1 if (u'\u9fa5' >= word_str[j] >= u'\u4e00') or word_str[j] in en_letters: if find_pre_func: tmp_word += word_str[j] else: tmp_word = word_str[j] j = j + 1 while j < len(word_str) and word_str[j] in lat_func: tmp_word += word_str[j] j = j + 1 words.append(tmp_word) tmp_word = '' return words
5,350,239
def get_calib_driver(calib_dir: str): """ Create left/right charuco point detectors and load calibration images from directory. """ reference_image = cv2.imread("tests/data/2020_01_20_storz/pattern_4x4_19x26_5_4_with_inset_9x14.png") minimum_points = 50 number_of_squares = [19, 26] square_tag_sizes = [5, 4] filter_markers = True number_of_chessboard_squares = [9, 14] chessboard_square_size = 3 chessboard_id_offset = 500 left_pd = \ charuco_pd.CharucoPlusChessboardPointDetector( reference_image, minimum_number_of_points=minimum_points, number_of_charuco_squares=number_of_squares, size_of_charuco_squares=square_tag_sizes, charuco_filtering=filter_markers, number_of_chessboard_squares=number_of_chessboard_squares, chessboard_square_size=chessboard_square_size, chessboard_id_offset=chessboard_id_offset ) right_pd = \ charuco_pd.CharucoPlusChessboardPointDetector( reference_image, minimum_number_of_points=minimum_points, number_of_charuco_squares=number_of_squares, size_of_charuco_squares=square_tag_sizes, charuco_filtering=filter_markers, number_of_chessboard_squares=number_of_chessboard_squares, chessboard_square_size=chessboard_square_size, chessboard_id_offset=chessboard_id_offset ) calibration_driver = sc.StereoVideoCalibrationDriver(left_pd, right_pd, minimum_points) for i in range(3): l_img, r_img, chessboard, scope = lcu.get_calib_data(calib_dir, i) calibration_driver.grab_data(l_img, r_img, scope, chessboard) return calibration_driver
5,350,240
def stop_loading() -> dict:
    """Force the page to stop all navigations and pending resource fetches."""
    return {"method": "Page.stopLoading", "params": {}}
5,350,241
def rename_model(model_name, new_name):
    """Assign a new name to a model in current database.

    Parameters
    ----------
    model_name : str
        Current model name.
    new_name : str
        New name for model.

    Returns
    -------
    None
    """
    mdb.models.changeKey(fromName=model_name, toName=new_name)
5,350,242
def drop_duplicates(df):
    """Drop duplicate rows and reindex.

    Args:
        df (pd.DataFrame): Dataframe.

    Returns:
        pd.DataFrame: Dataframe without duplicate rows, reindexed.

    Examples:
        >>> df = pd.DataFrame({'letters':['b','b','c'], 'numbers':[2,2,3]})
        >>> drop_duplicates(df)
          letters  numbers
        0       b        2
        1       c        3
    """
    return df.drop_duplicates().reset_index(drop=True)
5,350,243
def by_index(e, index):
    """Decompose error by an index.

    TODO

    Parameters
    ----------
    e : array_like
    index : array_like
    """
    pass
5,350,244
def estimate_progress(ihash, peers): """Estimate a percentage done based on client stats""" progress = count = 0 log.debug("peers: %s" % peers) size = float(get_size(ihash)) if not size: return "Unknown" stats = get_clientstats(ihash) # log.debug("%s" % stats) for peer in peers: progress += float(stats["%s:peer:%s:left" % (ihash, peer)]) try: percentage = 100 - (( progress / float(len(peers)) ) / size * 100) except ZeroDivisionError: if transfer_complete_for_peers(ihash, peers) and count == 0 and len(peers) > 0: percentage = 100.00 else: percentage = 0.00 log.debug("progress: %s, perc: %s, count: %s, peers: %s" % ( progress, percentage, count, peers)) return "%0.2f%%" % percentage
5,350,245
async def update_state():
    """Updates state of the TV every 5 seconds."""
    while True:
        await TV.update()
        await asyncio.sleep(5)
5,350,246
def close(fd):
    """close(fd)

    Close a file descriptor (for low level IO).
    """
    rawio = FileDescriptors.get(fd)
    _handle_oserror(rawio.close)
5,350,247
def make_otf( psf, outpath=None, dzpsf=0.1, dxpsf=0.1, wavelength=520, na=1.25, nimm=1.3, otf_bgrd=None, krmax=0, fixorigin=10, cleanup_otf=False, max_otf_size=60000, **kwargs ): """ Generate a radially averaged OTF file from a PSF file Args: psf (str): Filepath of 3D PSF TIF outpath (str): Destination filepath for the output OTF (default: appends "_otf.tif" to filename) dzpsf (float): Z-step size in microns (default: {0.1}) dxpsf (float): XY-Pixel size in microns (default: {0.1}) wavelength (int): Emission wavelength in nm (default: {520}) na (float): Numerical Aperture (default: {1.25}) nimm (float): Refractive indez of immersion medium (default: {1.3}) otf_bgrd (int, None): Background to subtract. "None" = autodetect. (default: {None}) krmax (int): pixels outside this limit will be zeroed (overwriting estimated value from NA and NIMM) (default: {0}) fixorigin (int): for all kz, extrapolate using pixels kr=1 to this pixel to get value for kr=0 (default: {10}) cleanup_otf (bool): clean-up outside OTF support (default: {False}) max_otf_size (int): make sure OTF is smaller than this many bytes. Deconvolution may fail if the OTF is larger than 60KB (default: 60000) Returns: str: Path of output file """ if outpath is None: outpath = psf.replace(".tif", "_otf.tif") if otf_bgrd and isinstance(otf_bgrd, (int, float)): bUserBackground = True background = float(otf_bgrd) else: bUserBackground = False background = 0.0 with CappedPSF(psf, max_otf_size) as _psf: shared_makeotf( str.encode(_psf.path), str.encode(outpath), wavelength, dzpsf, fixorigin, bUserBackground, background, na, nimm, dxpsf, krmax, cleanup_otf, ) return outpath
5,350,248
def get_palette(num_classes):
    """
    Maps classes to colors in the style of PASCAL VOC.
    Close values are mapped to far colors for segmentation visualization.
    See http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit

    Takes:
        num_classes: the number of classes
    Gives:
        palette: the colormap as a k x 3 array of RGB colors
    """
    # TODO: parse this from a csv file
    palette = np.zeros((num_classes, 3), dtype=np.uint8)
    for k in range(0, num_classes):
        label = k
        i = 0
        while label:
            palette[k, 0] |= (((label >> 0) & 1) << (7 - i))
            palette[k, 1] |= (((label >> 1) & 1) << (7 - i))
            palette[k, 2] |= (((label >> 2) & 1) << (7 - i))
            label >>= 3
            i += 1
    return palette
5,350,249
def is_number(input_string):
    """ if input_string includes number only, return corresponding number,
    otherwise return input_string """
    try:
        return float(input_string)
    except ValueError:
        pass

    try:
        import unicodedata
        return unicodedata.numeric(input_string)
    except (TypeError, ValueError):
        pass

    return input_string.strip('"')
5,350,250
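A brief usage sketch of `is_number` above; the inputs are arbitrary illustrations.

print(is_number("3.14"))    # 3.14 as a float
print(is_number("⅛"))       # 0.125 via unicodedata.numeric
print(is_number('"text"'))  # 'text' with the surrounding quotes stripped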
def test_upgrade_db_26_to_27(user_data_dir): # pylint: disable=unused-argument """Test upgrading the DB from version 26 to version 27. - Recreates balancer events, uniswap events, amm_swaps. Deletes balancer pools """ msg_aggregator = MessagesAggregator() _use_prepared_db(user_data_dir, 'v26_rotkehlchen.db') db_v26 = _init_db_with_target_version( target_version=26, user_data_dir=user_data_dir, msg_aggregator=msg_aggregator, ) # Checks before migration cursor = db_v26.conn.cursor() assert cursor.execute( 'SELECT COUNT(*) from used_query_ranges WHERE name LIKE "uniswap%";', ).fetchone()[0] == 2 assert cursor.execute( 'SELECT COUNT(*) from used_query_ranges WHERE name LIKE "balancer%";', ).fetchone()[0] == 2 assert cursor.execute('SELECT COUNT(*) from used_query_ranges;').fetchone()[0] == 6 assert cursor.execute('SELECT COUNT(*) from amm_swaps;').fetchone()[0] == 2 assert cursor.execute('SELECT COUNT(*) from balancer_pools;').fetchone()[0] == 1 assert cursor.execute('SELECT COUNT(*) from balancer_events;').fetchone()[0] == 1 db_v26.logout() # Migrate to v27 db = _init_db_with_target_version( target_version=27, user_data_dir=user_data_dir, msg_aggregator=msg_aggregator, ) cursor = db.conn.cursor() assert cursor.execute('SELECT COUNT(*) from used_query_ranges;').fetchone()[0] == 2 assert cursor.execute('SELECT COUNT(*) from amm_swaps;').fetchone()[0] == 0 assert cursor.execute('SELECT COUNT(*) from balancer_events;').fetchone()[0] == 0 # Finally also make sure that we have updated to the target version assert db.get_version() == 27
5,350,251
def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argument """ Initiate a reboot if the running kernel is not the latest one installed. .. note:: This state does not install any patches. It only compares the running kernel version number to other kernel versions also installed in the system. If the running version is not the latest one installed, this state will reboot the system. See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and :py:func:`~salt.states.kernelpkg.latest_installed` for ways to install new kernel packages. This module does not attempt to understand or manage boot loader configurations it is possible to have a new kernel installed, but a boot loader configuration that will never activate it. For this reason, it would not be advisable to schedule this state to run automatically. Because this state function may cause the system to reboot, it may be preferable to move it to the very end of the state run. See :py:func:`~salt.states.kernelpkg.latest_wait` for a waitable state that can be called with the `listen` requesite. name Arbitrary name for the state. Does not affect behavior. at_time The wait time in minutes before the system will be rebooted. """ active = __salt__["kernelpkg.active"]() latest = __salt__["kernelpkg.latest_installed"]() ret = {"name": name} if __salt__["kernelpkg.needs_reboot"](): ret["comment"] = ( "The system will be booted to activate " "kernel: {0}" ).format(latest) if __opts__["test"]: ret["result"] = None ret["changes"] = {"kernel": {"old": active, "new": latest}} else: __salt__["system.reboot"](at_time=at_time) ret["result"] = True ret["changes"] = {"kernel": {"old": active, "new": latest}} else: ret["result"] = True ret["comment"] = ( "The latest installed kernel package " "is active: {0}" ).format(active) ret["changes"] = {} return ret
5,350,252
def upload_file(file_name, bucket, object_name):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    s3_client = boto3.client('s3')
    try:
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
5,350,253
def get_conf(bs_info, client_config, genesis_time, setup_oracle=None, setup_poet=None, args=None): """ get_conf gather specification information into one ContainerSpec object :param bs_info: DeploymentInfo, bootstrap info :param client_config: DeploymentInfo, client info :param genesis_time: string, genesis time as set in suite specification file :param setup_oracle: string, oracle ip :param setup_poet: string, poet ip :param args: list of strings, arguments for appendage in specification :return: ContainerSpec """ genesis_time_delta = get_genesis_time_delta(genesis_time) client_args = {} if 'args' not in client_config else client_config['args'] # append client arguments if args is not None: for arg in args: client_args[arg] = args[arg] # create a new container spec with client configuration cspec = ContainerSpec(cname='client', specs=client_config) # append oracle configuration if setup_oracle: client_args['oracle_server'] = 'http://{0}:{1}'.format(setup_oracle, conf.ORACLE_SERVER_PORT) # append poet configuration if setup_poet: client_args['poet_server'] = '{0}:{1}'.format(setup_poet, conf.POET_SERVER_PORT) bootnodes = node_string(bs_info['key'], bs_info['pod_ip'], conf.BOOTSTRAP_PORT, conf.BOOTSTRAP_PORT) cspec.append_args(bootnodes=bootnodes, genesis_time=genesis_time_delta.isoformat('T', 'seconds')) # append client config to ContainerSpec if len(client_args) > 0: cspec.append_args(**client_args) return cspec
5,350,254
def generate_list( bibliographies: Biblio, search_key: str ) -> Generator[CompletionItem, None, None]: """Given a bibliography and a search string, find all completion items that might match the entry.""" key_regex = re.compile("^{}.*".format(search_key)) for key in list(filter(key_regex.match, bibliographies.keys())): entry = bibliographies[key] yield CompletionItem( label="{}".format(key), kind=CompletionItemKind.Text, documentation=info(entry), insert_text=key, )
5,350,255
def zeropad(tr, starttime, endtime): """ Zeropads an obspy.Trace so as to cover the time window specified by `starttime`'and `endtime` Parameters ---------- tr : obspy.Trace starttime, endtime : obspy.UTCDateTime Returns ------- trace : obspy.Trace Zeropadded copy of the input trace. """ trace = Trace() for key, value in tr.stats.items(): if key not in ['endtime', 'npts']: trace.stats[key] = value fs = tr.stats.sampling_rate samples_before = int((tr.stats.starttime - starttime) * fs) samples_after = int((endtime - tr.stats.endtime) * fs) data = tr.data if samples_before > 0: trace.stats.starttime = tr.stats.starttime - ((samples_before+1) / fs) data = np.concatenate((np.zeros(samples_before+1), data)) if samples_after > 0: data = np.concatenate((data, np.zeros(samples_after+1))) trace.data = data return trace
5,350,256
def deepwalk(G, _filepath, o=1, num_walks_node=10, walk_length=80, representation_size=128, window_size=5,): """not going to deal with memory exceeding case""" output = _filepath + G.name print("Walking...") time_start = time.time() walks = gu.build_deepwalk_corpus(G, num_paths=num_walks_node, path_length=walk_length, alpha=0, rand=random.Random(0)) # alpha = 0: do not go back time_end = time.time() print('Walking time cost:', time_end - time_start) print("Training...") time_start = time.time() # with negative sampling: 5(default) model = Word2Vec(walks, size=representation_size, window=window_size, min_count=0, sg=1, workers=cpu_count()) time_end = time.time() print('Training vectors time cost:', time_end - time_start) if o == 1: model.wv.save_word2vec_format(output + '.dw.emb') else: model.wv.save_word2vec_format(output + '0.dw.emb') return time_end - time_start
5,350,257
def logship_status(host): """Report log shipping retstore delta and latency""" crit = warn = 0 msg = '' sql = """SELECT secondary_server, secondary_database, primary_server, primary_database, last_restored_date, DATEDIFF(mi, last_restored_date, GETDATE()) last_restored_delta, last_restored_latency, restore_threshold FROM msdb..log_shipping_monitor_secondary""" rows = execute_sql(host, sql) if type(rows) is dict: return rows for row in rows: if row.last_restored_delta >= row.restore_threshold: warn += 1 msg += "Srv:%s DB:%s Restore delta %s exceeds threshold of %s\n" % (row.primary_server, row.primary_database, row.last_restored_delta, row.restore_threshold) if row.last_restored_latency >= row.restore_threshold: crit += 1 msg += "Srv:%s DB:%s Latency of %s exceeds threshold of %s\n" % (row.primary_server, row.primary_database, row.last_restored_latency, row.restore_threshold) if row.last_restored_delta < row.restore_threshold and row.last_restored_latency < row.restore_threshold: msg += "Srv:%s DB:%s Latency:%s Restore delta:%s\n" % (row.primary_server, row.primary_database, row.last_restored_latency, row.last_restored_delta) if crit > 0: code = 'CRITICAL' msg = 'Log shipping CRITICAL\n' + msg elif warn > 0: code = 'WARNING' msg = 'Log shipping warning\n' + msg else: code = 'OK' msg = 'Log shipping OK\n' + msg return {'code':code, 'msg': msg}
5,350,258
def dice(y_true, y_pred):
    """
    Attention: y_true can be weighted to modify learning, therefore
    apply sign to get back to labels.
    y_pred has to be rounded to the nearest integer to obtain labels.
    """
    smooth = 1.
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
5,350,259
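A quick worked example for the smoothed Dice coefficient above, assuming binary label arrays; the values are invented for illustration.

import numpy as np

y_true = np.array([[1, 1, 0], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])

# intersection = 2, sums = 3 and 3, so (2*2 + 1) / (3 + 3 + 1) = 5/7
print(dice(y_true, y_pred))  # ~0.714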
def output_video(input_file, output_file):
    """ Given input_file video, save annotated video to output_file """
    video = VideoFileClip(input_file)
    final_video = video.fl_image(process_image)
    final_video.write_videofile(output_file, audio=False)
5,350,260
def get_long_description(filename):
    """Return entire contents of *filename*."""
    with open(os.path.join(WORKING_DIR, filename)) as fh:
        return fh.read()
5,350,261
def get_default_language():
    """
    Returns the default language code based on the data from LANGUAGES.json.
    """
    for language_code, language_data in MEDICINE_LANGUAGE_DATA.items():
        if 'DEFAULT' in language_data:
            if language_data['DEFAULT']:
                return language_code
    return 'en'
5,350,262
def triple_triple(r, p=qt.QH([1, 0, 0, 0])):
    """Use three triple products for rotations and boosts."""
    # Note: 'qtype' provides a record of what algebraic operations were done to create a quaternion.
    return triple_sandwich(r, p).add(triple_2_on_1(r, p), qtype="triple_triple")
5,350,263
def _pinv_trunc(x, miss): """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" u, s, v = linalg.svd(x, full_matrices=False) # Eigenvalue truncation varexp = np.cumsum(s) varexp /= varexp[-1] n = np.where(varexp >= (1.0 - miss))[0][0] + 1 logger.info(' Truncating at %d/%d components to omit less than %g ' '(%0.2g)' % (n, len(s), miss, 1. - varexp[n - 1])) s = 1. / s[:n] inv = ((u[:, :n] * s) @ v[:n]).T return inv, n
5,350,264
def mog_loglike(x, means, icovs, dets, pis): """ compute the log likelihood according to a mixture of gaussians with means = [mu0, mu1, ... muk] icovs = [C0^-1, ..., CK^-1] dets = [|C0|, ..., |CK|] pis = [pi1, ..., piK] (sum to 1) at locations given by x = [x1, ..., xN] """ xx = np.atleast_2d(x) centered = xx[:,:,np.newaxis] - means.T[np.newaxis,:,:] solved = np.einsum('ijk,lji->lki', icovs, centered) logprobs = -0.5*np.sum(solved * centered, axis=1) - np.log(2*np.pi) - 0.5*np.log(dets) + np.log(pis) logprob = scpm.logsumexp(logprobs, axis=1) if len(x.shape) == 1: return logprob[0] else: return logprob
5,350,265
def align_chunks(array: da.core.Array, scale_factors: Sequence[int]) -> da.core.Array:
    """
    Ensure that all chunks are divisible by scale_factors
    """
    new_chunks = {}
    for idx, factor in enumerate(scale_factors):
        aligned = aligned_coarsen_chunks(array.chunks[idx], factor)
        if aligned != array.chunks[idx]:
            new_chunks[idx] = aligned
    if new_chunks:
        array = array.rechunk(new_chunks)
    return array
5,350,266
def getWordScore(word):
    """
    Computes the score of a word (no bingo bonus is added).

    word: The word to score (a string).

    returns: score of the word.
    """
    if len(word) == HAND_SIZE:
        score = 50
    else:
        score = 0
    for letter in word:
        score = score + SCRABBLE_LETTER_VALUES[letter]
    return score
5,350,267
def autolabel(rects, ax, error_bar, rotation=90, color="black", fontsize=None): """Attach a text label above each bar in *rects*, displaying its height.""" if isinstance(error_bar, dict): error_bar = error_bar.values() if error_bar is None: error_bar = [np.nan for _ in range(len(rects))] for rect, error_value_i in zip(rects, error_bar): height = rect.get_height() height_rounded = round(height, 2) error_rounded = round(error_value_i, 2) # text_y = height + error_value_i text_y = 0.02 if np.isnan(height_rounded): continue if np.isnan(error_rounded): ax.annotate( f"{height_rounded:.02f}", xy=(rect.get_x() + rect.get_width() / 2, text_y), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha="center", va="bottom", rotation=rotation, color=color, fontsize=fontsize, ) else: ax.annotate( f"{height_rounded:.02f}\u00B1{error_rounded:.02f}", xy=(rect.get_x() + rect.get_width() / 2, text_y), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha="center", va="bottom", rotation=rotation, color=color, fontsize=fontsize, )
5,350,268
def get_lang_list(source_text, key=None, print_meta_data=False):
    """
    Inputs:
        source_text - source text as a string
        key - google api key, needed or function will raise an error

    returns the detected language identifier
    """
    # set up url request to google translate api
    if not key:
        raise Exception("You dont have a key")
    url_shell = 'https://www.googleapis.com/language/translate/v2/detect?key={0}&q={1}'
    url = url_shell.format(key, source_text)
    response = requests.get(url)
    lang_json = json.loads(response.text)
    source_lang = lang_json['data']['detections'][0][0]['language']
    # if print_meta_data:
    #     print 'Is detection reliable: {0}'.format(data_dict['data']['detections']['isReliable'])
    #     print 'Confidence: {0}'.format(data_dict['data']['detections']['confidence'])
    return source_lang
5,350,269
def single_value_rnn_regressor(num_units,
                               sequence_feature_columns,
                               context_feature_columns=None,
                               cell_type='basic_rnn',
                               num_rnn_layers=1,
                               optimizer_type='SGD',
                               learning_rate=0.1,
                               momentum=None,
                               gradient_clipping_norm=5.0,
                               input_keep_probability=None,
                               output_keep_probability=None,
                               model_dir=None,
                               config=None,
                               feature_engineering_fn=None):
    """Create an RNN `Estimator` that predicts single values.

    The input function passed to this `Estimator` optionally contains the key
    `RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
    `RNNKeys.SEQUENCE_LENGTH_KEY` must be a vector of size `batch_size` where
    entry `n` corresponds to the length of the `n`th sequence in the batch. The
    sequence length feature is required for batches of varying sizes. It will be
    used to calculate loss and evaluation metrics. If
    `RNNKeys.SEQUENCE_LENGTH_KEY` is not included, all sequences are assumed to
    have length equal to the size of dimension 1 of the input to the RNN.

    In order to specify an initial state, the input function must include keys
    `STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
    elements in `cell.state_size`. The input function must contain values for
    all state components or none of them. If none are included, then the default
    (zero) state is used as an initial state. See the documentation for
    `dict_to_state_tuple` and `state_tuple_to_dict` for further details.

    The `predict()` method of the `Estimator` returns a dictionary with keys
    `RNNKeys.PREDICTIONS_KEY` and `STATE_PREFIX_i` for `0 <= i < n` where `n` is
    the number of nested elements in `cell.state_size`. The value keyed by
    `RNNKeys.PREDICTIONS_KEY` has shape `[batch_size, padded_length]`. Here,
    `padded_length` is the largest value in the `RNNKeys.SEQUENCE_LENGTH`
    `Tensor` passed as input. Entry `[i, j]` is the prediction associated with
    sequence `i` and time step `j`.

    Args:
        num_units: The size of the RNN cells. This argument has no effect if
            `cell_type` is an instance of `RNNCell`.
        sequence_feature_columns: An iterable containing all the feature columns
            describing sequence features. All items in the set should be
            instances of classes derived from `FeatureColumn`.
        context_feature_columns: An iterable containing all the feature columns
            describing context features, i.e., features that apply across all
            time steps. All items in the set should be instances of classes
            derived from `FeatureColumn`.
        cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
            'basic_rnn', 'lstm' or 'gru'.
        num_rnn_layers: Number of RNN layers. Leave this at its default value 1
            if passing a `cell_type` that is already a `MultiRNNCell`.
        optimizer_type: The type of optimizer to use. Either a subclass of
            `Optimizer`, an instance of an `Optimizer` or a string. Strings must
            be one of 'Adagrad', 'Momentum' or 'SGD'.
        learning_rate: Learning rate. This argument has no effect if `optimizer`
            is an instance of an `Optimizer`.
        momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
        gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
            then no clipping is performed.
        input_keep_probability: Probability to keep inputs to `cell`. If `None`,
            no dropout is applied.
        output_keep_probability: Probability to keep outputs of `cell`. If
            `None`, no dropout is applied.
        model_dir: The directory in which to save and restore the model graph,
            parameters, etc.
        config: A `RunConfig` instance.
        feature_engineering_fn: Takes features and labels which are the output
            of `input_fn` and returns features and labels which will be fed into
            `model_fn`. Please check `model_fn` for a definition of features and
            labels.

    Returns:
        An initialized `Estimator`.
    """
    cell = _to_rnn_cell(cell_type, num_units, num_rnn_layers)
    target_column = layers.regression_target()
    if optimizer_type == 'Momentum':
        optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
        cell=cell,
        target_column=target_column,
        problem_type=ProblemType.REGRESSION,
        prediction_type=PredictionType.SINGLE_VALUE,
        optimizer=optimizer_type,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        learning_rate=learning_rate,
        gradient_clipping_norm=gradient_clipping_norm,
        input_keep_probability=input_keep_probability,
        output_keep_probability=output_keep_probability,
        name='SingleValueRnnRegressor')
    return estimator.Estimator(model_fn=dynamic_rnn_model_fn,
                               model_dir=model_dir,
                               config=config,
                               feature_engineering_fn=feature_engineering_fn)
5,350,270
def token():
    """ Return a unique 32-char write-token """
    return str(uuid.uuid4().hex)
5,350,271
def _get_caller_caller_module_name():
    """Return name of module which calls the function from which this function
    is invoked"""
    frame = currentframe().f_back.f_back
    return getmodule(frame).__name__
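# Usage sketch (added for illustration; the names below are hypothetical). A
# library entry point can call the helper above to learn which module invoked
# it; this assumes the module-level `currentframe`/`getmodule` imports that the
# helper itself relies on.
def _library_entry_point():
    # Two frames above _get_caller_caller_module_name is whoever called us.
    return _get_caller_caller_module_name()

def _demo_caller_module():
    print(_library_entry_point())  # e.g. "__main__" when run as a script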
5,350,272
def percentile(x: np.ndarray, percentile: float = 99) -> Tuple[float, float]:
    """Get the (low, high) limit for the series by only including the data
    within the given percentile.

    For example, if percentile is 99, (1st percentile, 99th percentile) will be
    returned. Also, if percentile is 1, (1st percentile, 99th percentile) will
    be returned.

    Args:
        x: the series
        percentile: the percentile, beyond which to exclude data.

    Returns:
        (low, high) percentiles of series
    """
    percentile = max(percentile, 100 - percentile)
    high = np.percentile(x, percentile)
    low = np.percentile(x, 100 - percentile)
    return (low, high)
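# Usage sketch (added for illustration; variable names are made up). Clips a
# noisy series to its central 99% range, e.g. for choosing plot limits; assumes
# numpy is imported as np, as the function above already requires.
def _demo_percentile():
    rng = np.random.default_rng(0)
    series = rng.normal(size=10_000)
    low, high = percentile(series, percentile=99)
    clipped = np.clip(series, low, high)
    print(f"central 99% limits: ({low:.3f}, {high:.3f}), {clipped.size} points kept")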
5,350,273
def get_versions_data(
    hidden=None,
    is_unreleased=None,
    find_latest_release=None,
    sort_key=None,
    labels=None,
    suffix_latest_release=' (latest release)',
    suffix_unreleased=' (dev)',
    find_downloads=None,
):
    """Get the versions data, to be serialized to json."""
    if hidden is None:
        hidden = []
    if is_unreleased is None:
        is_unreleased = _is_unreleased
    if find_latest_release is None:
        find_latest_release = _find_latest_release
    if find_downloads is None:
        find_downloads = _find_downloads
    if sort_key is None:
        sort_key = parse_version
    if labels is None:
        labels = {}
    folders = sorted(
        [
            str(f)
            for f in Path().iterdir()
            if (
                f.is_dir()
                and not str(f).startswith('.')
                and not str(f).startswith('_')
            )
        ],
        key=sort_key,
    )
    labels = {folder: labels.get(folder, str(folder)) for folder in folders}
    versions = []
    unreleased = []
    for folder in folders:
        if folder not in hidden:
            versions.append(folder)
        if is_unreleased(folder):
            unreleased.append(folder)
            labels[folder] += suffix_unreleased
    latest_release = find_latest_release(
        [f for f in versions if f not in unreleased]
    )
    outdated = []
    if latest_release is not None:
        labels[latest_release] += suffix_latest_release
        outdated = [
            folder
            for folder in versions
            if (folder != latest_release and folder not in unreleased)
        ]
    versions_data = {
        # list of *all* folders
        'folders': folders,
        #
        # folder => labels for every folder in "Versions"
        'labels': labels,
        #
        # list folders that appear in "Versions"
        'versions': versions,
        #
        # list of folders that do not appear in "Versions"
        'hidden': hidden,
        #
        # list of folders that should warn & point to latest release
        'outdated': outdated,
        #
        # list of dev-folders that should warn & point to latest release
        'unreleased': unreleased,
        #
        # the latest stable release folder
        'latest_release': latest_release,
        #
        # folder => list of (label, file)
        'downloads': {folder: find_downloads(folder) for folder in folders},
    }
    return versions_data
5,350,274
def cgan_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
    """ Train using a conditional GAN """
    if use_gpu:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    dtype = torch.double
    vh = VariableHandler(device=device, dtype=dtype)

    # Make sure inputs are numpy arrays
    Xtrain = np.asarray(Xtrain, dtype=np.float64)
    Ytrain = np.asarray(Ytrain, dtype=np.float64)
    Xdev = np.asarray(Xdev, dtype=np.float64)
    Ydev = np.asarray(Ydev, dtype=np.float64)

    # Sizes
    batch_size = 64
    input_height = 32
    input_width = 64
    nsample_lbls = 16
    nsample_noise = 10
    noise_size = 100
    nlabels = Xtrain.shape[1]
    torch.manual_seed(5_465_462)

    # Construct the G and D models
    D = Discriminator().to(device=device, dtype=dtype)
    G = Generator(noise_size, vh).to(device=device, dtype=dtype)

    # The number of times entire dataset is trained
    nepochs = 500

    # Learning rate
    lr_D = 1e-3
    lr_G = 1e-3
    decay_rate = 0.98

    # Loss and optimizers
    criterion = nn.BCELoss().to(device=device)
    D_optimizer = optim.SGD(D.parameters(), lr=lr_D, momentum=0.5, nesterov=True)
    G_optimizer = optim.SGD(G.parameters(), lr=lr_G, momentum=0.5, nesterov=True)
    D_scheduler = optim.lr_scheduler.StepLR(D_optimizer, step_size=1, gamma=decay_rate)
    G_scheduler = optim.lr_scheduler.StepLR(G_optimizer, step_size=1, gamma=decay_rate)

    # Tensorboard writer
    writer = SummaryWriter()
    logdir = writer.file_writer.get_logdir()
    model_name = "CGAN"

    # Validation images, labels and noise
    xdev_sub = vh.tovar(Xdev[:nsample_lbls, :])
    ydev_sub = vh.tovar(Ydev[:nsample_lbls, :])
    valimgs = ydev_sub.view(nsample_lbls, -1, input_height, input_width)
    vallbl = xdev_sub.expand(input_height, input_width, nsample_lbls, nlabels).permute(
        2, 3, 0, 1
    )
    grid = vutils.make_grid(valimgs, nrow=nsample_lbls, normalize=True, scale_each=True)
    writer.add_image("True PDF", grid, 0)
    fixed_noise = vh.tovar(
        torch.rand(nsample_noise, noise_size)
        .to(device=device)
        .repeat(1, nsample_lbls)
        .reshape(-1, noise_size)
    )
    fixed_labels = xdev_sub.repeat(nsample_noise, 1)

    # Graphs in Tensorboard
    xdummy = vh.tovar(torch.rand(1, 1, input_height, input_width))
    ldummy = vh.tovar(torch.rand(1, nlabels, input_height, input_width))
    writer.add_graph(D, (xdummy, ldummy), verbose=False)
    writer.add_graph(G, (fixed_noise, fixed_labels), verbose=False)

    # Train the model
    nbatches = Xtrain.shape[0] // batch_size
    D.train()
    for epoch in range(nepochs):
        G.train()
        permutation = torch.randperm(Xtrain.shape[0])

        for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
            # Global step
            step = epoch * nbatches + batch

            # Take a batch
            indices = permutation[i : i + batch_size]
            batch_x = vh.tovar(Xtrain[indices, :])
            batch_y = vh.tovar(Ytrain[indices, :])

            # Reshape these for the D network
            actual_batch_size = batch_x.shape[0]
            labels = batch_x.expand(
                input_height, input_width, actual_batch_size, nlabels
            ).permute(2, 3, 0, 1)
            imgs = batch_y.view(actual_batch_size, -1, input_height, input_width)
            noise = vh.tovar(torch.rand((actual_batch_size, noise_size)))

            # Real and fake labels
            real_label = vh.tovar(torch.ones(actual_batch_size, 1))
            fake_label = vh.tovar(torch.zeros(actual_batch_size, 1))

            # update the D network
            D_optimizer.zero_grad()

            D_real = D(imgs, labels)
            D_real_loss = criterion(D_real, real_label)

            G_ = G(noise, batch_x)
            D_fake = D(G_, labels)
            D_fake_loss = criterion(D_fake, fake_label)

            D_loss = D_real_loss + D_fake_loss
            writer.add_scalar("D_real_loss", D_real_loss.item(), step)
            writer.add_scalar("D_fake_loss", D_fake_loss.item(), step)
            writer.add_scalar("D_loss", D_loss.item(), step)

            D_loss.backward()
            D_optimizer.step()

            # update G network
            G_optimizer.zero_grad()

            G_ = G(noise, batch_x)
            D_fake = D(G_, labels)
            G_loss = criterion(D_fake, real_label)
            writer.add_scalar("G_loss", G_loss.item(), step)

            G_loss.backward()
            G_optimizer.step()

            if batch % 10 == 0:
                print(
                    "Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], D_loss: {4:.4e}, G_loss: {5:.4e}".format(
                        epoch + 1,
                        nepochs,
                        batch + 1,
                        nbatches,
                        D_loss.item(),
                        G_loss.item(),
                    )
                )

        # Adaptive time step
        G_scheduler.step()
        D_scheduler.step()
        for param_group in D_optimizer.param_groups:
            print("Current learning rate for discriminator:", param_group["lr"])
        for param_group in G_optimizer.param_groups:
            print("                          for generator:", param_group["lr"])

        # Visualize results in Tensorboard
        G.eval()
        samples = G(fixed_noise, fixed_labels)
        grid = vutils.make_grid(
            samples, nrow=nsample_lbls, normalize=True, scale_each=True
        )
        writer.add_image("Generator", grid, step)

        # Save the models
        torch.save(G.state_dict(), os.path.join(logdir, model_name + "_G.pkl"))
        torch.save(D.state_dict(), os.path.join(logdir, model_name + "_D.pkl"))

    writer.close()

    # Stuff we need to do to get plots...
    G.eval()
    mtrain = G.predict(Xtrain)
    mdev = G.predict(Xdev)

    # Summarize training
    summarize_training(
        Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
    )

    return mtrain, mdev, G
5,350,275
def wheels(
    package_name: str = Argument(..., help="The name of the package to show wheel info for"),
    version: str = Argument(
        None,
        help="The version of the package to show info for, defaults to latest, can be omitted if using package_name==version",
    ),
    supported_only: bool = Option(False, help="Only show wheels supported on the current platform"),
):
    """See detailed information about all the wheels of a release of a package"""
    if not version and "==" in package_name:
        package_name, _, version = package_name.partition("==")
    url = f"{base_url}/pypi/{quote(package_name)}{f'/{quote(version)}' if version else ''}/json"
    with console.status("Getting data from PyPI"):
        response = session.get(url)
    if response.status_code != 200:
        if response.status_code == 404:
            rich.print("[red]:no_entry_sign: Project or version not found[/]")
        else:
            rich.print(
                f"[orange]:grey_exclamation: Some error occurred. response code {response.status_code}[/]"
            )
        raise typer.Exit()
    parsed_data = json.loads(response.text)

    from packaging.version import parse as parse_version  # pylint: disable=import-outside-toplevel
    from rich.text import Text  # pylint: disable=import-outside-toplevel

    # def is_wheel_supported(wheel_name):
    #     try:
    #         tag = parse_tag("-".join(wheel_name.split("-")[2:]))
    #     except Exception as e:
    #         return "white"
    #     if not tag:
    #         return "white"
    #     else:
    #         if list(tag)[-1] in sys_tags():
    #             return "green"
    #         else:
    #             return "red"

    data = parsed_data["urls"]

    from itertools import cycle  # pylint: disable=import-outside-toplevel

    colors = cycle(["green", "blue", "magenta", "cyan", "yellow", "red"])
    wheel_panels = []
    if supported_only:
        from packaging.tags import parse_tag, sys_tags  # pylint: disable=import-outside-toplevel
        from wheel_filename import InvalidFilenameError, parse_wheel_filename

        def is_wheel_supported(wheel):
            try:
                parsed_wheel_file = parse_wheel_filename(wheel["filename"])
            except InvalidFilenameError:
                return True
            for tag in parsed_wheel_file.tag_triples():
                if any(tag in sys_tags() for tag in list(parse_tag(tag))):
                    return True
            return False

        data = filter(is_wheel_supported, data)

    from datetime import timezone  # pylint: disable=import-outside-toplevel

    for wheel in data:
        wheel_name = Text(wheel["filename"])
        # Maybe use the regex in https://github.com/jwodder/wheel-filename/blob/master/src/wheel_filename/__init__.py#L45-L53
        wheel_name.highlight_regex(
            r"^(?P<distribution>\w+)-(?P<version>[A-Za-z0-9\.\-]+)(?P<build_tag>-\w{0,3})?-(?P<python_tag>[a-z]{2}[0-9]{0,3})-(?P<abi_tag>\w+)-(?P<platform_tag>.+)(?P<file_extension>\.whl)$",
            style_prefix="wheel.",
        )
        wheel_panels.append(
            Panel(
                "\n".join(
                    filter(
                        None,
                        [
                            f"[blue]Comment:[/] {wheel['comment_text']}" if wheel["comment_text"] else None,
                            f"[magenta]Has Signature[/]: {wheel['has_sig']}",
                            f"[cyan]Package Type:[/] {wheel['packagetype']}",
                            f"[green]Requires Python:[/] {wheel['requires_python']}" if not wheel["requires_python"] is None else None,
                            f"[yellow]Size:[/] {humanize.naturalsize(wheel['size'], binary=True)}",
                            f"[bright_cyan]Yanked Reason[/]: {wheel['yanked_reason']}" if wheel["yanked"] else None,
                            f"[red]Upload Time[/]: {humanize.naturaltime(utc_to_local(datetime.strptime(wheel['upload_time_iso_8601'], '%Y-%m-%dT%H:%M:%S.%fZ'), timezone.utc))}",
                        ],
                    )
                ),
                title=f"[white]{wheel_name}[/]" if not wheel_name.plain.endswith(".whl") else wheel_name,
                border_style=next(colors),
            )
        )

    from rich.columns import Columns  # pylint: disable=import-outside-toplevel

    console.print(Columns(wheel_panels))
5,350,276
def _list_of_files() -> List[str]:
    """
    Return the list of waypoint story files

    :return:
    """
    file_list = (
        f for f in os.listdir(waypoint_directory_path) if f.endswith("." + "yml")
    )
    waypoint_list_file = []
    for file in file_list:
        if not file.startswith("_"):
            waypoint_list_file.append(file)
    return waypoint_list_file
5,350,277
def _parse_figsize(kwargs):
    """
    Translate `figsize` into proplot-specific `figwidth` and `figheight` keys.
    """
    # WARNING: Cannot have Figure.__init__() interpret figsize() because
    # the figure manager fills it with the matplotlib default.
    figsize = kwargs.pop('figsize', None)
    figwidth = kwargs.pop('figwidth', None)
    figheight = kwargs.pop('figheight', None)
    if figsize is not None:
        figsize_width, figsize_height = figsize
        figwidth = _not_none(figwidth=figwidth, figsize_width=figsize_width)
        figheight = _not_none(figheight=figheight, figsize_height=figsize_height)
    kwargs['figwidth'] = figwidth
    kwargs['figheight'] = figheight
5,350,278
def p_jump_statement(t):
    """
    jump_statement : CONTINUE SEMI
                   | BREAK SEMI
                   | RETURN expression_opt SEMI
    """
    pass
5,350,279
def extract_pvdata(h5file, timestamp, pvnames=None):
    """
    Extract a snapshot of PV values nearest a timestamp from a BSA HDF5 file.

    Parameters
    ----------
    h5file: str
        BSA HDF5 file with data that includes the timestamp
    timestamp: datetime-like, str, int, float
        This must be localized (not naive time).
    pvnames: list, optional
        PV names to extract. If None, all PVs in the file are returned.

    Returns
    -------
    pvdata: dict
        Dict of pvname:value
    found_timestamp : pd.Timestamp
        The exact time that the data was tagged at

    See Also
    --------
    bsa_snapshot
    """
    timestamp = pd.Timestamp(timestamp).tz_convert('UTC')  # Convert to UTC

    with h5py.File(h5file) as h5:
        # Use pandas to get the nearest time
        s = h5['secondsPastEpoch'][:, 0]
        ns = h5['nanoseconds'][:, 0]
        df = pd.DataFrame({'s': s, 'ns': ns})
        df['time'] = pd.to_datetime(df['s'], unit='s', utc=True) + pd.to_timedelta(
            df['ns'], unit='nanoseconds'
        )

        # Assure that the time is in here
        assert timestamp <= df.time.iloc[-1]
        assert timestamp >= df.time.iloc[0]

        # Search for the nearest time
        ix = df.time.searchsorted(timestamp)
        found_timestamp = df['time'].iloc[ix]

        # form snapshot dict
        pvdata = {}

        # Return everything
        if pvnames is None:
            pvnames = list(h5)

        for pvname in pvnames:
            if pvname in h5:
                pvdata[pvname] = np.squeeze(h5[pvname][ix])
            else:
                pvdata[pvname] = None

    return pvdata, found_timestamp
5,350,280
def decorate_func_with_plugin_arg(f):
    """Decorate a function that takes a plugin as an argument.

    A "plugin" is a pair of simulation and postprocess plugins. The decorator
    expands this pair.
    """
    @functools.wraps(f)
    def wrapper(self, plugins_tuple):
        return f(self, plugins_tuple[0], plugins_tuple[1])
    return wrapper
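# Usage sketch (added for illustration; the class and plugin names are
# hypothetical). The decorated method is written against two separate plugin
# arguments but is invoked with a single (simulation, postprocess) pair; this
# assumes functools is imported at module level, as the decorator requires.
class _DemoRunner:
    @decorate_func_with_plugin_arg
    def report(self, sim_plugin, post_plugin):
        return "sim={}, post={}".format(sim_plugin, post_plugin)

def _demo_plugin_decorator():
    print(_DemoRunner().report(("navier_stokes", "vorticity_plot")))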
5,350,281
def datestr(date=None):
    """Convert timestamps to strings in a predefined format"""
    if date is None:
        date = datetime.utcnow()
    if isinstance(date, str):
        date = parse_time(date)
    return date.strftime("%y-%m-%d %H:%M:%S")
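# Usage sketch (added for illustration): format "now" for a log line. This
# assumes the module-level datetime import that datestr itself relies on.
def _demo_datestr():
    print(datestr())  # e.g. "24-05-01 13:37:00" (current UTC time)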
5,350,282
def validate_release_tag_param(arg_value):
    """
    User defined helper function to validate that the release_tag parameter
    follows the correct naming convention

    :param arg_value: release tag parameter passed through the command line arguments
    :return: arg_value
    """
    release_tag_regex = re.compile(r'[0-9]{4}Q[0-9]R[0-9]')
    if not re.match(release_tag_regex, arg_value):
        msg = f"Parameter ERROR {arg_value} is in an incorrect format, accepted: YYYYQ#R#"
        LOGGER.error(msg)
        raise argparse.ArgumentTypeError(msg)
    return arg_value
5,350,283
def init():
    """Connect to the keyboard, switch all lights off"""
    global bufferC  # Buffer with the full key/lights mapping
    global device

    device = hid.device()
    # 0x17cc: Native Instruments. 0x1410: KK S88 MK1
    device.open(0x17cc, pid)
    device.write([0xa0])

    bufferC = [0x00] * numkeys
    notes_off()
    return True
5,350,284
def render(path):
    """ Render the knowledge post with all the related formatting. """
    mode = request.args.get('render', 'html')
    username, user_id = current_user.identifier, current_user.id

    tmpl = 'markdown-rendered.html'
    if mode == 'raw':
        tmpl = 'markdown-raw.html'
    elif mode == 'presentation':
        # TODO(dan?) fix presentation post
        # presentation_post = {}
        # presentation_post['authors_string'] = post.author_string
        # presentation_post['tldr'] = post.tldr
        # presentation_post['html'] = html
        # html = create_presentation_text(presentation_post)
        tmpl = "markdown-presentation.html"

    post = (db_session.query(Post)
                      .filter(Post.path == path)
                      .first())
    if not post:
        knowledge_aliases = current_repo.config.aliases
        if path in knowledge_aliases:
            # TODO: reframe as redirect
            post = (db_session.query(Post)
                              .filter(Post.path == knowledge_aliases[path])
                              .first())

    # If post is None ...
    if not post:
        if not current_app.config.get('INDEXING_ENABLED', True):
            # ... and indexing is disabled...
            return _render_preview(path=path, tmpl=tmpl)  # try rendering in preview mode
        else:
            # ...otherwise, raise exception that post wasn't found
            raise Exception(u"unable to find post at {}".format(path))

    if post.contains_excluded_tag:
        # It's possible that someone gets a direct link to a post that has an excluded tag
        return render_template("error.html")

    if post.private and not (username in post.authors or username in current_repo.config.editors):
        allowed_users = set(user.id for group in post.groups for user in group.users)
        if user_id not in allowed_users:
            return render_template("permission_ask.html", authors=post.authors_string)

    rendered = render_post(post, with_toc=True)
    raw_post = render_post_raw(post) if mode == 'raw' else None

    comments = post.comments
    for comment in comments:
        author = db_session.query(User).filter(User.id == comment.user_id).first()
        if author is not None:
            comment.author = author.format_name
        else:
            comment.author = 'Anonymous'
        if mode != 'raw':
            comment.text = render_comment(comment)

    user_obj = current_user

    tags_list = [str(t.name) for t in post.tags]
    user_subscriptions = [str(s) for s in user_obj.subscriptions]

    is_author = user_id in [author.id for author in post.authors]

    web_editor_prefixes = current_app.config['WEB_EDITOR_PREFIXES']
    is_webpost = False
    if web_editor_prefixes:
        is_webpost = any(prefix for prefix in web_editor_prefixes if path.startswith(prefix))

    rendered = render_template(tmpl,
                               html=rendered['html'],
                               toc=rendered['toc'],
                               post_id=post.id,
                               post_path=path,
                               raw_post=raw_post,
                               comments=comments,
                               username=username,
                               post_author=post.authors_string,
                               title=post.title,
                               page_views=post.view_count,
                               unique_views=post.view_user_count,
                               likes=post.vote_counted_for_user(user_id=user_id),
                               total_likes=post.vote_count,
                               tags_list=tags_list,
                               user_subscriptions=user_subscriptions,
                               show_webeditor_button=is_webpost and is_author,
                               webeditor_buttons=is_webpost,
                               web_uri=post.kp.web_uri,
                               table_id=None,
                               is_private=(post.private == 1),
                               is_author=is_author,
                               can_download=permissions.post_download.can(),
                               downloads=post.kp.src_paths)
    return rendered
5,350,285
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
    """
    Computes accuracy and average confidence for a bin.

    Args:
        conf_thresh_lower (float): Lower Threshold of confidence interval
        conf_thresh_upper (float): Upper Threshold of confidence interval
        conf (numpy.ndarray): list of confidences
        pred (numpy.ndarray): list of predictions
        true (numpy.ndarray): list of true labels

    Returns:
        (accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and
        number of elements in bin.
    """
    filtered_tuples = [x for x in zip(pred, true, conf)
                       if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper]
    if len(filtered_tuples) < 1:
        return 0, 0, 0
    else:
        correct = len([x for x in filtered_tuples if x[0] == x[1]])  # How many labels are correct
        len_bin = len(filtered_tuples)  # How many elements fall into the given bin
        avg_conf = sum([x[2] for x in filtered_tuples]) / len_bin  # Avg confidence of BIN
        accuracy = float(correct) / len_bin  # accuracy of BIN
        return accuracy, avg_conf, len_bin
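# Usage sketch (added for illustration; the numbers are made up). Computes the
# accuracy and mean confidence of the predictions whose confidence falls in the
# (0.8, 0.9] bin, which is the per-bin building block of ECE-style calibration
# metrics.
def _demo_compute_acc_bin():
    conf = [0.95, 0.85, 0.82, 0.55]
    pred = [1, 0, 1, 1]
    true = [1, 0, 0, 1]
    acc, avg_conf, n = compute_acc_bin(0.8, 0.9, conf, pred, true)
    print(acc, avg_conf, n)  # 0.5, ~0.835, 2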
5,350,286
def list_consumers(ctx: click.Context, full_keys: bool, full_plugins: bool) -> None:
    """List all consumers along with relevant information."""
    session = ctx.obj["session"]
    tablefmt = ctx.obj["tablefmt"]
    font = ctx.obj["font"]

    print_figlet("Consumers", font=font, width=160)

    consumers = get("consumers", lambda: general.all_of("consumers", session))
    plugins = get("plugins", lambda: general.all_of("plugins", session))
    acls = get("acls", lambda: general.all_of("acls", session))
    basic_auths = get("basic-auth", lambda: general.all_of("basic-auths", session))
    key_auths = get("key-auth", lambda: general.all_of("key-auths", session))

    data = []
    for c in consumers:
        cdata = {
            "id": c["id"],
            "custom_id": c.get("custom_id", ""),
            "username": c.get("username", ""),
            "acl_groups": set(),
            "plugins": [],
            "basic_auth": set(),
            "key_auth": set(),
        }
        for a in acls:
            substitude_ids(a)
            if c["id"] == a.get("consumer.id"):
                cdata["acl_groups"] |= {a["group"]}
        for p in plugins:
            substitude_ids(p)
            if c["id"] == p.get("consumer.id"):
                if full_plugins:
                    cdata["plugins"] += [(p["name"], p["config"])]
                else:
                    cdata["plugins"] += [p["name"]]
        for b in basic_auths:
            substitude_ids(b)
            if c["id"] == b.get("consumer.id"):
                cdata["basic_auth"] |= {f'{b["username"]}:xxx'}
        for k in key_auths:
            substitude_ids(k)
            if c["id"] == k.get("consumer.id"):
                key = k["key"]
                if not full_keys:
                    key = f"{key[:6]}..."
                cdata["key_auth"] |= {key}

        cdata["acl_groups"] = "\n".join(sorted(cdata["acl_groups"]))
        if full_plugins:
            cdata["plugins"] = "\n".join(
                f"{name}:\n{json_pretty(p)}" for name, p in sorted(cdata["plugins"])
            )
        else:
            cdata["plugins"] = "\n".join(sorted(cdata["plugins"]))
        cdata["basic_auth"] = "\n".join(sorted(cdata["basic_auth"]))
        cdata["key_auth"] = "\n".join(sorted(cdata["key_auth"]))
        data.append(cdata)

    data.sort(key=lambda d: (len(d["custom_id"]), d["username"]))
    click.echo(tabulate(data, headers="keys", tablefmt=tablefmt))
5,350,287
def multi_backend_test(globals_dict,
                       relative_module_name,
                       backends=('jax', 'tensorflow'),
                       test_case=None):
    """Multi-backend test decorator.

    The end goal of this decorator is that the decorated test case is removed,
    and replaced with a set of new test cases that have been rewritten to use
    one or more backends. E.g., a test case named `Test` will by default be
    rewritten to `Test_jax` and `Test_tensorflow`, which use the JAX and
    TensorFlow backends, respectively.

    The decorator works by using the dynamic rewrite system to rewrite imports
    of the module the test is defined in, and inserting the appropriately
    renamed test cases into the `globals()` dictionary of the original module. A
    side-effect of this is that the global code inside the module is run
    `1 + len(backends)` times, so avoid doing anything expensive there. This
    does mean that the original module needs to be in a runnable state, i.e.,
    when it uses symbols from `backend`, those must be actually present in the
    literal `backend` module.

    A subtle point about what this decorator does in the rewritten modules: the
    rewrite system changes the behavior of this decorator to act as a
    passthrough to avoid infinite rewriting loops.

    Args:
        globals_dict: Python dictionary of strings to symbols. Set this to the
            value of `globals()`.
        relative_module_name: Python string. The module name of the module where
            the decorated test resides relative to `fun_mc`. You must not use
            `__name__` for this as that is set to a defective value of
            `__main__` which is sufficiently abnormal that the rewrite system
            does not work on it.
        backends: Python iterable of strings. Which backends to test with.
        test_case: The actual test case to decorate.

    Returns:
        None, to delete the original test case.
    """
    if test_case is None:
        return lambda test_case: multi_backend_test(  # pylint: disable=g-long-lambda
            globals_dict=globals_dict,
            relative_module_name=relative_module_name,
            test_case=test_case)

    if BACKEND is not None:
        return test_case

    if relative_module_name == '__main__':
        raise ValueError(
            'module_name should be written out manually, not by passing __name__.')

    # This assumes `test_util` is 1 level deep inside of `fun_mc`. If we
    # move it, we'd change the `-1` to equal the (negative) nesting level.
    root_name_comps = __name__.split('.')[:-1]
    relative_module_name_comps = relative_module_name.split('.')

    # Register the rewrite hooks.
    importlib.import_module('.'.join(root_name_comps + ['backends', 'rewrite']))

    new_test_case_names = []
    for backend in backends:
        new_module_name_comps = (
            root_name_comps + ['dynamic', 'backend_{}'.format(backend)] +
            relative_module_name_comps)
        # Rewrite the module.
        new_module = importlib.import_module('.'.join(new_module_name_comps))

        # Subclass the test case so that we can rename it (absl uses the class
        # name in its UI).
        base_new_test = getattr(new_module, test_case.__name__)
        new_test = type('{}_{}'.format(test_case.__name__, backend),
                        (base_new_test,), {})

        new_test_case_names.append(new_test.__name__)
        globals_dict[new_test.__name__] = new_test

    # We deliberately return None to delete the original test case from the
    # original module.
5,350,288
def _get_typed_array():
    """Generates a TypedArray constructor.

    There are nine types of TypedArrays and TypedArray has four constructors.

    Types:
        * Int8Array
        * Int16Array
        * Int32Array
        * Uint8Array
        * Uint16Array
        * Uint32Array
        * Uint8ClampedArray
        * Float32Array
        * Float64Array

    Constructors:
        * new TypedArray(length)
        * new TypedArray(typedArray)
        * new TypedArray(object)
        * new TypedArray(buffer)

    Returns:
        A string made up of a randomly chosen type and argument type from the
        lists above.
    """
    array_type = random.choice([
        'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array',
        'Uint32Array', 'Uint8ClampedArray', 'Float32Array', 'Float64Array'
    ])

    # Choose an argument type at random.
    arguments = random.choice([
        # length e.g. 293
        # We choose 2**10 as the upper boundary because the max length allowed
        # by WebBluetooth is 2**10.
        lambda: utils.UniformExpoInteger(0, 10),
        # typedArray e.g. new Uint8Array([1,2,3])
        _get_typed_array,
        # object e.g. [1,2,3]
        lambda: _get_array_of_random_ints(max_length=1000, max_value=2**64),
        # buffer e.g. new Uint8Array(10).buffer
        lambda: _get_typed_array() + '.buffer',
    ])

    return 'new {array_type}({arguments})'.format(
        array_type=array_type, arguments=arguments())
5,350,289
def _is_valid_requirement(requirement: str) -> bool:
    """Returns True if the `requirement.txt` line is valid."""
    is_invalid = (
        not requirement or              # Empty line
        requirement.startswith('#') or  # Comment
        requirement.startswith('-r ')   # Filter the `-r requirement.txt`
    )
    return not is_invalid
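# Usage sketch (added for illustration; the file content is made up). Keeps only
# installable lines from a requirements file.
def _demo_filter_requirements():
    lines = ["numpy>=1.21", "", "# pinned for CI", "-r requirements-dev.txt", "pandas"]
    print([line for line in lines if _is_valid_requirement(line)])
    # -> ['numpy>=1.21', 'pandas']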
5,350,290
def execute(cmd):
    """Execute an arbitrary code string in the app context"""
    from kivy.clock import mainthread
    _result = [None]
    _event = threading.Event()

    @mainthread
    def _real_execute():
        from kivy.app import App
        app = App.get_running_app()
        idmap = {"app": app}
        try:
            exec(cmd, idmap)
        except Exception as e:
            _result[:] = [u"{}".format(e)]
        _event.set()

    _real_execute()
    _event.wait()
    return _result[0]
5,350,291
def _election(__PIGS__):
    """
    this method will perform an election after each revolution.

    *note:* as this is a communist module, the election is protected and only
    __PIGS__ have access to it.
    """
    party = random.choices(__PIGS__, k=min(10, len(__PIGS__)))
    leader = random.choice(party)
    party.remove(leader)
    print(
        "Glorious election has been held with huge amount of participation (99.3%).",
        "\nSecretary General of the Party:", leader,
        "\nPeople's representatives:", party,
    )
5,350,292
def UprevVersionedPackage(input_proto, output_proto, _config):
    """Uprev a versioned package.

    See go/pupr-generator for details about this endpoint.
    """
    chroot = controller_util.ParseChroot(input_proto.chroot)
    build_targets = controller_util.ParseBuildTargets(input_proto.build_targets)
    package = controller_util.PackageInfoToCPV(input_proto.package_info)
    refs = []
    for ref in input_proto.versions:
        refs.append(GitRef(path=ref.repository, ref=ref.ref, revision=ref.revision))

    try:
        result = packages.uprev_versioned_package(package, build_targets, refs, chroot)
    except packages.Error as e:
        # Handle module errors nicely, let everything else bubble up.
        cros_build_lib.Die(e)

    if not result.uprevved:
        # No uprevs executed, skip the output population.
        return

    for modified in result.modified:
        uprev_response = output_proto.responses.add()
        uprev_response.version = modified.new_version
        for path in modified.files:
            uprev_response.modified_ebuilds.add().path = path
5,350,293
def is_scalar(element):
    """An `is_atomic` criterion. Returns `True` for scalar elements.

    Scalar elements are: strings and any object that is not one of:
    collections.Sequence, collections.Mapping, set, or attrs object.

    ```
    import nifty_nesting as nest
    flat = nest.flatten([1, [2, 3]], is_atomic=is_scalar)
    assert flat == [1, 2, 3]
    ```

    Arguments:
        element: The element to check.

    Returns:
        `True` if the element is a scalar, else `False`.
    """
    if isinstance(element, six.string_types):
        return True
    if is_attrs_object(element):
        return False
    if is_sequence(element) or is_set(element):
        return False
    if is_mapping(element):
        return False
    return True
5,350,294
def show_usage():
    """ It prints usage information. """
    print "Usage:"
    print "  %s <network interface> <timeout in minutes>\n" % sys.argv[0]
    print "Example:"
    print "  %s eth0 10\n" % sys.argv[0]
    print "Available Interfaces:"
    if platform.uname()[0].lower() == "windows":
        print_windows_network_interfaces()
    else:
        print_unix_network_interfaces()
5,350,295
def wrap_singleton_string(item: Union[Sequence, str]):
    """ Wrap a single string as a list. """
    if isinstance(item, str):
        # Can't check if iterable, because a string is an iterable of
        # characters, which is not what we want.
        return [item]
    return item
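# Usage sketch (added for illustration): normalise an argument that may be
# either a single string or a sequence of strings.
def _demo_wrap_singleton_string():
    assert wrap_singleton_string("cat") == ["cat"]
    assert wrap_singleton_string(["cat", "dog"]) == ["cat", "dog"]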
5,350,296
def view_milestone_history(request, chosen_year=None):
    """
    http://127.0.0.1:8000/milestones/by-columns/

    :param request:
    :return:
    """
    (chosen_year, basic_query) = get_basic_milestone_history_query(chosen_year)

    milestones = basic_query.order_by('due_on')

    open_closed_cnts = get_issue_counts_query_base().values('open_issues', 'closed_issues')
    num_open_issues = sum(x['open_issues'] for x in open_closed_cnts)
    num_closed_issues = sum(x['closed_issues'] for x in open_closed_cnts)

    mmo = MilestoneMonthOrganizer(milestones)
    #mmo.show()
    #return HttpResponse('ok')

    sorted_repos = mmo.get_sorted_repos()
    if sorted_repos and len(sorted_repos) > 0:
        last_retrieval_time = sorted_repos[0].last_retrieval_time
    else:
        last_retrieval_time = None

    d = {}
    d['page_title'] = 'Previous Milestones for %s' % chosen_year
    d['is_milestone_history_all'] = True
    d['chosen_year'] = chosen_year
    d['last_retrieval_time'] = last_retrieval_time
    d['sorted_repos'] = sorted_repos
    d['organized_months'] = mmo.get_organized_months(descending_order=True)
    d['NO_DUE_DATE'] = RepoMilestoneMonthsOrganizer.NO_DUE_DATE
    d['milestone_count'] = milestones.count()
    d['num_open_issues'] = num_open_issues
    d['num_closed_issues'] = num_closed_issues
    d['hide_description'] = True
    #print(d)

    return render_to_response('milestones/view_history_multi_column.html',
                              d,
                              context_instance=RequestContext(request))
5,350,297
def M_absolute_bol(lum):
    """Computes the absolute bolometric magnitude

    Parameters
    ----------
    lum : `float/array`
        luminosity in solar luminosities

    Returns
    -------
    M_bol : `float/array`
        absolute bolometric magnitude
    """
    log_lum = np.log10(lum)
    M_bol = 4.75 - 2.7 * log_lum
    return M_bol
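# Usage sketch (added for illustration). With the 4.75 zero point and the 2.7
# coefficient used above, 1 L_sun maps to 4.75 and 100 L_sun to about -0.65;
# assumes numpy is imported as np, as the function above already requires.
def _demo_M_absolute_bol():
    print(M_absolute_bol(1.0))    # 4.75
    print(M_absolute_bol(100.0))  # ~ -0.65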
5,350,298
def codes_index_get_double(indexid, key):
    # type: (cffi.FFI.CData, bytes) -> T.List[float]
    """
    Get the list of double values associated to a key.
    The index must be created with such a key (possibly together with other
    keys).

    :param bytes key: the keyword whose list of values has to be retrieved

    :rtype: List(float)
    """
    size = codes_index_get_size(indexid, key)
    values = ffi.new('double[]', size)
    size_p = ffi.new('size_t *', size)
    check_return(lib.codes_index_get_double)(indexid, key, values, size_p)
    return list(values)
5,350,299