content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def encrypt(plaintext, a, b):
    """
    Encryption function for the affine cipher: E(x) = (ax + b) (mod m),
    where m is the number of letters in the encoding system, usually 26.
    :param plaintext: text to encrypt
    :param a: multiplicative key
    :param b: additive key
    :return: ciphertext
    """
    cipher = ""
    for i in plaintext:
        if not i.isalpha():
            cipher += i
        else:
            n = "A" if i.isupper() else "a"
            cipher += chr((a * (ord(i) - ord(n)) + b) % 26 + ord(n))
    return cipher
5,347,600
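A quick usage sketch for the affine-cipher row above (illustrative, not part of the dataset): the keys a=5, b=8 are arbitrary examples, and a must be coprime with 26 for the cipher to be decryptable.

    assert encrypt("Attack at dawn!", 5, 8) == "Izzisg iz xiov!"  # non-letters pass through unchanged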
def threshold(alpha):
    """Only two-to-one mapping is supported.
    """
    alpha[0] = min(1., max(0., (1. + alpha[0] - alpha[1]) * .5))
    alpha[1] = 1. - alpha[0]
5,347,601
def construct_gn_command(output_path, gn_flags, python2_command=None, shell=False):
    """
    Constructs and returns the GN command

    If shell is True, then a single string with shell-escaped arguments is returned
    If shell is False, then a list containing the command and arguments is returned
    """
    gn_args_string = " ".join(
        [flag + "=" + value for flag, value in gn_flags.items()])
    command_list = [str(pathlib.Path("tools", "gn", "bootstrap", "bootstrap.py")),
                    "-v", "-s", "-o", str(output_path),
                    "--gn-gen-args=" + gn_args_string]
    if python2_command:
        command_list.insert(0, python2_command)
    if shell:
        command_string = " ".join([shlex.quote(x) for x in command_list])
        if python2_command:
            return command_string
        else:
            return os.path.join(".", command_string)
    else:
        return command_list
5,347,602
def reads_in_file(file_path):
    """
    Find the number of reads in a file.
    Count the number of lines with bash `wc -l` and divide by 4 if fastq,
    otherwise by 2 (fasta).
    """
    return round(int(subprocess.check_output(["wc", "-l", file_path]).split()[0])
                 / (4 if bin_classify.format == "fastq" else 2))
5,347,603
def _url_as_filename(url: str) -> str:
    """Return a version of the url optimized for local development.

    If the url is a `file://` url, it will return the remaining part
    of the url so it can be used as a local file path. For example,
    'file:///logs/example.txt' will be converted to
    '/logs/example.txt'.

    Parameters
    ----------
    url: str
        The url to check and optimize.

    Returns
    -------
    str:
        The url converted to a filename.
    """
    return url.replace('file://', '')
5,347,604
def hard_max(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]):
    """ONNX Hardmax to XLayer AnyOp conversion function

    Input tensor shape: N dims
    Output tensor shape: 2D
    """
    logger.info("ONNX Hardmax -> XLayer AnyOp")
    assert len(node.get_outputs()) == 1
    name = node.get_outputs()[0]
    bottoms = node.get_inputs()
    node_attrs = node.get_attributes()

    iX = xmap[bottoms[0]]
    d = len(iX.shapes)
    axis = int(node_attrs['axis']) if 'axis' in node_attrs else 1
    if axis < 0:
        axis = d + axis

    in_shape = iX.shapes.tolist()
    dim_0 = int(np.prod(in_shape[:axis]))
    dim_1 = int(np.prod(in_shape[axis:]))

    X = px.ops.any_op(
        op_name=px.stringify(name),
        in_xlayers=[iX],
        any_shape=[dim_0, dim_1],
        onnx_id=name
    )

    return [X]
5,347,605
def sol_files_by_directory(target_path: AnyStr) -> List:
    """Gathers all the .sol files inside the target path
    including sub-directories and returns them as a List.
    Non .sol files are ignored.

    :param target_path: The directory to look for .sol files
    :return:
    """
    return files_by_directory(target_path, ".sol")
5,347,606
def _call(sig, *inputs, **kwargs): """Adds a node calling a function. This adds a `call` op to the default graph that calls the function of signature `sig`, passing the tensors in `inputs` as arguments. It returns the outputs of the call, which are one or more tensors. `sig` is OpDefArg.a `_DefinedFunction` object. You can pass an optional keyword parameter `name=string` to name the added operation. You can pass an optional keyword parameter `noinline=True|False` to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A 2-element tuple. First element: a Tensor if the function returns a single value; a list of Tensors if the function returns multiple value; the Operation if the function returns no values. Second element: the Operation. Raises: ValueError: if the arguments are invalid. """ if len(inputs) != len(sig.input_arg): raise ValueError("Expected number of arguments: %d, received: %d" % (len( sig.input_arg), len(inputs))) name = kwargs.pop("name", None) g = ops.get_default_graph() func_name = sig.name if name is None: name = func_name attrs = _parse_kwargs_as_attrs(func_name, **kwargs) output_types = [dtypes.DType(x.type) for x in sig.output_arg] op = g._create_op_internal( # pylint: disable=protected-access func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig) if op.outputs: if len(op.outputs) == 1: ret = op.outputs[0] else: ret = tuple(op.outputs) else: ret = op return ret, op
5,347,607
def api_url_for(view_name, _absolute=False, _xml=False, *args, **kwargs):
    """Reverse URL lookup for API routes (that use the JSONRenderer or XMLRenderer).
    Takes the same arguments as Flask's url_for, with the addition of `_absolute`,
    which will make an absolute URL with the correct HTTP scheme based on whether
    the app is in debug mode. The _xml flag sets the renderer to use.
    """
    renderer = 'XMLRenderer' if _xml else 'JSONRenderer'
    url = url_for('{0}__{1}'.format(renderer, view_name), *args, **kwargs)
    if _absolute:
        # We do NOT use url_for's _external kwarg because app.config['SERVER_NAME']
        # alters behavior in an unknown way (currently breaks tests). /sloria /jspies
        return urlparse.urljoin(website_settings.DOMAIN, url)
    return url
5,347,608
def cov(x, rowvar=False, bias=False, ddof=None, aweights=None):
    """Estimates covariance matrix like numpy.cov"""
    # ensure at least 2D
    if x.dim() == 1:
        x = x.view(-1, 1)

    # treat each column as a data point, each row as a variable
    if rowvar and x.shape[0] != 1:
        x = x.t()

    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0

    w = aweights
    if w is not None:
        if not torch.is_tensor(w):
            w = torch.tensor(w, dtype=torch.float)
        w_sum = torch.sum(w)
        avg = torch.sum(x * (w / w_sum)[:, None], 0)
    else:
        avg = torch.mean(x, 0)

    # Determine the normalization
    if w is None:
        fact = x.shape[0] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        fact = w_sum - ddof * torch.sum(w * w) / w_sum

    xm = x.sub(avg.expand_as(x))

    if w is None:
        X_T = xm.t()
    else:
        X_T = torch.mm(torch.diag(w), xm).t()

    c = torch.mm(X_T, xm)
    c = c / fact

    return c.squeeze()
5,347,609
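A small sanity check one might run against numpy for the torch-based cov row above (illustrative; assumes numpy and torch are imported as np and torch alongside the function):

    x = torch.tensor([[1.0, 2.0], [2.0, 4.5], [3.0, 6.0]])
    # columns are variables (rowvar=False), so this matches numpy's sample covariance
    assert np.allclose(cov(x).numpy(), np.cov(x.numpy(), rowvar=False))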
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.

    Args:
        f(function): Generator function

    Returns:
        function: The thread-safe generator function.
    """
    def g(*args, **kwargs):
        """
        Args:
            *args(list): List of non-keyworded, variable length arguments.
            **kwargs(dict): Dict of keyworded, variable length arguments.

        Returns:
            threadsafe_iter_3: The thread-safe iterator.
        """
        return threadsafe_iter_3(f(*args, **kwargs))
    return g
5,347,610
def load_many_problems(file, collection):
    """Given a ZIP file containing several ZIP files (each one a problem),
    insert the problems into collection"""
    problems = list()
    try:
        with ZipFile(file) as zfile:
            for filename in zfile.infolist():
                with zfile.open(filename) as curr_file:
                    problem = load_problem_from_file(curr_file)
                    problem.collection = collection
                    problem.author = collection.author
                    problems.append(problem)
    except ZipFileParsingException as excp:
        raise ZipFileParsingException('{}: {}'.format(filename.filename, excp)) from excp
    except Exception as excp:
        raise ZipFileParsingException("{}: {}".format(type(excp), excp)) from excp
    return problems
5,347,611
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False): """ **CheckStricturalModelsValid** - Checks for valid structural model group data given a netCDF root node Parameters ---------- rootGroup: netCDF4.Group The root group node of a Loop Project File xyzGridSize: [int,int,int] or None The 3D grid shape to test data in this node to adhere to verbose: bool A flag to indicate a higher level of console logging (more if True) Returns ------- bool True if valid structural model data in project file, False otherwise. """ valid = True if "StructuralModels" in rootGroup.groups: if verbose: print(" Structural Models Group Present") smGroup = rootGroup.groups.get("StructuralModels") # if verbose: print(smGroup) if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() and "depth" in smGroup.ncattrs(): if xyzGridSize != None: # Check gridSize from extents matches models sizes smGridSize = [smGroup.dimensions["easting"].size,smGroup.dimensions["northing"].size,smGroup.dimensions["depth"].size] if smGridSize != xyzGridSize: print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match") print("(INVALID) Extents Grid Size : ", xyzGridSize) print("(INVALID) Structural Models Grid Size : ", smGridSize) valid = False else: if verbose: print(" Structural Models grid size adheres to extents") else: if verbose: print("No structural models extents in project file") else: if verbose: print("No Structural Models Group Present") return valid
5,347,612
def get_load_balancers():
    """
    Return all load balancers.

    :return: List of load balancers.
    :rtype: list
    """
    return elbv2_client.describe_load_balancers()["LoadBalancers"]
5,347,613
def init_app(app):
    """Makes sure the db gets closed and the init-db command works.

    :param type app: flask.Flask.
    """
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
5,347,614
def get_datasets(folder):
    """
    Returns a dictionary of dataset-ID: dataset directory paths
    """
    paths = glob(f"{folder}/*")
    return {os.path.split(p)[-1]: p for p in paths if os.path.isdir(p)}
5,347,615
def create_dir(path):
    """
    This routine creates directories.
    """
    try:
        os.mkdir(path)
    except OSError:
        print("Creation of the directory %s failed" % path)
    else:
        print("Successfully created the directory %s " % path)
5,347,616
def file_preview(request):
    """
    Live preview of restructuredtext payload - currently not wired up
    """
    f = File(
        heading=request.POST['heading'],
        content=request.POST['content'],
    )
    rendered_base = render_to_string('projects/doc_file.rst.html', {'file': f})
    rendered = restructuredtext(rendered_base)
    json_response = simplejson.dumps({'payload': rendered})
    return HttpResponse(json_response, mimetype='text/javascript')
5,347,617
def makeMask(n):
    """
    return a mask of n bits as an integer
    """
    # `long(2)` was Python 2 syntax; plain ints are unbounded in Python 3
    return (2 << n - 1) - 1
5,347,618
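A worked example for the makeMask row above (illustrative): the function returns a mask with the n low bits set, i.e. 2**n - 1.

    assert makeMask(1) == 0b1
    assert makeMask(4) == 0b1111   # 15
    assert makeMask(8) == 0xFF     # 255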
def link_name_to_index(model):
    """
    Generate a dictionary mapping link names to their indices in the model.
    """
    return {
        link.name: index
        for index, link in enumerate(model.links)
    }
5,347,619
def create_audio_dataset( dataset_path: str, dataset_len=100, **kwargs ) -> pd.DataFrame: """ Creates audio dataset from file structure. Args: playlist_dir: Playlist directory path. # TODO dataset_len (optional): Number of audio files to include. Returns: df: Compiled dataframe representing this dataset. """ # dir_dict = _crawl_dir(dataset_path) num_songs = 0 song_names = [] songs = [] break_outer = False for root, dirs, files in ( dir_iterator := tqdm(os.walk(dataset_path), leave=False) ): if break_outer: dir_iterator.close() break rel_root = root.replace(dataset_path, "", 1) for file in tqdm(files, leave=False): if num_songs >= dataset_len: break_outer = True continue song_name = file if rel_root != "": song_name = f"{rel_root}/{song_name}" song_names.append(song_name) try: songs.append( create_audio_datum(f"{root}/{file}", file, **kwargs) ) except NoBackendError: song_names.pop() continue num_songs += 1 data = { "index": song_names, "columns": ["sampling_rate", "time_signal", "stft"], "data": songs, "index_names": ["songs"], "column_names": ["audio components"], } return pd.DataFrame.from_dict(data, orient="tight") # make into correct df? # for file in os.listdir(dataset_path): # if os.path.isdir(file): # _create_audio_dataset( # file, dataset_len=dataset_len-file_count, **kwargs # ) # else: # create_audio_datum(file, **kwargs) # file_count += 1 # songs = os.listdir(playlist_dir)[:dataset_len] # if ".DS_Store" in songs: # songs.remove(".DS_Store") # songs = [song for song in songs if ".json" not in song] # df_structure = dict(zip(songs, [None] * len(songs))) # for song_name in df_structure.keys(): # song_path = playlist_dir / song_name # if os.path.isdir(song_path): # components = os.listdir(song_path) # else: # components = [song_name] # df_structure[song_name] = {} # df_structure[song_name]["time_signals"] = dict(zip(components, [None] * len(components))) # df_structure[song_name]["stfts"] = dict(zip(components, [None] * len(components))) # df_structure[song_name]["sampling_rate"] = None # df = pd.DataFrame(df_structure) # for song_name, song_data in tqdm(df_structure.items()): # sr = None # song_path = playlist_dir / song_name # for component in song_data["time_signals"]: # if component == song_name: # filepath = song_path # else: # filepath = song_path / component # df[song_name]["time_signals"][component], sr_tmp = librosa.load( # filepath, sr=None # ) # # assumes all songs at same sampling rate # assert(not sr or sr == sr_tmp) # sr = sr_tmp # df[song_name]["sampling_rate"] = sr # # calculate STFTs # for key in tqdm(songs): # song = df[key] # for component, data in tqdm(song["time_signals"].items(), leave=False): # X = librosa.stft(data, **kwargs) # song["stfts"][component] = X # return df
5,347,620
def _run_plugins(st, make_all=False, run_id=test_run_id_nT, **proces_kwargs): """ Try all plugins (except the DAQReader) for a given context (st) to see if we can really push some (empty) data from it and don't have any nasty problems like that we are referring to some non existant dali folder. """ with tempfile.TemporaryDirectory() as temp_dir: st.storage = [strax.DataDirectory(temp_dir)] # As we use a temporary directory we should have a clean start assert not st.is_stored(run_id, 'raw_records'), 'have RR???' # Create event info target = 'event_info' st.make(run_id=run_id, targets=target, **proces_kwargs) # The stuff should be there assert st.is_stored(run_id, target), f'Could not make {target}' if not make_all: return end_targets = set(st._get_end_targets(st._plugin_class_registry)) for p in end_targets-set(forbidden_plugins): st.make(run_id, p) # Now make sure we can get some data for all plugins all_datatypes = set(st._plugin_class_registry.keys()) for p in all_datatypes-set(forbidden_plugins): should_be_stored = (st._plugin_class_registry[p].save_when == strax.SaveWhen.ALWAYS) if should_be_stored: is_stored = st.is_stored(run_id, p) assert is_stored, f"{p} did not save correctly!" print("Wonderful all plugins work (= at least they don't fail), bye bye")
5,347,621
def unzip_file(file_to_unzip, destination_to_unzip="unzip_apk"):
    """
    Extract all directories in the zip to the destination.

    :param str file_to_unzip:
    :param str destination_to_unzip:
    """
    if not os.path.isdir(destination_to_unzip):
        os.makedirs(destination_to_unzip)
    try:
        zipped_apk = zipfile.ZipFile(file_to_unzip, "r")
        zipped_apk.extractall(path=destination_to_unzip)
    except Exception:
        log.exception("Failed to extract zipped APK from %s to %s",
                      file_to_unzip, destination_to_unzip)
        raise SystemExit("Failed to extract zipped APK")
5,347,622
def photos_page():
    """
    Example view demonstrating rendering a simple HTML page.
    """
    context = make_context()

    with open('data/featured.json') as f:
        context['featured'] = json.load(f)

    return make_response(render_template('photos.html', **context))
5,347,623
def get_user_by_id(current_user, uid):
    """Return a single user, looked up by id, as JSON."""
    try:
        user_schema = CmsUsersSchema(exclude=['password'])
        user = CmsUsers.query.get(uid)
        udata = user_schema.dump(user)
        response = Response(
            response=json.dumps(udata.data),
            status=200,
            mimetype='application/json'
        )
    except Exception:
        response = server_error(request.args.get("dbg"))
    return response
5,347,624
def update_bond_lists_mpi(bond_matrix, comm, size, rank): """ update_bond_lists(bond_matrix) Return atom indicies of angular terms """ N = bond_matrix.shape[0] "Get indicies of bonded beads" bond_index_full = np.argwhere(bond_matrix) "Create index lists for referring to in 2D arrays" indices_full = create_index(bond_index_full) angle_indices = [] angle_bond_indices = [] "Count number of unique bonds" count = np.unique(bond_index_full.T[0]).shape[0] """ "Find indicies of ends of fibrils" fib_end_check = np.argwhere(np.sum(bond_matrix, axis=1) <= 1) n_fib_end = fib_end_check.shape[0] fib_end_check_ind = np.tile(fib_end_check, n_fib_end) fib_end_check_ind = np.stack((fib_end_check_ind, fib_end_check_ind.T), axis=2) fib_end_check_ind = create_index(fib_end_check_ind[np.where(~np.eye(n_fib_end,dtype=bool))]) fib_end = np.zeros(bond_matrix.shape) fib_end[fib_end_check_ind] += 1 """ for n in range(N): slice_full = np.argwhere(bond_index_full.T[0] == n) if slice_full.shape[0] > 1: angle_indices.append(np.unique(bond_index_full[slice_full].flatten())) angle_bond_indices.append(bond_index_full[slice_full][::-1]) bond_indices = np.nonzero(np.array_split(bond_matrix, size)[rank]) angle_indices = np.array_split(angle_indices, size)[rank] angle_bond_indices = create_index(np.array_split(angle_bond_indices, size)[rank].reshape((2 * len(angle_indices), 2))) return bond_indices, angle_indices, angle_bond_indices
5,347,625
def RepoRegion(args, cluster_location=None):
    """Returns the region for the Artifact Registry repo.

    The intended behavior is platform-specific:
    * managed: Same region as the service (run/region or --region)
    * gke: Appropriate region based on cluster zone (cluster_location arg)
    * kubernetes: The run/region config value will be used or an exception
      raised when unset.

    Args:
      args: Namespace, the args namespace.
      cluster_location: The zone which a Cloud Run for Anthos cluster resides.
        When specified, this will result in the region for this zone being
        returned.

    Returns:
      The appropriate region for the repository.
    """
    if cluster_location:
        return _RegionFromZone(cluster_location)
    region = flags.GetRegion(args, prompt=False)
    if region:
        return region
    raise exceptions.ArgumentError(
        'To deploy from source with this platform, you must set run/region via '
        '"gcloud config set run/region REGION".')
5,347,626
def html(ctx, **kwargs):
    """Formats meeting HTML"""
    ensure_config(ctx.obj)
    ctx.obj.update(kwargs)
    args = AttrDict(ctx.obj)
    context = Context(args)
    context.prerender()
    content = context.render()
    logger.info(f'Generated {len(content)//1000}KB of HTML content')
    logger.info(f'Total meetings rendered: {len(context["meetings"])}')
    outfile = args.output
    if outfile is None:
        outfile = open(context['now'].strftime('%B %Y Directory.html'), 'w')
    elif outfile == '-':
        outfile = sys.stdout.buffer
    else:
        outfile = open(outfile, 'w')
    outfile.write(content)
    outfile.close()
    logger.info(f'Wrote to {outfile.name}')
5,347,627
def install_nightly_packs(client: demisto_client, host: str, packs_to_install: List, request_timeout: int = 999999): """ Install content packs on nightly build. We will catch the exception if pack fails to install and send the request to install packs again without the corrupted pack. Args: client(demisto_client): The configured client to use. host (str): The server URL. packs_to_install (list): A list of the packs to install. request_timeout (int): Timeout settings for the installation request. Returns: None: No data returned. """ logging.info(f'Installing packs on server {host}') # make the pack installation request all_packs_install_successfully = False request_data = { 'packs': packs_to_install, 'ignoreWarnings': True } while not all_packs_install_successfully: try: packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install]) logging.debug(f'Installing the following packs in server {host}:\n{packs_to_install_str}') response_data, status_code, _ = demisto_client.generic_request_func(client, path='/contentpacks/marketplace/install', method='POST', body=request_data, accept='application/json', _request_timeout=request_timeout) if 200 <= status_code < 300: packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for pack in ast.literal_eval(response_data)] logging.success(f'Packs were successfully installed on server {host}') logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}') else: result_object = ast.literal_eval(response_data) message = result_object.get('message', '') raise Exception(f'Failed to install packs on server {host}- with status code {status_code}\n{message}\n') break except Exception as e: all_packs_install_successfully = False malformed_pack_id = find_malformed_pack_id(str(e)) if not malformed_pack_id: logging.exception('The request to install packs has failed') raise pack_ids_to_install = {pack['id'] for pack in packs_to_install} malformed_pack_id = malformed_pack_id[0] if malformed_pack_id not in pack_ids_to_install: logging.exception( f'The pack {malformed_pack_id} has failed to install even though it was not in the installation list') raise logging.warning(f'The request to install packs on server {host} has failed, retrying without {malformed_pack_id}') # Remove the malformed pack from the pack to install list. packs_to_install = [pack for pack in packs_to_install if pack['id'] not in malformed_pack_id] request_data = { 'packs': packs_to_install, 'ignoreWarnings': True }
5,347,628
def compile(expr: ibis.Expr, params=None):
    """Compile a given expression.

    Note you can also call expr.compile().

    Parameters
    ----------
    expr : ibis.Expr
    params : dict

    Returns
    -------
    compiled : string
    """
    from ibis.omniscidb.compiler import to_sql
    return to_sql(expr, dialect.make_context(params=params))
5,347,629
def get_border(border, size):
    """
    Get border
    """
    i = 1
    while size - border // i <= border // i:  # size > 2 * (border // i)
        i *= 2
    return border // i
5,347,630
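Worked examples for the get_border row above (illustrative): the loop keeps halving the border until it fits twice inside size.

    assert get_border(128, 700) == 128   # 700 > 2*128, so it is returned unchanged
    assert get_border(128, 200) == 64    # halved once, since 200 > 2*64 but not 2*128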
def load_normalized_data(file_path, log1p=True):
    """load normalized data

    1. Load filtered data for both FACS and droplet
    2. Size factor normalization to counts per 10 thousand
    3. log(x+1) transform
    4. Combine the data

    Args:
        file_path (str): file path.

    Returns:
        adata_combine (AnnData): Combined data for FACS and droplet
    """
    # Load filtered data
    # adata_facs = read_h5ad(f'{file_path}/facs_filtered.h5ad')
    adata_facs = read_h5ad(
        f'{file_path}/facs_filtered_reannotated-except-for-marrow-lung-kidney.h5ad')
    adata_droplet = read_h5ad(f'{file_path}/droplet_filtered.h5ad')
    # Size factor normalization
    sc.pp.normalize_per_cell(adata_facs, counts_per_cell_after=1e4)
    sc.pp.normalize_per_cell(adata_droplet, counts_per_cell_after=1e4)
    # log(x+1) transform
    if log1p:
        sc.pp.log1p(adata_facs)
        sc.pp.log1p(adata_droplet)
    # Combine the data
    ind_select = adata_facs.obs['age'].isin(['3m', '18m', '24m'])
    adata_facs = adata_facs[ind_select, ]
    adata_combine = AnnData.concatenate(adata_facs, adata_droplet,
                                        batch_key='b_method',
                                        batch_categories=['facs', 'droplet'])
    return adata_combine
5,347,631
def from_copy_number(
    model: cobra.Model,
    index: pd.Series,
    cell_copies: pd.Series,
    stdev: pd.Series,
    vol: float,
    dens: float,
    water: float,
) -> cobra.Model:
    """Convert `cell_copies` to mmol/gDW and apply them to `model`.

    Parameters
    ----------
    model: cobra.Model
        cobra or geckopy Model (will be converted to geckopy.Model). It is
        NOT modified inplace.
    index: pd.Series
        uniprot IDs
    cell_copies: pd.Series
        cell copies / cell per protein
    stdev: pd.Series
        standard deviation of the cell copies
    vol: float
        cell volume
    dens: float
        cell density
    water: float
        water content fraction (0-1)

    Returns
    -------
    geckopy.Model
        with the proteomics constraints applied
    """
    df = pd.DataFrame({"cell_copies": cell_copies, "CV": stdev})
    # from molecules/cell to mmol/gDW
    df["copies_upper"] = df["cell_copies"] + 0.5 * df["CV"] / 100 * df["cell_copies"]
    df["mmol_per_cell"] = df["copies_upper"] / 6.022e21
    proteomics = df["mmol_per_cell"] / (vol * dens * water)
    proteomics.index = index
    return from_mmol_gDW(model, proteomics)
5,347,632
def test_exclude_crds_mask_pix():
    """Test that bad pixel images are differentiated correctly
    """
    common_bad = ([0, 1, 2, 3, 4], [0, 1, 2, 3, 4])
    bad1_only = ([0, 1, 3, 4], [4, 3, 1, 0])
    bad2_only = ([3, 3, 3, 3], [0, 1, 2, 4])

    bad1 = np.zeros((5, 5), dtype=np.uint8)
    bad1[common_bad] = 1
    bad1[bad1_only] = 2

    bad2 = np.zeros((5, 5), dtype=np.uint8)
    bad2[common_bad] = 1
    bad2[bad2_only] = 4

    # Create a mask to help with indexing
    mask = np.zeros(bad1.shape, dtype=bool)
    mask[bad1_only] = True

    diff = bad_pixel_monitor.exclude_crds_mask_pix(bad1, bad2)
    assert np.all(diff[mask] == 2)
    assert np.all(diff[~mask] == 0)

    # Test the reverse case
    mask = np.zeros(bad2.shape, dtype=bool)
    mask[bad2_only] = True

    diff = bad_pixel_monitor.exclude_crds_mask_pix(bad2, bad1)
    assert np.all(diff[mask] == 4)
    assert np.all(diff[~mask] == 0)
5,347,633
def getstatusoutput(cmd):
    """Return (exitcode, output) of executing cmd in a shell.

    Execute the string 'cmd' in a shell with 'check_output' and
    return a 2-tuple (status, output). The locale encoding is used
    to decode the output and process newlines.

    A trailing newline is stripped from the output.
    The exit status for the command can be interpreted
    according to the rules for the function 'wait'. Example:

    >>> import subprocess
    >>> subprocess.getstatusoutput('ls /bin/ls')
    (0, '/bin/ls')
    >>> subprocess.getstatusoutput('cat /bin/junk')
    (1, 'cat: /bin/junk: No such file or directory')
    >>> subprocess.getstatusoutput('/bin/junk')
    (127, 'sh: /bin/junk: not found')
    >>> subprocess.getstatusoutput('/bin/kill $$')
    (-15, '')
    """
    try:
        data = check_output(cmd, shell=True, text=True, stderr=STDOUT)
        exitcode = 0
    except CalledProcessError as ex:
        data = ex.output
        exitcode = ex.returncode
    if data[-1:] == '\n':
        data = data[:-1]
    return exitcode, data
5,347,634
def _get_values(attribute, text):
    """Match attribute in text and return all matches.

    :returns: List of matches.
    """
    # raw string avoids the invalid-escape warning for \s on Python 3
    regex = r'{}\s+=\s+"(.*)";'.format(attribute)
    regex = re.compile(regex)
    values = regex.findall(text)
    return values
5,347,635
def time_axis(tpp=20e-9, length=20_000) -> np.ndarray:
    """Return the time axis used in experiments.
    """
    ts = tpp * np.arange(length)
    ten_percent_point = np.floor(length / 10) * tpp
    ts -= ten_percent_point
    ts *= 1e6  # convert from seconds to microseconds
    return ts
5,347,636
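A quick check of the defaults in the time_axis row above (illustrative; assumes numpy imported as np): 20 ns per point over 20,000 points, with t = 0 placed at the 10% mark, so the axis starts at -40 µs.

    ts = time_axis()
    assert np.isclose(ts[0], -40.0)    # microseconds
    assert np.isclose(ts[2000], 0.0)   # the 10% point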
def _has_perm(user, ctnr, action, obj=None, obj_class=None): """ Checks whether a user (``request.user``) has permission to act on a given object (``obj``) within the current session CTNR. Permissions will depend on whether the object is within the user's current CTNR and the user's permissions level within that CTNR. Plebs are people that don't have any permissions except for dynamic registrations. Guests of a CTNR have view access to all objects within the current CTNR. Users have full access to objects within the current CTNR, except for exceptional types of objects (domains, SOAs) and the CTNR itself. CTNR admins are like users except they can modify the CTNR itself and assign permissions to other users. Cyder admins are CTNR admins to every CTNR. Though the object has to be within the CURRENT CTNR for permissions to be granted, for purposes of encapsulation. Superusers (Uber-admins/Elders) have complete access to everything including the ability to create top-level domains, SOAs, and global DHCP objects. Plebs are not assigned to any CTNR. CTNR Guests have level 0 to a CTNR. CTNR Users have level 1 to a CTNR. CTNR Admins have level 2 to a CTNR. Cyder Admins have level 2 to the 'global' CTNR (``pk=1``). Superusers are Django superusers. :param request: A django request object. :type request: :class:`request` :param obj: The object being tested for permission. :type obj: :class:`object` :param action: ``0`` (view), ``1`` (create), ``2`` (update), ``3`` (delete) :type action: :class: `int` An example of checking whether a user has 'create' permission on a :class:`Domain` object. >>> perm = request.user.get_profile().has_perm(request, \'create\', ... obj_class=Domain) >>> perm = request.user.get_profile().has_perm(request, \'update\', ... obj=domain) """ from cyder.core.ctnr.models import CtnrUser user_level = None if user.is_superuser: return True ctnr_level = -1 assert LEVEL_ADMIN > LEVEL_USER > LEVEL_GUEST > ctnr_level if obj: ctnr = None ctnrs = None if hasattr(obj, "get_ctnrs"): try: ctnrs = obj.get_ctnrs() except TypeError: pass if ctnrs is not None: for c in ctnrs: try: level = CtnrUser.objects.get(ctnr=c, user=user).level except CtnrUser.DoesNotExist: continue if level > ctnr_level: ctnr_level = level ctnr = c if ctnr_level == LEVEL_ADMIN: break elif ctnr and user and not obj: try: ctnr_level = CtnrUser.objects.get(ctnr=ctnr, user=user).level except CtnrUser.DoesNotExist: pass if obj and ctnr and not ctnr.check_contains_obj(obj): return False # Get user level. is_ctnr_admin = ctnr_level == LEVEL_ADMIN is_ctnr_user = ctnr_level == LEVEL_USER is_ctnr_guest = ctnr_level == LEVEL_GUEST try: cyder_level = CtnrUser.objects.get(ctnr=1, user=user).level except CtnrUser.DoesNotExist: cyder_level = -1 is_cyder_admin = cyder_level == LEVEL_ADMIN is_cyder_guest = CtnrUser.objects.filter(user=user).exists() if is_cyder_admin: user_level = 'cyder_admin' elif is_ctnr_admin: user_level = 'ctnr_admin' elif is_ctnr_user: user_level = 'ctnr_user' elif is_ctnr_guest: user_level = 'ctnr_guest' elif is_cyder_guest: user_level = 'cyder_guest' else: user_level = 'pleb' # Dispatch to appropriate permissions handler. if obj: obj_type = obj.__class__.__name__ elif obj_class: if isinstance(obj_class, basestring): obj_type = str(obj_class) else: obj_type = obj_class.__name__ else: return False if (obj_type and obj_type.endswith('AV') and obj_type != 'WorkgroupAV'): obj_type = obj_type[:-len('AV')] handling_functions = { # Administrative. 
'Ctnr': has_administrative_perm, 'User': has_administrative_perm, 'UserProfile': has_administrative_perm, 'CtnrUser': has_ctnr_user_perm, 'CtnrObject': has_ctnr_object_perm, 'SOA': has_soa_perm, 'Domain': has_domain_perm, # Domain records. 'AddressRecord': has_domain_record_perm, 'CNAME': has_domain_record_perm, 'MX': has_domain_record_perm, 'Nameserver': has_name_server_perm, 'SRV': has_domain_record_perm, 'SSHFP': has_domain_record_perm, 'TXT': has_domain_record_perm, 'PTR': has_reverse_domain_record_perm, # DHCP. 'Network': has_network_perm, 'Range': has_range_perm, 'Site': has_site_perm, 'System': has_system_perm, 'Vlan': has_vlan_perm, 'Vrf': has_vrf_perm, 'Workgroup': has_workgroup_perm, 'StaticInterface': has_static_registration_perm, 'DynamicInterface': has_dynamic_registration_perm, 'Supernet': has_supernet_perm, 'WorkgroupAV': has_workgroupav_perm, 'Token': has_token_perm } handling_function = handling_functions.get(obj_type, None) if not handling_function: if '_' in obj_type: obj_type = obj_type.replace('_', '') if 'Intr' in obj_type: obj_type = obj_type.replace('Intr', 'interface') for key in handling_functions.keys(): if obj_type.lower() == key.lower(): handling_function = handling_functions[key] if handling_function: return handling_function(user_level, obj, ctnr, action) else: raise Exception('No handling function for {0}'.format(obj_type))
5,347,637
def log_level(bot: Bot, event: Event, irc: connection_wrapper, args: List[str]):
    """<level> Changes the logging level"""
    try:
        log.setLevel(getattr(log, args[0].upper()))
        irc.reply(event, f"Set log level to {args[0]}")
    except AttributeError:
        irc.reply(event, f"Invalid log level {args}")
5,347,638
def read_chunk(file: File, size: int = 400) -> bytes:
    """
    Reads the first [size] bytes from file; size defaults to 400
    """
    file = _path.join(file.root, file.name)  # get full path of file
    with open(file, 'rb') as file:
        # read a chunk of the requested size
        chunk = file.read(size)
    return chunk
5,347,639
def plot_tensorflow_log(args): """ Plot data from tensorboard event file. """ # Loading too much data is slow... tf_size_guidance = {'scalars': args.num_load} event_acc = EventAccumulator(args.log_name, tf_size_guidance) event_acc.Reload() assert event_acc.Tags()["scalars"] != [], "Did you give the log file?" fig1 = plt.figure() ax1 = fig1.add_subplot(1, 1, 1) loss_content = event_acc.Scalars('loss_content') loss_style = event_acc.Scalars('loss_style') steps = args.num_print if args.num_print != 0 else len(loss_content) x = np.zeros(steps, ) y = np.zeros([steps,2]) for i in range(steps): x[i] = loss_content[i][1] #step y[i, 0] = loss_style[i][2] y[i, 1] = loss_content[i][2] # value if args.trend: y_trend = np.zeros([steps,2]) for i in range(steps): if i < 1000: y_trend[i, 0] = np.mean(y[:i+1, 0]) y_trend[i, 1] = np.mean(y[:i+1, 1]) elif i > steps-1000: y_trend[i, 0] = np.mean(y[i-1000:, 0]) y_trend[i, 1] = np.mean(y[i-1000:, 1]) else: y_trend[i, 0] = np.mean(y[i-1000:i+1000, 0]) y_trend[i, 1] = np.mean(y[i-1000:i+1000, 1]) ax1.plot(x, y[:,0], label='loss_style', color="tab:orange") ax1.plot(x, y[:,1], label='loss_content', color="tab:blue") ax1.plot(x, y_trend[:, 0], label='loss_style_trend', color="red") ax1.plot(x, y_trend[:, 1], label='loss_content_trend', color="blue") print("Final content loss: %.3f style loss: %.3f"%(np.mean(y[-1000:,0]),np.mean(y[-1000:,1]))) ax1.set_xlabel("Iter") ax1.set_ylabel("loss") ax1.set_ylim([0.1,1000]) ax1.set_title("Training Progress") ax1.legend(loc='upper right', frameon=True) if not args.linear: ax1.set_yscale("log") if args.save: if not os.path.exists(args.save_dir): os.mkdir(args.save_dir) plt.savefig('{:s}/losses_{:s}.eps'.format(args.save_dir, args.log_name[-17:])) else: plt.show()
5,347,640
def parse_input(file_path):
    """
    Turn an input file of newline-separated bitrate samples into input and
    label arrays. An input file line should look like this:

        4983 1008073 1591538 704983 1008073 1008073 704983

    Adjacent duplicate entries will be removed and lines with less than two
    samples will be filtered out.

    @return a tuple of the x, x sequence length, and y arrays parsed from
    the input file
    """
    bitrate_inputs = []
    inputs_length = []
    bitrate_labels = []
    with open(file_path, 'r') as file:
        for line in file:
            # list() is needed on Python 3, where map() returns an iterator
            samples = list(map(lambda x: [float(x) * bps_to_MBps],
                               line.strip().split(' ')))[0:MAX_SAMPLES + 1]
            if len(samples) < 2:
                # skip lines without enough samples
                continue
            bitrate_labels.append(samples.pop())
            inputs_length.append(len(samples))
            samples += [[-1] for i in range(MAX_SAMPLES - len(samples))]
            bitrate_inputs += [samples]
    return bitrate_inputs, inputs_length, bitrate_labels
5,347,641
def read(rel_path):
    """
    Read a file located relative to this module and return its contents.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, rel_path), 'r') as fp:
        return fp.read()
5,347,642
def main(argv=None):
    """Main command line interface."""
    if argv is None:
        argv = sys.argv[1:]
    cli = CommandLineTool()
    try:
        return cli.run(argv)
    except KeyboardInterrupt:
        print('Canceled')
        return 3
5,347,643
def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
    """If `mod` is in `module_fetch_book`, fetch the mod's attributes that are in
    `module_fetch_book` after checking that the module's version is compatible
    with the `module_fetch_book`.
    """
    attrs_for_lowering: Dict[str, Any] = {}
    attrs_for_lowering["name"] = torch.typename(mod)

    if type(mod) in module_fetch_book:
        version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
        if version < mod._version:
            raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
                               "please upgrade the module_fetch_book, open an issue and @842974287 "
                               "or report a bug to AIACC team directly.")
        for attr in param_to_fetch:
            attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
    else:
        raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
                           "please add it to the module_fetch_book, open an issue and @842974287 "
                           "or report a bug to AIACC team directly.")
    return attrs_for_lowering
5,347,644
def get_filter_fields(target: str, data_registry_url: str, token: str) -> Set[str]:
    """
    Returns the set of filterable fields from a target end point by calling OPTIONS

    :param target: target end point of the data registry
    :param data_registry_url: the url of the data registry
    :param token: personal access token
    :return: the set of filterable fields on this target end point
    """
    end_point = get_end_point(data_registry_url, target)
    result = requests.options(end_point, headers=get_headers(token))
    result.raise_for_status()
    options = result.json()
    return set(options.get("filter_fields", []))
5,347,645
def cli_invitation():
    """Entry point of the `crcr invitation` command."""
    pass
5,347,646
def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).

    http://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    v0.1
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)

    if not center:
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # Get sorted radii
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of pixels in each radius bin

    # Cumulative sum to figure out sums for each radius bin
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    radial_prof = tbin / nr

    return radial_prof
5,347,647
def run_tasks(api, logger, config): """Launch CGC tasks. Parameters ---------- api: SevenBridges API instance Api logger: logger instance Log config: dict YAML configuration file """ logger.info('Running tasks!') project = config['project'] max_task_number = config['task_max_per_run'] app = config['app-bam2fasta'] running_tasks = list( api.tasks.query(project=project, limit=100, status='RUNNING').all() ) queued_tasks = list( api.tasks.query(project=project, limit=100, status='QUEUED').all() ) if len(running_tasks) + len(queued_tasks) >= max_task_number: logger.info("Maximum number of active tasks reached!") raise SbgError( 'Unable to run! You already have {active} active tasks. ' 'Please try later!'.format (active=len(running_tasks) + len(queued_tasks))) draft_tasks = list( api.tasks.query(project=project, limit=100, status='DRAFT').all()) if len(draft_tasks) == 0: print('No draft tasks left to be run!') return # Remove draft tasks that weren't created by current app draft_tasks_app = list(draft_tasks) for task in draft_tasks: if app not in task.app: draft_tasks_app.remove(task) executable_tasks = draft_tasks_app[0:max_task_number - len(running_tasks)] for task in executable_tasks: # Sanity check only current app draft tasks are run if app in task.app: try: task.run() except SbgError as e: logger.error("Task was not started! Error happened ", exc_info=e) raise SbgError('Task was not started! Error happened') if task.status == 'DRAFT': logger.error("Task was not started! Task state is DRAFT!") raise SbgError("Task was not started! Task state is DRAFT!")
5,347,648
def train_transforms_fisheye(sample, image_shape, jittering):
    """
    Training data augmentation transformations

    Parameters
    ----------
    sample : dict
        Sample to be augmented
    image_shape : tuple (height, width)
        Image dimension to reshape
    jittering : tuple (brightness, contrast, saturation, hue)
        Color jittering parameters

    Returns
    -------
    sample : dict
        Augmented sample
    """
    if len(image_shape) > 0:
        sample = resize_sample_fisheye(sample, image_shape)
    sample = duplicate_sample(sample)
    if len(jittering) > 0:
        sample = colorjitter_sample(sample, jittering)
    sample = to_tensor_sample(sample)
    return sample
5,347,649
def build_json_output_request(**kwargs: Any) -> HttpRequest:
    """A Swagger with XML that has one operation that returns JSON. ID number 42.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this
    request builder into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "id": 0  # Optional.
            }
    """
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/xml/jsonoutput")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
5,347,650
def merge_extended(args_container: args._ArgumentContainer, hold: bool, identificator: str) -> int:
    """
    Merge the args_container into the internal, like merge_named, but hold
    specifies if the internal container should not be cleared.

    :param args_container: The argument container with the data to merge
    :param hold: When True, does not clear the internal data.
    :param identificator: The identificator to pass to the MERGE_END event

    :raises TypeError: if the arguments passed are not the expected type
    """
    if (
        not isinstance(args_container, args._ArgumentContainer)
        or not isinstance(hold, int)  # noqa W503
        or not isinstance(identificator, str)  # noqa W503
    ):
        raise TypeError("The given parameters do not match the types required.")
    return _grm.grm_merge_extended(args_container.ptr, c_int(1 if hold else 0), _encode_str_to_char_p(identificator))
5,347,651
def video_path_file_name(instance, filename):
    """
    Callback for the video node field to get the path file name

    :param instance: the image field
    :param filename: the file name
    :return: the path file name
    """
    return path_file_name(instance, 'video', filename)
5,347,652
def setup_platform(opp, config, add_entities, discovery_info=None):
    """Set up the cover platform for ADS."""
    ads_hub = opp.data[DATA_ADS]

    ads_var_is_closed = config.get(CONF_ADS_VAR)
    ads_var_position = config.get(CONF_ADS_VAR_POSITION)
    ads_var_pos_set = config.get(CONF_ADS_VAR_SET_POS)
    ads_var_open = config.get(CONF_ADS_VAR_OPEN)
    ads_var_close = config.get(CONF_ADS_VAR_CLOSE)
    ads_var_stop = config.get(CONF_ADS_VAR_STOP)
    name = config[CONF_NAME]
    device_class = config.get(CONF_DEVICE_CLASS)

    add_entities(
        [
            AdsCover(
                ads_hub,
                ads_var_is_closed,
                ads_var_position,
                ads_var_pos_set,
                ads_var_open,
                ads_var_close,
                ads_var_stop,
                name,
                device_class,
            )
        ]
    )
5,347,653
def _suppress_hotkey(steps, timeout):
    """
    Adds a hotkey to the list of keys to be suppressed. To unsuppress all
    hotkeys use `clear_all_hotkeys()`.
    """
    _key_table.suppress_sequence(steps, timeout)
5,347,654
def radiative_processes_mono(flux_euv, flux_fuv, average_euv_photon_wavelength=242.0, average_fuv_photon_wavelength=2348.0): """ Calculate the photoionization rate of helium at null optical depth based on the EUV spectrum arriving at the planet. Parameters ---------- flux_euv (``float``): Monochromatic extreme-ultraviolet (0 - 504 Angstrom) flux arriving at the planet in units of erg / s / cm ** 2. Attention: notice that this ``flux_euv`` is different from the one used for hydrogen, since helium ionization happens at a shorter wavelength. flux_fuv (``float``): Monochromatic far- to middle-ultraviolet (911 - 2593 Angstrom) flux arriving at the planet in units of erg / s / cm ** 2. average_euv_photon_wavelength (``float``): Average wavelength of EUV photons ionizing the He singlet state, in unit of Angstrom. Default value is 242 Angstrom. The default value is based on a flux-weighted average of the solar spectrum between 0 and 504 Angstrom. average_fuv_photon_wavelength (``float``): Average wavelength of FUV-NUV photons ionizing the He triplet state, in unit of Angstrom. Default value is 2348 Angstrom. The default value is based on a flux-weighted average of the solar spectrum between 911 and 2593 Angstrom. Returns ------- phi_1 (``float``): Ionization rate of helium singlet at null optical depth in unit of 1 / s. phi_3 (``float``): Ionization rate of helium triplet at null optical depth in unit of 1 / s. a_1 (``float``): Flux-averaged photoionization cross-section of helium singlet in unit of cm ** 2. a_3 (``float``): Flux-averaged photoionization cross-section of helium triplet in unit of cm ** 2. a_h_1 (``float``): Flux-averaged photoionization cross-section of hydrogen in the range absorbed by helium singlet in unit of cm ** 2. a_h_3 (``float``): Flux-averaged photoionization cross-section of hydrogen in the range absorbed by helium triplet in unit of cm ** 2. """ # Average cross-section to ionize helium singlet a_1 = microphysics.helium_singlet_cross_section(average_euv_photon_wavelength) # The photoionization cross-section of He triplet wavelength_3, a_lambda_3 = microphysics.helium_triplet_cross_section() # # Average cross-section to ionize helium triplet a_3 = np.interp(average_fuv_photon_wavelength, wavelength_3, a_lambda_3) # The flux-averaged photoionization cross-section of H is also going to be # needed because it adds to the optical depth that the He atoms see. # Contribution to the optical depth seen by He singlet atoms: # Hydrogen cross-section within the range important to helium singlet a_h_1 = 6.3E-18 * (average_euv_photon_wavelength / 13.6) ** (-3) # Unit 1 / cm ** 2. # Contribution to the optical depth seen by He triplet atoms: if average_fuv_photon_wavelength < 911.0: a_h_3 = microphysics.hydrogen_cross_section( wavelength=average_fuv_photon_wavelength) else: a_h_3 = 0.0 # Convert the fluxes from erg to eV and calculate the photoionization rates energy_1 = 12398.419843320025 / average_euv_photon_wavelength energy_3 = 12398.419843320025 / average_fuv_photon_wavelength phi_1 = flux_euv * 6.24150907e+11 * a_1 / energy_1 phi_3 = flux_fuv * 6.24150907e+11 * a_3 / energy_3 return phi_1, phi_3, a_1, a_3, a_h_1, a_h_3
5,347,655
def handle_exception(exc):
    """
    :return: void
        Tries to handle it
    """
    print("[CRITICAL ERROR]:", str(exc).replace("\n", ".") + "!!!")
    print("pyhodl stopped abruptly, but your data is safe, don't worry.")
    user_input = UserInput()
    if user_input.get_yes_no("Want to fill a bug report?"):
        print("Please file a bug report here >> "
              "https://github.com/sirfoga/pyhodl/issues attaching the "
              "following content ...")
        time.sleep(1)
        traceback.print_exc()
    print("Terribly sorry for the inconvenience, see you soon!")
5,347,656
def date_yyyymmdd(now: typing.Union[datetime.datetime, None] = None,
                  day_delta: int = 0,
                  month_delta: int = 0) -> str:
    """
    :param day_delta:
    :param month_delta:
    :return: today + day_delta + month_delta -> str YYYY-MM-DD
    """
    return date_delta(now, day_delta, month_delta).strftime("%Y-%m-%d")
5,347,657
def description_for_number(*args, **kwargs):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from. This function explicitly checks the validity of the number passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
          which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
          ISO script code as defined in ISO 15924, separated by an
          underscore (e.g. "Hant")
    region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a text description in the given language code, for the given phone
    number, or an empty string if no description is available."""
    from .geocoder import description_for_number as real_fn
    return real_fn(*args, **kwargs)
5,347,658
def get_pathway_nodes(pathway):
    """Return single nodes in pathway.

    :param pathme_viewer.models.Pathway pathway: pathway entry
    :return: BaseAbundance nodes
    :rtype: list[pybel.dsl.BaseAbundance]
    """
    # Loads the BELGraph
    graph = from_bytes(pathway.blob)

    collapse_to_genes(graph)

    # Return BaseAbundance BEL nodes
    return {
        node.as_bel()
        for node in graph
        if isinstance(node, BaseAbundance)
    }
5,347,659
def restore_modifiers(scan_codes):
    """
    Like `restore_state`, but only restores modifier keys.
    """
    restore_state((scan_code for scan_code in scan_codes if is_modifier(scan_code)))
5,347,660
def check_currrent_user_privilege():
    """
    Check if our user has interesting tokens
    """
    # Interesting Windows Privileges
    # - SeDebug
    # - SeRestore
    # - SeBackup
    # - SeTakeOwnership
    # - SeTcb
    # - SeCreateToken
    # - SeLoadDriver
    # - SeImpersonate
    # - SeAssignPrimaryToken
    interesting_priv = (
        u'SeDebug',
        u'SeRestore',
        u'SeBackup',
        u'SeTakeOwnership',
        u'SeTcb',
        u'SeCreateToken',
        u'SeLoadDriver',
        u'SeImpersonate',
        u'SeAssignPrimaryToken'
    )

    privs = get_currents_privs()
    priv = []
    for (privilege, enabled) in privs:
        if enabled:
            string = privilege
            for p in interesting_priv:
                if p in privilege:
                    string += ' => Could be used to elevate our privilege'
                    break
            priv.append(string)
    return priv
5,347,661
def handle_domain_addition_commands(client: Client, demisto_args: dict) -> CommandResults: """ Adds the domains to the inbound blacklisted list. :type client: ``Client`` :param client: Client to use. :type demisto_args: ``dict`` :param demisto_args: The demisto arguments. :return: The command results which contains the added domains to the inbound blacklisted list. :rtype: ``CommandResults`` """ demisto_args = handle_args(demisto_args) domain = demisto_args.get('domain') if not domain: raise DemistoException( 'A domain must be provided in order to add it to the inbound blacklisted list.') demisto_args['domain'] = ','.join(argToList(domain)) raw_result = client.inbound_blacklisted_domain_add_command(demisto_args) domains_list = copy.deepcopy(raw_result.get('domains', [raw_result])) msg = 'Domains were successfully added to the inbound blacklisted list\n' objects_time_to_readable_time(domains_list, 'updateTime') readable_output = msg + tableToMarkdown('Added Domains', domains_list, headers=['domain', 'pgid', 'cid', 'update_time', 'annotation'], headerTransform=string_to_table_header, removeNull=True) return CommandResults( outputs_prefix='NetscoutAED.InboundBlacklistDomain', outputs_key_field='domain', outputs=domains_list, raw_response=raw_result, readable_output=readable_output, )
5,347,662
def _tc4(dom: AbsDom): """ Validate that my AcasNet module can be optimized at the inputs. """ mse = nn.MSELoss() max_retries = 100 max_iters = 30 # at each retry, train at most 100 iterations def _loss(outputs_lb): lows = outputs_lb[..., 0] distances = 0 - lows distances = F.relu(distances) prop = torch.zeros_like(distances) return mse(distances, prop) retried = 0 while retried < max_retries: # it is possible to get inputs optimized to some local area, thus retry multiple times net = AcasNet(dom, 2, 2, [2]).to(device) inputs = torch.randn(2, 2, 2, device=device) inputs_lb, _ = torch.min(inputs, dim=-1) inputs_ub, _ = torch.max(inputs, dim=-1) inputs_lb = inputs_lb.requires_grad_() inputs_ub = inputs_ub.requires_grad_() ins = dom.Ele.by_intvl(inputs_lb, inputs_ub) with torch.no_grad(): outputs_lb, outputs_ub = net(ins).gamma() if _loss(outputs_lb) <= 0: # found something to optimize continue retried += 1 # Now the network has something to optimize print(f'\n===== TC4: ({retried}th try) =====') print('Using inputs LB:', inputs_lb) print('Using inputs UB:', inputs_ub) print('Before any optimization, the approximated output is:') print('Outputs LB:', outputs_lb) print('Outputs UB:', outputs_ub) # This sometimes work and sometimes doesn't. It may stuck on a fixed loss and never decrease anymore. orig_inputs_lb = inputs_lb.clone() orig_inputs_ub = inputs_ub.clone() opti = torch.optim.Adam([inputs_lb, inputs_ub], lr=0.1) iters = 0 while iters < max_iters: iters += 1 # after optimization, lb ≤ ub may be violated _inputs_lbub = torch.stack((inputs_lb, inputs_ub), dim=-1) _inputs_lb, _ = torch.min(_inputs_lbub, dim=-1) _inputs_ub, _ = torch.max(_inputs_lbub, dim=-1) ins = dom.Ele.by_intvl(_inputs_lb, _inputs_ub) opti.zero_grad() outputs_lb, outputs_ub = net(ins).gamma() loss = _loss(outputs_lb) if loss <= 0: # until the final output's 1st element is >= 0 break loss.backward() opti.step() print(f'Iter {iters} - loss {loss.item()}') if iters < max_iters: # successfully trained break assert retried < max_retries with torch.no_grad(): print(f'At {retried} retry, all optimized after {iters} iterations. ' + f'Now the outputs 1st element should be >= 0 given the latest input.') outputs_lb, outputs_ub = net(ins).gamma() print('Outputs LB:', outputs_lb) print('Outputs UB:', outputs_ub) print('Original inputs LB:', orig_inputs_lb) print('Optimized inputs LB:', inputs_lb) print('Original inputs UB:', orig_inputs_ub) print('Optimized inputs UB:', inputs_ub) assert (outputs_lb[:, 0] >= 0.).all() return
5,347,663
def delete_image(inputkey, inputName):
    """Function to delete image"""
    folder = get_image_path(inputkey, inputName)
    os.remove(folder)
5,347,664
def skip_url(url):
    """
    Skip naked username mentions and subreddit links.
    """
    return REDDIT_PATTERN.match(url) and SUBREDDIT_OR_USER.search(url)
5,347,665
def assert_dataset_like( ds1: timeseries.MultiRegionDataset, ds2: timeseries.MultiRegionDataset, *, drop_na_timeseries=False, drop_na_latest=False, drop_na_dates=False, compare_tags=True, ): """Asserts that two datasets contain similar date, ignoring order.""" ts1 = _timeseries_sorted_by_location_date( ds1, drop_na=drop_na_timeseries, drop_na_dates=drop_na_dates ) ts2 = _timeseries_sorted_by_location_date( ds2, drop_na=drop_na_timeseries, drop_na_dates=drop_na_dates ) pd.testing.assert_frame_equal(ts1, ts2, check_like=True, check_dtype=False) latest1 = _latest_sorted_by_location_date(ds1, drop_na_latest) latest2 = _latest_sorted_by_location_date(ds2, drop_na_latest) pd.testing.assert_frame_equal(latest1, latest2, check_like=True, check_dtype=False) # Somehow tests/libs/datasets/combined_dataset_utils_test.py::test_update_and_load has # two provenance Series that are empty but assert_series_equal fails with message # 'Attribute "inferred_type" are different'. Don't call it when both series are empty. if not (ds1.provenance.empty and ds2.provenance.empty): pd.testing.assert_series_equal(ds1.provenance, ds2.provenance) if compare_tags: tag1 = ds1.tag.astype("string") tag2 = ds2.tag.astype("string") # Don't check the index types because they don't matter and some tests end up with different # types that otherwise compare as equal. pd.testing.assert_series_equal(tag1, tag2, check_index_type=False)
5,347,666
def test_not_connected():
    """Test send commands without connection."""
    device = _TestDevice(SERIAL, CREDENTIAL)
    with pytest.raises(DysonNotConnected):
        device.request_current_status()
    assert device._status is None
    with pytest.raises(DysonNotConnected):
        device._send_command("COMMAND")
5,347,667
def registry():
    """
    Return a dictionary of problems of the form:

    ```{
        "problem name": {
            "params": ...
        },
        ...
    }```

    where `flexs.landscapes.AdditiveAAVPackaging(**problem["params"])` instantiates
    the additive AAV packaging landscape for the given set of parameters.

    Returns:
        dict: Problems in the registry.
    """
    problems = {
        "heart": {"params": {"phenotype": "heart", "start": 450, "end": 540}},
        "lung": {"params": {"phenotype": "lung", "start": 450, "end": 540}},
        "kidney": {"params": {"phenotype": "kidney", "start": 450, "end": 540}},
        "liver": {"params": {"phenotype": "liver", "start": 450, "end": 540}},
        "blood": {"params": {"phenotype": "blood", "start": 450, "end": 540}},
        "spleen": {"params": {"phenotype": "spleen", "start": 450, "end": 540}},
    }
    return problems
5,347,668
def ecm(n, rounds, b1, b2, wheel=2310, output=True): """Elliptic Curve Factorization Method. In each round, the following steps are performed: 0. Generate random point and curve. 1. Repeatedly multiply the current point by small primes raised to some power, determined by b1. 2. Standard continuation from b1 to b2 with Brent-Suyama's Extension and Polyeval. Returns when a non-trivial factor is found. Args: n (int): Number to be factorized. n >= 12. rounds (int): Number of random curves to try. b1 (int): Bound for primes used in step 1. b2 (int): Bound for primes searched for in step 2. b1 < b2. wheel (int, optional): Wheel, where only numbers coprime to wheel will be considered in step 2. Defaults to 2310. output (bool, optional): Whether to print progress to stdout. Defaults to True. Raises: ValueError: Thrown when n < 12. Returns: int: Non-trivial factor if found, otherwise returns None. """ if n < 12: raise ValueError j_list = [j for j in range(1, wheel // 2) if gcd(j, wheel) == 1] block_size = 1 << (len(j_list) - 1).bit_length() - 1 for round_i in range(rounds): if output: st = time.time() print("Round {}...".format(round_i)) count = 0 success = False while not success and count < 20: try: count += 1 sigma = random.randint(6, n - 6) mnt_pt, mnt_curve = mnt.get_curve_suyama(sigma, n) success = True except InverseNotFound as e: res = gcd(e.x, n) if 1 < res < n: return res except CurveInitFail: pass if not success: print(" - Curve Init Failed.") break try: # Step 1 if output: print("{:>5.2f}: Step 1".format(time.time() - st)) for p in PRIME_GEN(b1): for _ in range(int(np.log(b1) / np.log(p))): mnt_pt = mnt.mul_pt_exn(mnt_pt, mnt_curve, p) # Step 2 if output: print("{:>5.2f}: Step 2".format(time.time() - st)) polynomial = (2, 0, 9, 0, 6, 0, 1) # f(x) = x^6 + 6x^4 + 9x^2 + 2 q, wst_curve = mnt.to_weierstrass(mnt_pt, mnt_curve) c1 = b1 // wheel c2 = b2 // wheel + 2 c = 0 k_ls = [ apply_polynomial(polynomial, j) for j in j_list ] + get_difference_seq(polynomial, c1 * wheel, wheel) mul_res = wst.mul_pt_multi(q, wst_curve, k_ls) xj_list = [] for i in range(len(j_list)): xj_list.append(mul_res[i][0]) cq_list = mul_res[len(j_list) :] f_tree = product_tree([Polynomial([n - xj, 1], n) for xj in xj_list], n) f_recip_tree = recip_tree(f_tree) H = Polynomial([1], n) g_poly_list = [] while c < c2 - c1: for _ in range(min(block_size, c2 - c1 - c)): g_poly_list.append(Polynomial([n - cq_list[0][0], 1], n)) step_difference_seq_exn(cq_list, wst_curve) c += 1 G = product_tree(g_poly_list, n)[0] H = (H * G).mod_with_recip(f_tree[0], f_recip_tree[0]) g_poly_list.clear() rem_tree = remainder_tree(H, f_tree, f_recip_tree, n) res = gcd(rem_tree[0], n) if 1 < res < n: return res elif res == n: for rem in rem_tree[len(rem_tree) // 2 :]: res = gcd(rem, n) if 1 < res < n: return res assert False if output: print("{:>5.2f}: End".format(time.time() - st)) except InverseNotFound as e: res = gcd(e.x, n) if 1 < res < n: return res return None
5,347,669
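# Usage sketch for ecm() above -- illustrative only, assuming the module-level
# helpers it relies on (PRIME_GEN, mnt, wst, Polynomial, etc.) are available.
# The bounds below are small demonstration values; a factor is likely but not
# guaranteed to be found within the given number of rounds.
n = 1000003 * 1000033          # composite with two known prime factors
factor = ecm(n, rounds=20, b1=10000, b2=100000, output=False)
print(factor)                  # a non-trivial factor of n, or None if unlucky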
def FBSleep(MilliSeconds):
    """
    Sleep function
    Pauses execution for the specified amount of time.
    MilliSeconds : Time to sleep for, in milliseconds.
    """
    pass
5,347,670
def raise_min_sdk_version(doc, min_sdk_version, target_sdk_version, library): """Ensure the manifest contains a <uses-sdk> tag with a minSdkVersion. Args: doc: The XML document. May be modified by this function. min_sdk_version: The requested minSdkVersion attribute. target_sdk_version: The requested targetSdkVersion attribute. library: True if the manifest is for a library. Raises: RuntimeError: invalid manifest """ manifest = parse_manifest(doc) # Get or insert the uses-sdk element uses_sdk = get_children_with_tag(manifest, 'uses-sdk') if len(uses_sdk) > 1: raise RuntimeError('found multiple uses-sdk elements') elif len(uses_sdk) == 1: element = uses_sdk[0] else: element = doc.createElement('uses-sdk') indent = get_indent(manifest.firstChild, 1) manifest.insertBefore(element, manifest.firstChild) # Insert an indent before uses-sdk to line it up with the indentation of the # other children of the <manifest> tag. manifest.insertBefore(doc.createTextNode(indent), manifest.firstChild) # Get or insert the minSdkVersion attribute. If it is already present, make # sure it as least the requested value. min_attr = element.getAttributeNodeNS(android_ns, 'minSdkVersion') if min_attr is None: min_attr = doc.createAttributeNS(android_ns, 'android:minSdkVersion') min_attr.value = min_sdk_version element.setAttributeNode(min_attr) else: if compare_version_gt(min_sdk_version, min_attr.value): min_attr.value = min_sdk_version # Insert the targetSdkVersion attribute if it is missing. If it is already # present leave it as is. target_attr = element.getAttributeNodeNS(android_ns, 'targetSdkVersion') if target_attr is None: target_attr = doc.createAttributeNS(android_ns, 'android:targetSdkVersion') if library: # TODO(b/117122200): libraries shouldn't set targetSdkVersion at all, but # ManifestMerger treats minSdkVersion="Q" as targetSdkVersion="Q" if it # is empty. Set it to something low so that it will be overriden by the # main manifest, but high enough that it doesn't cause implicit # permissions grants. target_attr.value = '16' else: target_attr.value = target_sdk_version element.setAttributeNode(target_attr)
5,347,671
def dc_mode_option(update: Update, context: CallbackContext) -> Optional[int]:
    """Get don't care response mode option"""
    ndc = context.user_data[0]
    if ndc.response_mode == DoesntCare.ResponseMode.TIME:
        if not re.match(r"[0-9]+:[0-9]+:[0-9]+", update.effective_message.text):
            update.effective_message.reply_text(
                'Invalid time format, please send in this format: Hours:Minutes:Seconds')
            return None
        hms = update.effective_message.text.split(':')
        ndc.response_mode_option = \
            datetime.timedelta(hours=int(hms[0]), minutes=int(hms[1]), seconds=int(hms[2])).total_seconds()
    else:
        if ((not update.effective_message.text.isdigit()) or
                (not (int(update.effective_message.text) > 1))):
            update.effective_message.reply_text('Invalid number. Please send a positive integer greater than 1.')
            return None
        ndc.response_mode_option = float(update.effective_message.text)

    if ndc.add():
        update.effective_message.reply_text("Added user to your don't care list!")
        logging.info(
            "Add: DCU: \"{}\", NIU: \"{}\", Chat: \"{}\", RM: \"{}\", RMO: \"{}\""
            .format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id,
                    ndc.response_mode, ndc.response_mode_option)
        )
    else:
        update.effective_message.reply_text("Sorry, an error occurred! Please try again later.")
        logging.error(
            "Add, DCU: \"{}\", NIU: \"{}\", Chat: \"{}\""
            .format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id)
        )
    return ConversationHandler.END
5,347,672
def is_in_period(datetime_, start, end):
    """Return True if datetime_ falls within the period from start (inclusive) to end (exclusive)."""
    return start <= datetime_ < end
5,347,673
def consume(url): """Consume messages""" ctx = zmq.Context.instance() s = ctx.socket(zmq.PULL) s.connect(url) print("Consuming") for i in range(MSGS * PRODUCERS): msg = s.recv() print(msg.decode('ascii')) print("Consumer done") s.close()
5,347,674
def set_shared_params(a, b): """ Args: a (chainer.Link): link whose params are to be replaced b (dict): dict that consists of (param_name, multiprocessing.Array) """ assert isinstance(a, chainer.Link) for param_name, param in a.namedparams(): if param_name in b: shared_param = b[param_name] param.data = np.frombuffer( shared_param, dtype=param.data.dtype).reshape(param.data.shape)
5,347,675
def create_experiment_summary():
    """Returns a summary protocol buffer holding this experiment"""
    # Convert TEMPERATURE_LIST to google.protobuf.ListValue.
    # Note: ListValue.extend() returns None, so it must be called on its own
    # line rather than chained onto the constructor.
    temperature_list = struct_pb2.ListValue()
    temperature_list.extend(TEMPERATURE_LIST)
    return summary.experiment_pb(
        hparam_infos=[
            api_pb2.HParamInfo(name="initial_temperature",
                               display_name="initial temperature",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list),
            api_pb2.HParamInfo(name="ambient_temperature",
                               display_name="ambient temperature",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list),
            api_pb2.HParamInfo(name="heat_coefficient",
                               display_name="heat coefficient",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list)
        ],
        metric_infos=[
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="temparature/current/scalar_summary"),
                display_name="Current Temp."),
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="temparature/difference_to_ambient/scalar_summary"),
                display_name="Difference To Ambient Temp."),
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="delta/scalar_summary"),
                display_name="Delta T")
        ]
    )
5,347,676
def get_session(token, custom_session=None): """Get requests session with authorization headers Args: token (str): Top secret GitHub access token custom_session: e.g. betamax's session Returns: :class:`requests.sessions.Session`: Session """ session = custom_session or requests.Session() session.headers = { "Authorization": "token " + token, "User-Agent": "testapp" } return session
5,347,677
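# Usage sketch for get_session() above. The token below is a placeholder, and
# the GitHub endpoint is only an illustration of how the session might be used.
session = get_session("ghp_example_token")        # hypothetical token
response = session.get("https://api.github.com/user")
print(response.status_code)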
def mass_to_tbint_to_energy_map(dpath, filterfn=lambda x: True, fpath_list=None): """Given a directory, creates a mapping mass number -> ( a, b, c, d, j -> energy ) using the files in the directory :param fpath_list: :param dpath: the directory which is a direct parent to the files from which to generate the map :param filterfn: the filter function to apply to the files before constructing the map """ mida_map = _mass_tbme_data_map( dpath, filterfn, fpath_list) for k in mida_map.keys(): v = mida_map[k] nextv = dict() for row in v: tup = tuple(row[0:6]) energy = float(row[6]) nextv[tup] = energy mida_map[k] = nextv return mida_map
5,347,678
def strip_accents(text): """ Strip accents from input String. :param text: The input string. :type text: String. :returns: The processed String. :rtype: String. """ text = unicodedata.normalize('NFD', text) text = text.encode('ascii', 'ignore') text = text.decode("utf-8") return str(text)
5,347,679
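# Example for strip_accents() above: accented characters are decomposed and the
# combining marks dropped; plain ASCII passes through unchanged.
print(strip_accents("café crème"))    # "cafe creme"
print(strip_accents("naïve façade"))  # "naive facade"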
def write_png(filename, image): """Writes a PNG image file.""" image = tf.squeeze(image, 0) if image.dtype.is_floating: image = tf.round(image) if image.dtype != tf.uint8: image = tf.saturate_cast(image, tf.uint8) string = tf.image.encode_png(image) tf.io.write_file(filename, string)
5,347,680
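# Usage sketch for write_png() above, assuming TensorFlow 2.x. The input is a
# batched float image; the function squeezes the batch dimension, rounds, and
# saturate-casts to uint8 before encoding.
import tensorflow as tf

image = tf.random.uniform([1, 32, 32, 3], minval=0.0, maxval=255.0)
write_png("example.png", image)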
def json_formatter(result, _verbose): """Format result as json.""" if isinstance(result, list) and "data" in result[0]: res = [json.dumps(record) for record in result[0]["data"]] output = "\n".join(res) else: output = json.dumps(result, indent=4, sort_keys=True) return output
5,347,681
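# Examples for json_formatter() above. The first call exercises the
# record-list branch, the second the generic pretty-printing branch.
print(json_formatter([{"data": [{"id": 1}, {"id": 2}]}], _verbose=False))
print(json_formatter({"status": "ok", "count": 2}, _verbose=False))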
def show_all_companies(): """Show all companies a user has interest in.""" # redirect if user is not logged in if not session: return redirect('/') else: # get user_id from session user_id = session['user_id'] user = User.query.filter(User.user_id == user_id).one() user_companies = user.companies companies = {} for company in user_companies: count = Job.query.filter(Job.company_id == company.company_id).count() companies[company] = count return render_template('companies.html', companies=companies)
5,347,682
def add_atstop_proc(func):
    """Register a function to be executed when the serving server stops."""
    global at_stop_list
    at_stop_list.append(func)
5,347,683
def run_sim(G, numsteps=100): """ Run a simulation for numsteps steps and plot the resulting curves :param G: A networkx graph :param numsteps: The number of steps to run the simulation for :return: None """ num_s = [] num_i = [] num_r = [] for i in range(numsteps): update(G) num_s.append(get_num_s(G)) num_i.append(get_num_i(G)) num_r.append(get_num_r(G)) x = list(range(numsteps)) plt.plot(x, num_s, label='Susceptible') plt.plot(x, num_i, label='Infected') plt.plot(x, num_r, label='Recovered') plt.legend() plt.show()
5,347,684
def test_duo_one_disconnect(opt, server_url): """Tests whether disconnects properly cause a task to fail and let the non-disconnecting partner complete the HIT. Also tests reconnecting after a partner disconnect or after a disconnect. """ global completed_threads print('{} Starting'.format(DUO_ONE_DISCONNECT_TEST)) opt['task'] = DUO_ONE_DISCONNECT_TEST hit_id = FAKE_HIT_ID.format(DUO_ONE_DISCONNECT_TEST) assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 1) worker_id_1 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 1) assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 2) worker_id_2 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 2) connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1) connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2) message_num = 0 partner_disconnects = 0 self_disconnects = 0 expected_messages = [ TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2, MTURK_DISCONNECT_MESSAGE ] mturk_agent_id_1 = AGENT_1_ID mturk_agent_id_2 = AGENT_2_ID mturk_manager = MTurkManager( opt=opt, mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2], is_test=True ) mturk_manager.server_url = server_url mturk_manager.start_new_run() task_group_id = mturk_manager.task_group_id world_thread = threading.Thread(target=run_duo_world, args=(opt, mturk_manager, False)) world_thread.daemon = True world_thread.start() # create and set up the first agent test_agent_1 = MockAgent(opt, hit_id, assign_id_1, worker_id_1, task_group_id) def msg_callback_1(packet): nonlocal message_num nonlocal test_agent_1 nonlocal partner_disconnects nonlocal self_disconnects if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND: if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE: test_agent_1.wants_to_send = True elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE: partner_disconnects += 1 elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT: self_disconnects += 1 elif test_agent_1.conversation_id is not None and \ test_agent_1.conversation_id.startswith('t_'): assert packet.data['text'] == expected_messages[message_num], \ 'Expected {} for message {}, got {}'.format( expected_messages[message_num], message_num, packet.data['text'] ) message_num += 1 message_handler_1 = \ make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1) test_agent_1.setup_socket(server_url, message_handler_1) test_agent_1.wait_for_alive() wait_for_state_time(2, mturk_manager) # Assert that the state was properly set up check_new_agent_setup(test_agent_1, mturk_manager, AssignState.STATUS_WAITING) mturk_manager_assign_1 = \ mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1] assign_state_1 = mturk_manager_assign_1.state # Start heartbeats for 1 test_agent_1.always_beat = True test_agent_1.send_heartbeat() wait_for_state_time(2, mturk_manager) # Ensure agent 1 is sitting in a waiting world now assert test_agent_1.conversation_id.startswith('w_'), \ 'Mock agent didn\'t make it to waiting' check_status(assign_state_1.status, AssignState.STATUS_WAITING) # Set up the second agent test_agent_2 = MockAgent(opt, hit_id, assign_id_2, worker_id_2, task_group_id) def msg_callback_2(packet): nonlocal message_num nonlocal test_agent_2 nonlocal partner_disconnects nonlocal self_disconnects if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND: if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE: test_agent_2.wants_to_send = True elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE: partner_disconnects += 1 elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT: 
self_disconnects += 1 elif test_agent_2.conversation_id is not None and \ test_agent_2.conversation_id.startswith('t_'): assert packet.data['text'] == expected_messages[message_num], \ 'Expected {} for message {}, got {}'.format( expected_messages[message_num], message_num, packet.data['text'] ) message_num += 1 message_handler_2 = \ make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2) test_agent_2.setup_socket(server_url, message_handler_2) test_agent_2.wait_for_alive() wait_for_state_time(2.5, mturk_manager) # Assert that the state was properly set up check_new_agent_setup(test_agent_2, mturk_manager, AssignState.STATUS_IN_TASK) mturk_manager_assign_2 = \ mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2] assign_state_2 = mturk_manager_assign_2.state # Start heartbeats for 2 test_agent_2.always_beat = True test_agent_2.send_heartbeat() wait_for_state_time(2.5, mturk_manager) # Ensure both agents are in a task world assert test_agent_1.conversation_id.startswith('t_'), \ 'Mock agent didn\'t make it to task world' assert test_agent_2.conversation_id.startswith('t_'), \ 'Mock agent didn\'t make it to task world' check_status(assign_state_1.status, AssignState.STATUS_IN_TASK) check_status(assign_state_2.status, AssignState.STATUS_IN_TASK) first_agent = None second_agent = None mturk_first_agent = None mturk_second_agent = None assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \ 'Neither agent is ready to send a message after arriving in task' if test_agent_1.wants_to_send: first_agent = test_agent_1 second_agent = test_agent_2 mturk_first_agent = mturk_manager_assign_1 mturk_second_agent = mturk_manager_assign_2 else: second_agent = test_agent_1 first_agent = test_agent_2 mturk_second_agent = mturk_manager_assign_1 mturk_first_agent = mturk_manager_assign_2 # Step through the task first_agent.send_message(expected_messages[message_num]) wait_for_state_time(2, mturk_manager) second_agent.send_message(expected_messages[message_num]) # Disconnect the first agent first_agent.always_beat = False wait_for_state_time(2, mturk_manager) wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager) assert partner_disconnects == 1, \ 'Connected agent did not recieve an inactive_done command' # Refresh the second agent second_agent.conversation_id = None second_agent.send_alive() wait_for_state_time(2, mturk_manager) assert partner_disconnects == 2, \ 'Reconnected agent did not recieve an inactive_done command' # Refresh the first agent first_agent.conversation_id = None first_agent.send_alive() wait_for_state_time(2, mturk_manager) assert self_disconnects == 1, \ 'Disconnected agent did not recieve an inactive command' # Disconnect the second agent second_agent.always_beat = False wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager) check_status(mturk_second_agent.state.status, AssignState.STATUS_PARTNER_DISCONNECT) check_status(mturk_first_agent.state.status, AssignState.STATUS_DISCONNECT) assert mturk_manager.completed_conversations == 0, \ 'Incomplete conversation marked as complete' assert mturk_second_agent.disconnected == False, \ 'MTurk manager improperly marked the connected agent as disconnected' assert mturk_first_agent.disconnected == True, \ 'MTurk did not mark the disconnected agent as so' assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \ 'The socket manager didn\'t close the socket upon failure of the ' \ 'task, though it should have' assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \ 'The socket manager 
didn\'t close the socket upon failure of the ' \ 'task, though it should have' completed_threads[DUO_ONE_DISCONNECT_TEST] = True
5,347,685
def ll_combined_grad(x, item_ids, judge_ids, pairwise=[], individual=[]): """ This function computes the _negative_ gradient of the loglikelihood for each parameter in x, for both the individual and pairwise data. Keyword arguments: x -- the current parameter estimates. item_ids -- the ids of the items being evaluated judge_ids -- the ids of the judges being evaluted pairwise -- an iterator for the pairwise ratings individual -- an iterator for the individual ratings >>> ll_combined_grad([0,0,1,1,3,1], [0,1], [0], [], []) array([-0. , -0. , -0. , -1.33333333, 2. , -0. ]) """ item_val = {i:idx for idx, i in enumerate(item_ids)} discrim = {i:idx + len(item_val) for idx, i in enumerate(judge_ids)} bias = {i:idx + len(item_val) + len(judge_ids) for idx, i in enumerate(judge_ids)} precision = {i:idx + len(item_val) + 2*len(judge_ids) for idx, i in enumerate(judge_ids)} likert_mean = x[-1] likert_prec = x[-2] grad = np.zeros(len(x)) #grad = np.array([0.0 for v in x]) for r in pairwise: left = x[item_val[r.left.id]] right = x[item_val[r.right.id]] d = x[discrim[r.judge.id]] y = r.value z = d * (left - right) #z = (left - right) p = invlogit(z) g = y - p #grad[item_val[r.left.id]] += g #grad[item_val[r.right.id]] += -1 * g grad[item_val[r.left.id]] += d * g grad[item_val[r.right.id]] += -1 * d * g grad[discrim[r.judge.id]] += (left - right) * g for l in individual: u = x[item_val[l.item.id]] b = x[bias[l.judge.id]] prec = x[precision[l.judge.id]] #n = sqrt(1/prec) p0 = likert_prec s = 1 / sqrt(p0) error = (l.value - likert_mean - s * (b + u)) grad[item_val[l.item.id]] += prec * p0 * error * s grad[bias[l.judge.id]] += prec * p0 * error * s grad[-1] += prec * p0 * error grad[precision[l.judge.id]] += (1 / (2 * prec)) - (p0 / 2) * (error * error) grad[-2] += (1 / (2 * p0)) - (prec / 2) * ((b + u) * s * error + error * error) #error = (l.value - likert_mean - b - u) #grad[item_val[l.item.id]] += prec * error #grad[bias[l.judge.id]] += prec * error #grad[-1] += prec * error # likert mean #grad[precision[l.judge.id]] += (1 / (2 * prec)) - (error * error)/2 # Regularization # Normal prior on means item_reg = np.array([0.0 for v in x]) for i in item_val: item_reg[item_val[i]] += (x[item_val[i]] - item_mean) item_reg = -1 * item_prec * item_reg #item_reg = (-1.0 / (item_std * item_std)) * item_reg # Normal prior on discriminations judge_reg = np.array([0.0 for v in x]) for i in discrim: judge_reg[discrim[i]] += (x[discrim[i]] - discrim_mean) judge_reg = -1 * discrim_prec * judge_reg #judge_reg = (-1.0 / (discrim_std * discrim_std)) * judge_reg # Normal prior on bias bias_reg = np.array([0.0 for v in x]) for i in bias: bias_reg[bias[i]] += (x[bias[i]] - bias_mean) bias_reg = (-1.0 / (bias_std * bias_std)) * bias_reg # Normal prior on noise prec_reg = np.array([0.0 for v in x]) for i in precision: prec_reg[precision[i]] += (x[precision[i]] - prec_mean) prec_reg = (-1.0 / (prec_std * prec_std)) * prec_reg return -1 * (grad + item_reg + judge_reg + bias_reg + prec_reg)
5,347,686
def relu(inp):  # ReLU activation function
    """
    ReLU neural network activation function
    :param inp: Node value before activation
    :return: Node value after activation
    """
    # np.maximum takes the element-wise maximum with 0;
    # np.max(inp, 0) would instead reduce along axis 0.
    return np.maximum(inp, 0)
5,347,687
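# Example for relu() above: negative entries are clipped to 0 element-wise.
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(relu(x))  # [0.  0.  0.  1.5 3. ]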
def getCmd(snmpEngine, authData, transportTarget, contextData, *varBinds, **options): """Creates a generator to perform one or more SNMP GET queries. On each iteration, new SNMP GET request is send (:RFC:`1905#section-4.2.1`). The iterator blocks waiting for response to arrive or error to occur. Parameters ---------- snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :py:class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType` One or more class instances representing MIB variables to place into SNMP request. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. Yields ------ errorIndication : str True value indicates SNMP engine error. errorStatus : str True value indicates SNMP PDU error. errorIndex : int Non-zero value refers to `varBinds[errorIndex-1]` varBinds : tuple A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing MIB variables returned in SNMP response. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Notes ----- The `getCmd` generator will be exhausted immidiately unless a new sequence of `varBinds` are send back into running generator (supported since Python 2.6). Examples -------- >>> from pysnmp.hlapi.asyncore import * >>> g = getCmd(SnmpEngine(), ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 161)), ... ContextData(), ... ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0))) >>> next(g) (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]) >>> """ def cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx): cbCtx['errorIndication'] = errorIndication cbCtx['errorStatus'] = errorStatus cbCtx['errorIndex'] = errorIndex cbCtx['varBinds'] = varBinds cbCtx = {} while True: if varBinds: cmdgen.getCmd(snmpEngine, authData, transportTarget, contextData, *varBinds, **dict(cbFun=cbFun, cbCtx=cbCtx, lookupMib=options.get('lookupMib', True))) snmpEngine.transportDispatcher.runDispatcher() errorIndication = cbCtx['errorIndication'] errorStatus = cbCtx['errorStatus'] errorIndex = cbCtx['errorIndex'] varBinds = cbCtx['varBinds'] else: errorIndication = errorStatus = errorIndex = None varBinds = [] varBinds = (yield errorIndication, errorStatus, errorIndex, varBinds) if not varBinds: break
5,347,688
def augment_sentence(tokens: List[str], augmentations: List[Tuple[List[tuple], int, int]], begin_entity_token: str, sep_token: str, relation_sep_token: str, end_entity_token: str) -> str: """ Augment a sentence by adding tags in the specified positions. Args: tokens: Tokens of the sentence to augment. augmentations: List of tuples (tags, start, end). begin_entity_token: Beginning token for an entity, e.g. '[' sep_token: Separator token, e.g. '|' relation_sep_token: Separator token for relations, e.g. '=' end_entity_token: End token for an entity e.g. ']' An example follows. tokens: ['Tolkien', 'was', 'born', 'here'] augmentations: [ ([('person',), ('born in', 'here')], 0, 1), ([('location',)], 3, 4), ] output augmented sentence: [ Tolkien | person | born in = here ] was born [ here | location ] """ # sort entities by start position, longer entities first augmentations = list(sorted(augmentations, key=lambda z: (z[1], -z[2]))) # check that the entities have a tree structure (if two entities overlap, then one is contained in # the other), and build the entity tree root = -1 # each node is represented by its position in the list of augmentations, except that the root is -1 entity_tree = {root: []} # list of children of each node current_stack = [root] # where we are in the tree for j, x in enumerate(augmentations): tags, start, end = x if any(augmentations[k][1] < start < augmentations[k][2] < end for k in current_stack): # tree structure is not satisfied! logging.warning(f'Tree structure is not satisfied! Dropping annotation {x}') continue while current_stack[-1] >= 0 and \ not (augmentations[current_stack[-1]][1] <= start <= end <= augmentations[current_stack[-1]][2]): current_stack.pop() # add as a child of its father entity_tree[current_stack[-1]].append(j) # update stack current_stack.append(j) # create empty list of children for this new node entity_tree[j] = [] return ' '.join(expand_tokens( tokens, augmentations, entity_tree, root, begin_entity_token, sep_token, relation_sep_token, end_entity_token ))
5,347,689
def evaluate_model_recall_precision(mat, num_items, testRatings, K_recall, K_precision, num_thread):
    """
    Evaluate the performance (Recall, Precision) of top-K recommendation
    Return: lists of recall and precision scores, one per test rating.
    """
    global _mat
    global _testRatings
    global _K_recall
    global _K_precision
    global _K_max
    global _num_items
    _mat = mat
    _testRatings = testRatings
    _K_recall = K_recall
    _K_precision = K_precision
    _K_max = max(_K_precision, _K_recall)
    _num_items = num_items

    recalls, precisions = [], []
    if (num_thread > 1):  # Multi-thread
        pool = multiprocessing.Pool(processes=num_thread)
        res = pool.map(eval_recall_precision, range(len(_testRatings)))
        pool.close()
        pool.join()
        recalls = [r[0] for r in res]
        precisions = [r[1] for r in res]
        return (recalls, precisions)
    # Single thread
    for idx in range(len(_testRatings)):
        (recall, precision) = eval_recall_precision(idx)
        recalls.append(recall)
        precisions.append(precision)
    return (recalls, precisions)
5,347,690
def pipe_hoop_stress(P, D, t): """Calculate the hoop (circumferential) stress in a pipe using Barlow's formula. Refs: https://en.wikipedia.org/wiki/Barlow%27s_formula https://en.wikipedia.org/wiki/Cylinder_stress :param P: the internal pressure in the pipe. :type P: float :param D: the outer diameter of the pipe. :type D: float :param t: the pipe wall thickness. :type t: float :returns: the hoop stress in the pipe. :rtype: float """ return P * D / 2 / t
5,347,691
def node_constraints(node): """ Returns all constraints a node is linked to :param node: str :return: list(str) """ return maya.cmds.listRelatives(node, type='constraint')
5,347,692
def save_data_file(sourceFile, destination = None, subdirectory = None, user = None, verbose = True): """ Function used to save (i.e copy) a data file into a directory of choice after an experimental session Parameters: sourceFile - the path of the file that was generated by the experimental session and that resides in the local file system. destination - An optional destination path where to save the file. File name may be included or not at the end of the path. subdirectory - An optional subdirectory, i.e folder, to add to the destination path. For example, if the destination path is a folder called "experiments", the subdirectory can be a child folder of "experiments", named after the experiment type ("behaviour" for instance). user - An optional parameter to indicate which user is conducting the experiments. If supplied, and if no destination is passed, a configuration file is looked up to retrieve the folder into which the user is usually copying data files. If no destination and no user is provided, a default directory is looked up in the configuration file as the default destination of the file to be copied. Either way, a save as dialog box will appear and the user will have final say. """ # Validate file parameter passed. Also check to see if the path provided is lacking the default .h5 extension if not os.path.exists(sourceFile): if not os.path.exists(sourceFile+".h5"): # Error message if the source file path could not be found in the system error(None,"Woah there!\n\n1. Couldn't find the file that you want to copy.\ \n2. Check to see if it exists in the file system and the path provided is correct"\ , "File Finding Police report") return else: # File exists but has an extension and one was not provided in the path given. # Add it to file path descriptor sourceFile += ".h5" # information(None, "the filename of source provided lacked the \".h5\" extension.\ # \n\nA file with the extension was found and presumed to be the source meant"\ # ,"Path Police report") # Get file extension fileExtension = os.path.splitext(sourceFile)[-1] # Get the destination file name from the path provided destinationFile = os.path.split(sourceFile)[-1] destinationFolder = "" # If file has no extension, add the default .h5 extension to destination file name if fileExtension == "": warning(None, "The file you are trying to save has no extension\n\nAdding \".h5\" to the name of destination file"\ , ".h5 Extension Police") destinationFile = file + ".h5" # The file provided has different extension. Display a warning but do nothing. elif fileExtension != ".h5": warning(None, "Your file to be copied does not have an \".h5\" extension\n\nNo action taken."\ , "h5 Extension Police") # Display confirmation dialog for copying the file dlg = ConfirmationDialog(title = "You there!", yes_label = "Yes Please!", no_label = "Nah...", message = "Would you like to copy the data file generated after the session?\ \n\nIf you say Nah... and change your mind, you'll have to copy it manually later") # Open the dialog GUI dlg.open() # User provided a destination path if destination: # Check to see if destination is a file name with an extension. destinationExtension = os.path.splitext(destination)[-1] if destinationExtension: # Is it .h5? If not, warn but don't override. 
if destinationExtension != ".h5": warning(None, "Your destination filename does not have an \".h5\" extension\n\nNo action taken."\ , "h5 Extension Police") destinationFolder, destinationFile = os.path.split(destination) # Assume destination is directory since there is no extension. else: destinationFolder = destination # Look up a default destination from the config file since no <destination> parameter was provided. else: configFile = os.environ.get("Voyeur_config") config = ConfigObj(configFile) # A user specific folder was provided. if user: destinationFolder = config['server']['folder']['data']['user'] # Use default data folder as read from the config file. else: destinationFolder = config['server']['folder']['data']['default'] # User provided a subdirectory, i.e subfolder, into which to place the file. if subdirectory: # The subdirectory provided has common path with the directory provided. Display warning but do nothing. if os.path.commonprefix((destination,subdirectory)): warning(None, "Friendly warning!\n<subdirectory> parameter provided has a common path with the <destination>\ path parameter\n\n1. No action taken.\n2. Check your final destination path to make sure it is what you want"\ , "Path Police report") destinationFolder = os.path.join(destinationFolder,subdirectory) # Path of the destination of file to be copied. destinationPath = os.path.join(destinationFolder,destinationFile) if dlg.return_code == YES: # A file with same name exists. if os.path.isfile(destinationPath): warning(None, "A file with given path already exists!\n\n1. No action taken\ \n2. Make sure to either rename file or choose different folder", "Path Police report") # Provided folder does not exist. Make one and inform the user. elif not os.path.isdir(destinationFolder): information(None, "Making a new folder to put the file into...", "Information Transparency report") # TODO: What if this results in an exception? Catch and do something? # TODO: Keep track of made directories so we may delete them later os.makedirs(os.path.abspath(destinationFolder)) # The save as dialog box. # TODO: change wildcard to current extension wildcard dialog = FileDialog(action="save as", title = "Select directory into which the data file will be copied",\ wildcard = "*.*", default_directory = destinationFolder, default_filename = destinationFile) #*.h5||| elif dlg.return_code == NO and verbose: information(None, "No file was copied.\n\nIf you change your mind, you will have to transfer the data file manually."\ , "Information Transparency report") return dialog.open() # User clicked Save and successful input received. if dialog.return_code == OK: # The actual copying of the file. TODO: See if the copy2 function throws an exception copy2(sourceFile, dialog.path) # The user clicked Cancel. elif dialog.return_code == CANCEL: information(None, "No file was copied.\n\nIf you change your mind, you will have to transfer the data file manually."\ , "Information Transparency report") #TODO: update the Voyeur config file after asking user return dialog.path
5,347,693
def parse_file_name(filename): """ Parse the file name of a DUD mol2 file to get the target name and the y label :param filename: the filename string :return: protein target name, y_label string (ligand or decoy) """ bname = os.path.basename(filename) splitted_bname = bname.split('_') if len(splitted_bname) == 3: target_name = splitted_bname[0] y_label_str = splitted_bname[1] elif len(splitted_bname) == 4: target_name = '_'.join([splitted_bname[0], splitted_bname[1]]) y_label_str = splitted_bname[2] else: raise ValueError('File name has not expected format. Can not parse file name.') if y_label_str == 'decoys': y_label = 0 elif y_label_str == 'ligands': y_label = 1 else: raise ValueError('File name has not expected format. Can not parse file name.') return target_name, y_label
5,347,694
def load_opencv_stereo_calibration(path):
    """
    Load stereo calibration information from an xml file
    @type path: str
    @param path: path to the xml file
    @return: stereo calibration loaded from the given xml file
    @rtype: calib.data.StereoRig
    """
    tree = etree.parse(path)
    stereo_calib_elem = tree.find("Rig")
    return rig.Rig.from_xml(stereo_calib_elem)
5,347,695
def _IsSingleElementTuple(token): """Check if it's a single-element tuple.""" close = token.matching_bracket token = token.next_token num_commas = 0 while token != close: if token.value == ',': num_commas += 1 if token.OpensScope(): token = token.matching_bracket else: token = token.next_token return num_commas == 1
5,347,696
def exportBufferView(gltf: GLTF2, primaryBufferIndex: int, byteOffset: int, byteLength: int) -> GLTFIndex: """Creates a glTF bufferView with the specified offset and length, referencing the default glB buffer. Args: gltf: Gltf object to append new buffer onto. primaryBufferIndex: Index of the primary glb buffer. byteOffset: Index of the starting byte in the referenced buffer. byteLength: Length in bytes of the bufferView. Returns: The index of the exported bufferView in the glTF bufferViews list. """ bufferView = BufferView() bufferView.buffer = primaryBufferIndex # index of the default glB buffer. bufferView.byteOffset = byteOffset bufferView.byteLength = byteLength return appendGetIndex(gltf.bufferViews, bufferView)
5,347,697
def ReadCan(filename): """Reads the candump in filename and returns the 4 fields.""" trigger = [] trigger_velocity = [] trigger_torque = [] trigger_current = [] wheel = [] wheel_velocity = [] wheel_torque = [] wheel_current = [] trigger_request_time = [0.0] trigger_request_current = [0.0] wheel_request_time = [0.0] wheel_request_current = [0.0] with open(filename, 'r') as fd: for line in fd: data = line.split() can_id = int(data[1], 16) if can_id == 0: data = [int(d, 16) for d in data[3:]] trigger.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0) trigger_velocity.append( ((data[2] + (data[3] << 8)) - 32768) / 32768.0) trigger_torque.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) trigger_current.append( ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0) elif can_id == 1: data = [int(d, 16) for d in data[3:]] wheel.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0) wheel_velocity.append( ((data[2] + (data[3] << 8)) - 32768) / 32768.0) wheel_torque.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) wheel_current.append( ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0) elif can_id == 2: data = [int(d, 16) for d in data[3:]] trigger_request_current.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) trigger_request_time.append(len(trigger) * 0.001) elif can_id == 3: data = [int(d, 16) for d in data[3:]] wheel_request_current.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) wheel_request_time.append(len(wheel) * 0.001) trigger_data_time = numpy.arange(0, len(trigger)) * 0.001 wheel_data_time = numpy.arange(0, len(wheel)) * 0.001 # Extend out the data in the interpolation table. trigger_request_time.append(trigger_data_time[-1]) trigger_request_current.append(trigger_request_current[-1]) wheel_request_time.append(wheel_data_time[-1]) wheel_request_current.append(wheel_request_current[-1]) return (trigger_data_time, wheel_data_time, trigger, wheel, trigger_velocity, wheel_velocity, trigger_torque, wheel_torque, trigger_current, wheel_current, trigger_request_time, trigger_request_current, wheel_request_time, wheel_request_current)
5,347,698
def retrieve(func): """ Decorator for Zotero read API methods; calls _retrieve_data() and passes the result to the correct processor, based on a lookup """ def wrapped_f(self, *args, **kwargs): """ Returns result of _retrieve_data() func's return value is part of a URI, and it's this which is intercepted and passed to _retrieve_data: '/users/123/items?key=abc123' the atom doc returned by _retrieve_data is then passed to _etags in order to extract the etag attributes from each entry, then to feedparser, then to the correct processor """ if kwargs: self.add_parameters(**kwargs) retrieved = self._retrieve_data(func(self, *args)) # determine content and format, based on url params content = self.content.search( self.request.get_full_url()) and \ self.content.search( self.request.get_full_url()).group(0) or 'bib' fmt = self.fmt.search( self.request.get_full_url()) and \ self.fmt.search( self.request.get_full_url()).group(0) or 'atom' # step 1: process atom if it's atom-formatted if fmt == 'atom': parsed = feedparser.parse(retrieved) processor = self.processors.get(content) # step 2: if the content is JSON, extract its etags if processor == self._json_processor: self.etags = etags(retrieved) # extract next, previous, first, last links self.links = self._extract_links(parsed) return processor(parsed) # otherwise, just return the unparsed content as is else: return retrieved return wrapped_f
5,347,699