content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def guess_initializer(var, graph=None):
    """Helper function to guess the initializer of a variable.

    The function looks at the operations in the initializer name space for the
    variable (e.g. my_scope/my_var_name/Initializer/*). The TF core
    initializers have characteristic sets of operations that can be used to
    determine the initializer.

    Args:
      var: `tf.Variable`. The function will use the name to look for
        initializer operations in the same scope.
      graph: Optional `tf.Graph` that contains the variable. If None the
        default graph is used.

    Returns:
      Name of the guessed initializer as a string.
    """
    if graph is None:
        graph = tf.get_default_graph()
    prefix = var.op.name + "/Initializer"
    ops = [op for op in graph.get_operations() if op.name.startswith(prefix)]
    assert ops, "No operations found for prefix {}".format(prefix)
    op_names = [op.name[len(prefix) + 1:] for op in ops]
    if len(op_names) == 1:
        if op_names[0] == "Const":
            value = ops[0].get_attr("value").float_val[0]
            if value == 0.0:
                return "zeros"
            if np.isclose(value, 1.0):
                return "ones"
            return "constant"
        return op_names[0]  # ones or zeros
    if "Qr" in op_names and "DiagPart" in op_names:
        return "orthogonal"
    if "random_uniform" in op_names:
        return "glorot_uniform"
    stddev_ops = [op for op in ops if op.name.endswith("stddev")]
    if stddev_ops:
        assert len(stddev_ops) == 1
        stddev = stddev_ops[0].get_attr("value").float_val[0]
    else:
        stddev = None
    if "random_normal" in op_names:
        return "random_normal"
    if "truncated_normal" in op_names:
        if len(str(stddev)) > 5:
            return "glorot_normal"
        return "truncated_normal"
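A minimal usage sketch for the function above, assuming the TF 1.x graph API (tf.get_default_graph, tf.get_variable) that the code itself relies on; the scope name and shape are made up.

import numpy as np
import tensorflow as tf  # TF 1.x API assumed

with tf.Graph().as_default() as g:
    with tf.variable_scope("my_scope"):
        v = tf.get_variable("my_var", shape=[3, 3],
                            initializer=tf.glorot_uniform_initializer())
    # The initializer subgraph lives under my_scope/my_var/Initializer/*,
    # so the guess should come back as "glorot_uniform".
    print(guess_initializer(v, graph=g))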
5,347,000
def init_colors(): """Initialize the color definition for all pair identifiers""" # This allows the use of `-1` in `init_pair()` for accessing the # default foreground and background terminal colors. It also enables # transparency. curses.use_default_colors() # Default colors from terminal preferences set_color(COLOR_PAIR_DEFAULT, -1) # Locked data. Red on default background set_color(COLOR_PAIR_LOCKED, curses.COLOR_RED) # Flagged data set_color(COLOR_PAIR_FLAGGED, curses.COLOR_CYAN) # Until a theme is set up, just use defaults from the shell # for the header colors set_color(COLOR_PAIR_HEADER, -1) set_color(COLOR_PAIR_HEADER_ALT, -1)
5,347,001
def get_all(ptype=vendor):
    """Return a dict of all partners of the given type (vendor or customer)."""
    if ptype == vendor:
        d = get_dict_from_json_file(VENDORS_JSON_FILE)  # will create the file if it does not exist
    elif ptype == customer:
        d = get_dict_from_json_file(CUSTOMERS_JSON_FILE)
    return d
5,347,002
def validate_read_parameters(file_path, output_path, encryption_key, scrypt_n, scrypt_r, scrypt_p, block_height_override, block_width_override, max_cpu_cores, save_statistics, bad_frame_strikes, stop_at_metadata_load, auto_unpackage_stream, auto_delete_finished_stream): """This function verifies the arguments going into read() to ensure they comform with the required format for processing. """ logging.debug("Validating read parameters...") constants = session.query(Constants).first() valid_video_formats = constants.return_valid_video_formats() valid_image_formats = constants.return_valid_image_formats() if isinstance(file_path, str): # Single video or image file to decode path = Path(file_path) if not path.is_dir(): input_type = _file_path_validate(file_path, 'all', valid_video_formats, valid_image_formats) else: input_type = 'image' elif isinstance(file_path, list): # Multiple images for path in file_path: input_type = _file_path_validate(path, 'image', valid_video_formats, valid_image_formats) else: raise ValueError('file_path can only accept strings for single video file or a directory (with images inside), ' 'or list of string for image frames.') if output_path: is_valid_directory('file_to_input', output_path) proper_string_syntax('encryption_key', encryption_key) is_int_over_zero('bad_frame_strikes', bad_frame_strikes) is_int_over_zero('scrypt_n', scrypt_n) is_int_over_zero('scrypt_r', scrypt_r) is_int_over_zero('scrypt_p', scrypt_p) is_int_over_zero('block_height_override', block_height_override) is_int_over_zero('block_width_override', block_width_override) if not isinstance(max_cpu_cores, int) or max_cpu_cores < 0: raise ValueError('max_cpu_cores must be an integer greater than or equal to 0.') is_bool('save_statistics', save_statistics) is_bool('stop_at_metadata_load', stop_at_metadata_load) is_bool('auto_unpackage_stream', auto_unpackage_stream) is_bool('auto_delete_finished_stream', auto_delete_finished_stream) logging.debug("Read parameters validated.") return input_type
5,347,003
def test_RNNCell(): """ Test the RNNCell module to ensure that it produces the exact same output as the primary torch implementation, in the same order. """ # Disable mkldnn to avoid rounding errors due to difference in implementation mkldnn_enabled_init = torch._C._get_mkldnn_enabled() torch._C._set_mkldnn_enabled(False) batch_size = 5 input_size = 10 hidden_size = 50 test_input = torch.rand(batch_size, input_size) test_hidden = torch.rand(batch_size, hidden_size) # RNNCell implemented in pysyft rnn_syft = syft_nn.RNNCell(input_size, hidden_size, True, "tanh") # RNNCell implemented in original pytorch rnn_torch = nn.RNNCell(input_size, hidden_size, True, "tanh") # Make sure the weights of both RNNCell are identical rnn_syft.fc_xh.weight = rnn_torch.weight_ih rnn_syft.fc_hh.weight = rnn_torch.weight_hh rnn_syft.fc_xh.bias = rnn_torch.bias_ih rnn_syft.fc_hh.bias = rnn_torch.bias_hh output_syft = rnn_syft(test_input, test_hidden) output_torch = rnn_torch(test_input, test_hidden) assert torch.allclose(output_syft, output_torch, atol=1e-2) # Reset mkldnn to the original state torch._C._set_mkldnn_enabled(mkldnn_enabled_init)
5,347,004
def tsplot_data(cfg, mmodel, region, observations='PHC'): """Extract data for TS plots from one specific model. Parameters ---------- mmodel: str model name max_level: int maximum level (depth) of TS data to be used region: str region as defined in `hofm_regions` observations: str name of the observations Returns ------- None """ logger.info("Extract TS data for %s, region %s", mmodel, region) # generate input names for T and S. The files are generated by the # `timmean` function. ifilename_t = genfilename(cfg['work_dir'], 'thetao', mmodel, data_type='timmean', extension='.nc') ifilename_s = genfilename(cfg['work_dir'], 'so', mmodel, data_type='timmean', extension='.nc') # get the metadata for T and S metadata_t = load_meta(datapath=ifilename_t, fxpath=None) metadata_s = load_meta(datapath=ifilename_s, fxpath=None) # find index of the max_level lev_limit = metadata_t['lev'][ metadata_t['lev'] <= cfg['tsdiag_depth']].shape[0] + 1 # find indexes of data that are in the region indexes = hofm_regions(region, metadata_t['lon2d'], metadata_t['lat2d']) temp = np.array([]) salt = np.array([]) depth_model = np.array([]) # loop over depths for ind, depth in enumerate(metadata_t['lev'][0:lev_limit]): level_pp, level_pp_s = tsplot_extract_data(mmodel, observations, metadata_t, metadata_s, ind) # select individual points for T, S and depth temp = np.hstack((temp, level_pp[indexes[0], indexes[1]].compressed())) salt = np.hstack( (salt, level_pp_s[indexes[0], indexes[1]].compressed())) depth_temp = np.zeros_like( level_pp[indexes[0], indexes[1]].compressed()) depth_temp[:] = depth depth_model = np.hstack((depth_model, depth_temp)) # Saves the data to individual files data_info = {} data_info['basedir'] = cfg['work_dir'] data_info['mmodel'] = mmodel data_info['region'] = region data_info['levels'] = metadata_t['lev'] data_info['ori_file'] = [ifilename_t, ifilename_s] data_info['areacello'] = None tsplot_save_data(cfg, data_info, temp, salt, depth_model) metadata_t['datafile'].close() metadata_s['datafile'].close()
5,347,005
def FindCatkinResource(package, relative_path): """ Find a Catkin resource in the share directory or the package source directory. Raises IOError if resource is not found. @param relative_path Path relative to share or package source directory @param package The package to search in @return Absolute path to resource """ from catkin.find_in_workspaces import find_in_workspaces paths = find_in_workspaces(project=package, search_dirs=['share'], path=relative_path, first_match_only=True) if paths and len(paths) == 1: return paths[0] else: raise IOError('Loading resource "{:s}" failed.'.format( relative_path))
5,347,006
def plot_xfs_feature_frequency( freq, figsize=None, freq_pct=True, color=None, marker=None, markersize=None, markeredgecolor=None, markerfacecolor=None, markeredgewidth=None, fontsize=None, save_path=None, ): """Function to plot selected features frequency. This function is a helper function based on the features_frequency attribute of the XGBoostFeatureSelector class. Parameters ---------- freq: pandas.DataFrame Feature frequency figsize: tuple, optional, (default=(8, 4)) Figure size freq_pct: bool, optional, (default=True) Flag to show the features frequency in percent color: str, optional, (default="#87CEEB") Color of the vertical lines of lollipops marker: str, optional, (default="o") Marker style of the lollipops. Complete valid marker style can be found at: (https://matplotlib.org/2.1.1/api/markers_api.html#module-matplotlib.markers) markersize: int or float, optional, (default=10) Markersize markeredgecolor: str, optional, (default="#1F77B4") Marker edge color markerfacecolor: str, optional, (default="#1F77B4") Marker face color markeredgewidth: int or float, optional, (default=1) Marker edge width fontsize: int or float, optional, (default=12) Fontsize for xlabel and ylabel, and ticks parameters save_path: str, optional (default=None) The full or relative path to save the plot including the image format. For example "myplot.png" or "../../myplot.pdf" Returns None """ # initializing figsize if figsize is None: figsize = (8, 4) elif isinstance(figsize, list) or isinstance(figsize, tuple): figsize = figsize else: raise TypeError("Only tuple and list types are allowed for figsize.") # # initializing column to plot if freq_pct: col = "Frequency (%)" else: col = "Frequency" # initializing color if color is None: color = "#87CEEB" elif isinstance(color, str): color = color else: raise TypeError("Only str type is allowed for color.") # initializing marker if marker is None: marker = "o" elif isinstance(marker, str): marker = marker else: raise TypeError("Only str type is allowed for marker.") # initializing markersize if markersize is None: markersize = 10 elif isinstance(markersize, float) or isinstance(markersize, int): markersize = markersize else: raise TypeError("Only int and float types are allowed for markersize.") # initializing markeredgecolor if markeredgecolor is None: markeredgecolor = "#1F77B4" elif isinstance(markeredgecolor, str): markeredgecolor = markeredgecolor else: raise TypeError("Only str type is allowed for markeredgecolor.") # initializing markerfacecolor if markerfacecolor is None: markerfacecolor = "#1F77B4" elif isinstance(markerfacecolor, str): markerfacecolor = markerfacecolor else: raise TypeError("Only str type is allowed for markerfacecolor.") # initializing markeredgewidth if markeredgewidth is None: markeredgewidth = 1 elif isinstance(markeredgewidth, int) or isinstance(markeredgewidth, float): markeredgecolor = markeredgecolor else: raise TypeError("Only int and float types are allowed for markeredgewidth.") # initializing fontsize if fontsize is None: fontsize = 12 elif isinstance(fontsize, float) or isinstance(fontsize, int): fontsize = fontsize else: raise TypeError("Only int and float types are allowed for fontsize.") # reindex freq freq = freq.reindex(index=[idx for idx in range(len(freq) - 1, -1, -1)]) fig, ax = plt.subplots(figsize=figsize) ax.hlines(y=freq["Feature"], xmin=0, xmax=freq[col], color=color) ax.plot( freq[col], freq["Feature"].values, marker, markersize=markersize, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor, 
markeredgewidth=markeredgewidth, ) ax.set_xlabel(f"{col}", fontsize=fontsize) ax.set_ylabel("Feature", fontsize=fontsize) ax.set_title("Important Features Frequency", fontsize=fontsize) ax.tick_params(axis="both", which="major", labelsize=fontsize) if save_path: plt.savefig(save_path, bbox_inches="tight", dpi=200) plt.show()
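A hypothetical call of the plotting helper above; the column names ("Feature", "Frequency", "Frequency (%)") mirror what the function indexes, but the values are invented.

import pandas as pd

freq = pd.DataFrame({
    "Feature": ["f3", "f1", "f7"],
    "Frequency": [9, 6, 2],
    "Frequency (%)": [75.0, 50.0, 16.7],
})
# Lollipop plot of the frequencies in percent, saved next to the script.
plot_xfs_feature_frequency(freq, freq_pct=True, save_path="feature_frequency.png")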
5,347,007
def copy_all(
    network_filename="socialNetwork.json",
    source_image_json="sourceImageFilenames.json",
    entity_image_json="entityImageFilenames.json",
    images_filename="images.json"
):
    """
    Copy all exported files to their correct paths within the Javascript src directory

    Args:
        network_filename (str, default="socialNetwork.json")
        source_image_json (str, default="sourceImageFilenames.json")
        entity_image_json (str, default="entityImageFilenames.json")
        images_filename (str, default="images.json")

    Returns:
        None
    """
    network_filename_path = Path("../src") / network_filename
    images_filename_path = Path("../src") / images_filename
    source_image_path = Path("../src/data") / source_image_json
    entity_image_path = Path("../src/data") / entity_image_json

    copy_with_confirmation(network_filename, network_filename_path)
    copy_with_confirmation(images_filename, images_filename_path)
    copy_with_confirmation(source_image_json, source_image_path)
    copy_with_confirmation(entity_image_json, entity_image_path)
5,347,008
def clear_errors() -> None: """ """
5,347,009
def send_udf_call( api_func: Callable[..., urllib3.HTTPResponse], api_kwargs: Dict[str, Any], decoder: decoders.AbstractDecoder, id_callback: Optional[IDCallback] = None, *, results_stored: bool, ) -> "results.RemoteResult[_T]": """Synchronously sends a request to the given API. This handles the boilerplate parts (exception handling, parsing, response construction) of calling one of the generated API functions for UDFs. It runs synchronously and will return a :class:`results.RemoteResult`. To run the same function asychronously, use :meth:`Client.wrap_async_base_call` around the function that calls this (by convention, the ``whatever_api_base`` functions). This should only be used by callers *inside* this package. :param api_func: The UDF API function that we want to call from here. For instance, this might be :meth:`rest_api.SqlApi.run_sql`. :param api_kwargs: The arguments to pass to the API function as a dict. This should only include the parameters you want to send to the server, *not* any of the “meta” parameters that are mixed in with them (e.g. ``_preload_content``; this function will correctly set up the request). :param decoder: The Decoder to use to decode the response. :param id_callback: When the request completes (either by success or failure), this will be called with the UUID from the HTTP response, or None if the UUID could not be parsed. :param results_stored: A boolean indicating whether the results were stored. This does *not affect* the request; the ``store_results`` parameter of whatever API message the call uses must be set, and this must match that value. :return: A response containing the parsed result and metadata about it. """ try: http_response = api_func(_preload_content=False, **api_kwargs) except rest_api.ApiException as exc: if id_callback: id_callback(results.extract_task_id(exc)) raise tiledb_cloud_error.check_exc(exc) from None task_id = results.extract_task_id(http_response) if id_callback: id_callback(task_id) return results.RemoteResult( body=http_response.data, decoder=decoder, task_id=task_id, results_stored=results_stored, )
5,347,010
def migration_area_baidu(area="乌鲁木齐市", indicator="move_in", date="20200201"):
    """
    Baidu Maps Huiyan - Baidu Qianxi (migration) - inbound-origin details for a region
    Baidu Maps Huiyan - Baidu Qianxi (migration) - outbound-destination details for a region
    The top 100 results are shown; if fewer than 100 are available, all are shown.
    Inbound-origin ratio: people moving from xx into the current region divided by the region's total inbound population.
    Outbound-destination ratio: people moving from the current region to xx divided by the region's total outbound population.
    https://qianxi.baidu.com/?from=shoubai#city=0
    :param area: a province or a specific city; the full official name must be used
    :type area: str
    :param indicator: "move_in" for inbound, "move_out" for outbound
    :type indicator: str
    :param date: the date to query, 20200101 or later
    :type date: str
    :return: the first 50 entries of the inbound-origin / outbound-destination details
    :rtype: pandas.DataFrame
    """
    city_dict.update(province_dict)
    inner_dict = dict(zip(city_dict.values(), city_dict.keys()))
    if inner_dict[area] in province_dict.keys():
        dt_flag = "province"
    else:
        dt_flag = "city"
    url = "https://huiyan.baidu.com/migration/cityrank.jsonp"
    params = {
        "dt": dt_flag,
        "id": inner_dict[area],
        "type": indicator,
        "date": date,
    }
    res = requests.get(url, params=params)
    json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
    return pd.DataFrame(json_data["data"]["list"])
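A usage sketch for the function above; network access to huiyan.baidu.com is assumed, and the area must be a full name present in city_dict/province_dict.

# Inbound-origin details for Shanghai on 2020-03-01.
df = migration_area_baidu(area="上海市", indicator="move_in", date="20200301")
print(df.head())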
5,347,011
def ranges(locdata: LocData, loc_properties=None, special=None, epsilon=1): """ Provide data ranges for locdata.data property. If LocData is empty None is returned. If LocData carries a single value, the range will be (value, value + `epsilon`). Parameters ---------- locdata : LocData Localization data. loc_properties : str, tuple[str], list[str], True, None. Localization properties for which the range is determined. If None the ranges for all spatial coordinates are returned. If True the ranges for all locdata.data properties are returned. special : None, str If None (min, max) ranges are determined from data and returned; if 'zero' (0, max) ranges with max determined from data are returned. if 'link' (min_all, max_all) ranges with min and max determined from all combined data are returned. epsilon : float number to specify the range for single values in locdata. Returns ------- numpy.ndarray of float with shape (dimension, 2), None The data range (min, max) for each localization property. """ if locdata.data.empty: return None elif len(locdata) == 1: pass if loc_properties is None: ranges_ = locdata.bounding_box.hull.T.copy() elif loc_properties is True: ranges_ = np.array([locdata.data.min(), locdata.data.max()]).T elif isinstance(loc_properties, str): ranges_ = np.array( [[locdata.data[loc_properties].min(), locdata.data[loc_properties].max()]] ) else: loc_properties = list(loc_properties) ranges_ = np.array( [locdata.data[loc_properties].min(), locdata.data[loc_properties].max()] ).T if len(locdata) == 1: if ranges_.size == 0: ranges_ = np.concatenate( [locdata.coordinates, locdata.coordinates + epsilon], axis=0 ).T else: ranges_ = ranges_ + [0, epsilon] if special is None: pass elif special == "zero": ranges_[:, 0] = 0 elif special == "link": minmax = np.array([ranges_[:, 0].min(axis=0), ranges_[:, 1].max(axis=0)]) ranges_ = np.repeat(minmax[None, :], len(ranges_), axis=0) else: raise ValueError(f"The parameter special={special} is not defined.") return ranges_
5,347,012
def subnet_create(request, network_id, **kwargs): """Create a subnet on a specified network. :param request: request context :param network_id: network id a subnet is created on :param cidr: (optional) subnet IP address range :param ip_version: (optional) IP version (4 or 6) :param gateway_ip: (optional) IP address of gateway :param tenant_id: (optional) tenant id of the subnet created :param name: (optional) name of the subnet created :param subnetpool_id: (optional) subnetpool to allocate prefix from :param prefixlen: (optional) length of prefix to allocate :returns: Subnet object Although both cidr+ip_version and subnetpool_id+preifxlen is listed as optional you MUST pass along one of the combinations to get a successful result. """ LOG.debug("subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s", {'network_id': network_id, 'kwargs': kwargs}) body = {'subnet': {'network_id': network_id}} if 'tenant_id' not in kwargs: kwargs['tenant_id'] = request.user.project_id body['subnet'].update(kwargs) subnet = neutronclient(request).create_subnet(body=body).get('subnet') return Subnet(subnet)
5,347,013
def evaluate_available(item, type_name, predicate): """ Run the check_available predicate and cache the result. If there is already a cached result, use that and don't run the predicate command. :param str item: name of the item to check the type for. i.e. 'server_types :param str type_name: name of the type. i.e. 'headless' :param str predicate: the check_available command :return bool type_available: whether or not the type is available """ global cached_available if (item, type_name) not in cached_available: exit_code, _, _ = run_command_print_ready( shell=True, command=predicate ) cached_available[(item, type_name)] = exit_code == 0 return cached_available[(item, type_name)]
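A usage sketch; the predicate can be any shell command whose exit status signals availability (the Xvfb check below is only an example).

# The result is cached after the first call for the ('server_types', 'headless') pair.
if evaluate_available("server_types", "headless", "command -v Xvfb"):
    print("headless server type is available")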
5,347,014
def dB_transform(R, metadata=None, threshold=None, zerovalue=None, inverse=False): """Methods to transform precipitation intensities to/from dB units. Parameters ---------- R: array-like Array of any shape to be (back-)transformed. metadata: dict, optional Metadata dictionary containing the transform, zerovalue and threshold attributes as described in the documentation of :py:mod:`pysteps.io.importers`. threshold: float, optional Optional value that is used for thresholding with the same units as R. If None, the threshold contained in metadata is used. If no threshold is found in the metadata, a value of 0.1 is used as default. zerovalue: float, optional The value to be assigned to no rain pixels as defined by the threshold. It is equal to the threshold - 1 by default. inverse: bool, optional If set to True, it performs the inverse transform. False by default. Returns ------- R: array-like Array of any shape containing the (back-)transformed units. metadata: dict The metadata with updated attributes. """ R = R.copy() if metadata is None: if inverse: metadata = {"transform": "dB"} else: metadata = {"transform": None} else: metadata = metadata.copy() # to dB units if not inverse: if metadata["transform"] == "dB": return R, metadata if threshold is None: threshold = metadata.get("threshold", 0.1) zeros = R < threshold # Convert to dB R[~zeros] = 10.0 * np.log10(R[~zeros]) threshold = 10.0 * np.log10(threshold) # Set value for zeros if zerovalue is None: zerovalue = threshold - 5 # TODO: set to a more meaningful value R[zeros] = zerovalue metadata["transform"] = "dB" metadata["zerovalue"] = zerovalue metadata["threshold"] = threshold return R, metadata # from dB units elif inverse: if metadata["transform"] != "dB": return R, metadata if threshold is None: threshold = metadata.get("threshold", -10.0) if zerovalue is None: zerovalue = 0.0 R = 10.0 ** (R / 10.0) threshold = 10.0 ** (threshold / 10.0) R[R < threshold] = zerovalue metadata["transform"] = None metadata["threshold"] = threshold metadata["zerovalue"] = zerovalue return R, metadata
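A round-trip sketch on synthetic rain-rate values (units of mm/h assumed), showing how the returned metadata feeds the inverse transform.

import numpy as np

R = np.array([0.0, 0.05, 0.5, 5.0])
R_db, meta = dB_transform(R, threshold=0.1)          # forward: values below 0.1 become the zerovalue
R_back, _ = dB_transform(R_db, meta, inverse=True)   # back-transform using the stored threshold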
5,347,015
def setup() -> None: """ Sets up the Partial FC backbone such that the model may be fetched using `get()`. Primarily, the setup involves the download of pretrained weights from `PARTIAL_FC_BACKBONE_PRETRAIN_URL`. """ # Download pretrained model _file_jar.store_file( PARTIAL_FC_BACKBONE_PRETRAIN_FILE_NAME, lambda p: gdown.download(PARTIAL_FC_BACKBONE_PRETRAIN_URL, str(p), quiet=False), )
5,347,016
def set_geocentric_zero_from_oxy_ha(target_data, geo_results = None, velocity_column = None, data_column = None, variance_column = None, new_velocity_column = None, subtract = True, use_quadratic_fit = False, **kwargs): """ shifts GEO Velocity to be set by oxygen line Parameters ---------- target_row: `SkySurvey` row Row to match spectra to geo_results: `list` of `lmfit.model.ModelResult resutls of fits if None, will do fitting data_column: 'str', optional, must be keyword Name of data column, default of "DATA" velocity_column: 'str', optional, must be keyword Name of velocity column, default of "VELOCITY_GEO" variance_volumn: 'str', optional, must be keyword Name of velocity column, default of "VARIANCE" new_velocity_column: 'str', optional, must be keyword Name of new Velocity column to set geo velocity to default of "VELOCITY_GEO" subtract: `bool`, optional, must be keyword if True, subtracts bright fit from data use_quadratic_fit: `bool`, optional, must be keyword if True, uses quadratic fit method to find peak and sets velocity based on peak Warning: Cannot subtract a fit using this method, but useful to first do this before fitting for peak **kwargs: dict keywords passed to Model.fit() """ if velocity_column is None: velocity_column = "VELOCITY_GEO" if data_column is None: data_column = "DATA" if variance_column is None: variance_column = "VARIANCE" if new_velocity_column is None: new_velocity_column = "VELOCITY_GEO" # Create new velocity column if needed if new_velocity_column not in target_data.keys(): target_data[new_velocity_column] = target_data[velocity_column][:] if "allow_jitter" not in target_data.keys(): target_data["allow_jitter"] = np.ones(len(target_data), dtype = bool) if use_quadratic_fit: quad_fit = target_data.get_quadratic_centroid(data_column = data_column, velocity_column = velocity_column, variance_column = variance_column, window_len = 5) for ell, row in enumerate(target_data): offset = quad_fit[ell,0] - 272.44 if (np.abs(offset) < 12) & (np.abs(quad_fit[ell,0] - np.nanmax(row[velocity_column])) > 5): row[new_velocity_column] = row[velocity_column] - offset row["allow_jitter"] = False else: row[new_velocity_column][:] = row[velocity_column][:] else: if geo_results is None: geo_results = fit_all_oxy_ha(target_data, velocity_column = velocity_column, data_column = data_column, **kwargs) for ell, (res, row) in enumerate(zip(geo_results, target_data)): if not hasattr(res, "assert_any_call"): if res.message.rsplit(".")[0] == 'Fit succeeded': offset = res.params["mean"].value - (272.44) if (np.abs(offset) < 12) & (res.params["amp"].value > 1.1): row[new_velocity_column] = row[velocity_column] - offset row["allow_jitter"] = False if subtract: comp = res.best_fit - res.params["baseline"].value # bright = # bright_g = Gaussian1D(res.params["amp"a].value, # res.params["mean"].value, # res.params["std"].value) row[data_column] -= comp else: row[new_velocity_column][:] = np.nan
5,347,017
def cut( video: typing.Union[str, VideoObject], output_path: str = None, threshold: float = 0.95, frame_count: int = 5, compress_rate: float = 0.2, target_size: typing.Tuple[int, int] = None, offset: int = 3, limit: int = None, ) -> typing.Tuple[VideoCutResult, str]: """ cut the video, and get series of pictures (with tag) :param video: video path or object :param output_path: output path (dir) :param threshold: float, 0-1, default to 0.95. decided whether a range is stable. larger => more unstable ranges :param frame_count: default to 5, and finally you will get 5 frames for each range :param compress_rate: before_pic * compress_rate = after_pic. default to 0.2 :param target_size: (100, 200) :param offset: it will change the way to decided whether two ranges can be merged before: first_range.end == second_range.start after: first_range.end + offset >= secord_range.start :param limit: ignore some ranges which are too short, 5 means ignore stable ranges which length < 5 :return: tuple, (VideoCutResult, data_home) """ if isinstance(video, str): video = VideoObject(video) cutter = VideoCutter() res = cutter.cut(video, compress_rate=compress_rate, target_size=target_size) stable, unstable = res.get_range(threshold=threshold, limit=limit, offset=offset) data_home = res.pick_and_save(stable, frame_count, to_dir=output_path) res_json_path = os.path.join( output_path or data_home, constants.CUT_RESULT_FILE_NAME ) res.dump(res_json_path) return res, data_home
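A usage sketch of the cutter above; "demo.mp4" is a placeholder for a local video file.

res, data_home = cut("demo.mp4", threshold=0.97, frame_count=3)
print(data_home)  # directory that now holds the tagged frames and the cut-result JSON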
5,347,018
def re_evaluate_did(scope, name, rule_evaluation_action, session=None): """ Re-Evaluates a did. :param scope: The scope of the did to be re-evaluated. :param name: The name of the did to be re-evaluated. :param rule_evaluation_action: The Rule evaluation action. :param session: The database session in use. :raises: DataIdentifierNotFound """ try: did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name).one() except NoResultFound: raise DataIdentifierNotFound() if rule_evaluation_action == DIDReEvaluation.ATTACH: __evaluate_did_attach(did, session=session) else: __evaluate_did_detach(did, session=session) # Update size and length of did if session.bind.dialect.name == 'oracle': stmt = session.query(func.sum(models.DataIdentifierAssociation.bytes), func.count(1)).\ with_hint(models.DataIdentifierAssociation, "index(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) for bytes_, length in stmt: did.bytes = bytes_ did.length = length # Add an updated_col_rep if did.did_type == DIDType.DATASET: models.UpdatedCollectionReplica(scope=scope, name=name, did_type=did.did_type).save(session=session)
5,347,019
def parse_args(): """ Parses command line arguments """ parser = ArgumentParser(description="A multi-threaded gemini server") parser.add_argument("-b", "--host", default=DEFAULT_HOST, help="Host to bind to") parser.add_argument("-p", "--port", default=DEFAULT_PORT, help="Port to bind to") parser.add_argument( "-c", "--cert", default=DEFAULT_CERTFILE, help="SSL certificate in PEM format" ) parser.add_argument( "-k", "--key", default=DEFAULT_KEYFILE, help="SSL private key in PEM format" ) parser.add_argument( "-w", "--webroot", default=DEFAULT_WEBROOT, help="Webroot directory" ) parser.add_argument( "-q", "--queue", default=DEFAULT_QSIZE, help="Size of request queue" ) parser.add_argument( "-t", "--threads", default=DEFAULT_THREADS, help="Number of threads" ) parser.add_argument( "-u", "--uid", default=0, type=int, help="uid to use after loading SSL certificate", ) parser.add_argument( "-g", "--gid", default=0, type=int, help="gid to use after loading SSL certificate", ) return parser.parse_args()
5,347,020
def insert_deleted_entries(deleted_entries, data_type): """ Inserts every entry in deleted_entries dict into inbound_queue table. Args: deleted_entries: An array containing the remote_ids/distinguished names of the users/groups that were deleted. data_type: A string with the value of either user_deleted or group_deleted. This value will be used in the data_type field when we insert our data into the inbound_queue. Raises: ValueError: If parameter data_type does not have the value of "user_deleted" or "group_delete". """ if data_type not in ["user_deleted", "group_deleted"]: raise ValueError( "For deletions, data_type field must be either " "user_deleted or group_deleted. Found {}".format(data_type) ) conn = connect_to_db() for remote_id in deleted_entries: data = {"remote_id": remote_id} inbound_entry = { "data": data, "data_type": data_type, "sync_type": "delta", "timestamp": datetime.now().replace(tzinfo=timezone.utc).isoformat(), "provider_id": LDAP_DC, } r.table("inbound_queue").insert(inbound_entry).run(conn) conn.close()
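A usage sketch; the distinguished name below is invented, and a reachable RethinkDB with an inbound_queue table is assumed.

insert_deleted_entries(
    ["cn=jdoe,ou=people,dc=example,dc=com"],
    "user_deleted",
)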
5,347,021
def get_mapping_rules(): """ Get mappings rules as defined in business_object.js Special cases: Aduit has direct mapping to Program with program_id Request has a direct mapping to Audit with audit_id Response has a direct mapping to Request with request_id DocumentationResponse has a direct mapping to Request with request_id DocumentationResponse has normal mappings with all other objects in maping modal Section has a direct mapping to Standard/Regulation/Poicy with directive_id Anything can be mapped to a request, frotent show audit insted """ def filter(object_list): """ remove all lower case items since real object are CamelCase """ return set([item for item in object_list if item != item.lower()]) # these rules are copy pasted from # src/ggrc/assets/javascripts/apps/business_objects.js line: 276 business_object_rules = { "Program": "Issue ControlAssessment Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa # removed Person because Programs have a "Mapped" attribute for people mappings "Audit": "Issue ControlAssessment Request history Person program program_controls Request", # noqa "Issue": "ControlAssessment Control Audit Program Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Issue Request", # noqa "ControlAssessment": "Issue Objective Program Regulation Contract Policy Standard Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Regulation": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Policy": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Standard": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Contract": "Program Issue ControlAssessment Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Request", # noqa "Clause": "Contract Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Policy Regulation Standard Request", # noqa "Section": "Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Policy Regulation Standard Contract Clause Request", # noqa "Objective" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Control" : "Issue ControlAssessment Request Program Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Person" : "Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa "OrgGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process 
DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Vendor" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "System" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Process" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "DataAsset" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "AccessGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Product" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Project" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Facility" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Market" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request" # noqa } split_rules = {k: v.split() for k, v in business_object_rules.items()} filtered_rules = {k: filter(v) for k, v in split_rules.items()} return filtered_rules
5,347,022
def get_src_hash(sls_config, path): """Get hash(es) of serverless source.""" funcs = sls_config['functions'] if sls_config.get('package', {}).get('individually'): hashes = {key: get_hash_of_files(os.path.join(path, os.path.dirname(funcs[key].get('handler')))) for key in funcs.keys()} else: directories = [] for (key, value) in funcs.items(): func_path = {'path': os.path.dirname(value.get('handler'))} if func_path not in directories: directories.append(func_path) hashes = {sls_config['service']: get_hash_of_files(path, directories)} return hashes
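A sketch of the Serverless config shape the function above expects; service name and handler paths are hypothetical.

sls_config = {
    "service": "my-service",
    "package": {"individually": False},
    "functions": {
        "hello": {"handler": "src/hello/handler.main"},
        "world": {"handler": "src/world/handler.main"},
    },
}
# One hash per function when packaging individually, otherwise one hash per service.
hashes = get_src_hash(sls_config, "/repo/serverless")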
5,347,023
def gsma_not_found(ctx, config, statsd, logger, run_id, conn, metadata_conn, command, metrics_root, metrics_run_root, force_refresh, disable_retention_check, disable_data_check, debug_query_performance, month, year, output_dir): """Generate report of all GSMA not found IMEIs.""" _reports_validation_checks(disable_retention_check, year, month, logger, config, conn, disable_data_check) metadata.add_optional_job_metadata(metadata_conn, command, run_id, refreshed_data=force_refresh, month=month, year=year, report_schema_version=report_schema_version, output_dir=os.path.abspath(str(output_dir))) report_dir = _make_report_directory(ctx, output_dir, run_id, conn, config, year=year, month=month) report_metadata = [] with utils.CodeProfiler() as cp: logger.info('Generating country GSMA not found report...') country_name = config.region_config.name report_metadata.extend(_write_country_gsma_not_found_report(conn, config, month, year, country_name, report_dir)) statsd.gauge('{0}runtime.per_report.gsma_not_found'.format(metrics_run_root), cp.duration) # Store metadata about the report data ID and classification run ID metadata.add_optional_job_metadata(metadata_conn, command, run_id, report_outputs=report_metadata)
5,347,024
def extract_zip(zip_path, ret_extracted_path=False): """Extract a zip and delete the .zip file.""" dir_parents = os.path.dirname(zip_path) dir_name = Path(zip_path).stem extracted_path = os.path.join(dir_parents, dir_name, '') if ret_extracted_path: return extracted_path with zipfile.ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall(dir_parents) os.remove(zip_path) print(f"Extracted '{Path(zip_path).name}' to '{extracted_path}'.")
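A usage sketch; note that the archive is removed after a successful extraction.

dest = extract_zip("downloads/archive.zip", ret_extracted_path=True)  # only computes the target path
extract_zip("downloads/archive.zip")  # extracts next to the archive and deletes the .zip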
5,347,025
async def get_pipeline(request: web.Request, organization, pipeline) -> web.Response: """get_pipeline Retrieve pipeline details for an organization :param organization: Name of the organization :type organization: str :param pipeline: Name of the pipeline :type pipeline: str """ return web.Response(status=200)
5,347,026
def chunk_queue(dir_in="../audio/chunk_queue", dir_out="../audio/wav_chunked", chunk_len=5, sr=22050, log=True ): """ Feeds each song in queue directory to the chunk_song() function. --- IN dir_in: path of audio queue directory, absolute or relative (str) dir_out: path of output directory, absolute or relative (str) chunk_len: duration of chunk in seconds (int) sr: sample rate (int) fileid_min: either number of first file id for song (int) or 'auto' to detect last number used in the directory (str) log: True to log chunk and song name in MongoDB and printout, else False NO OUT """ for root, dirs, files in os.walk(dir_in): for fname in files: if not re.match(r'^\.', fname): rel_fpath = os.path.join(root, fname) chunk_song(rel_fpath, chunk_len=chunk_len, sr=sr, log=log)
5,347,027
def add_attachment(manager, issue, file):
    """
    Replacement for jira's 'add_attachment' method until this issue is properly fixed:
    https://github.com/shazow/urllib3/issues/303
    We also need to limit filenames to 252 characters.
    :param manager: [jira.JIRA instance]
    :param issue: [jira.JIRA.resources.Issue instance]
    :param file: [file object to attach]
    :return: [jira.JIRA.resources.Attachment instance]
    """
    filename = _get_filename(file.name)
    return _upload_file(manager, issue, file.file.read(), filename)
5,347,028
def app_start(page_name): """ device start at init """ page_value = gr.get_flow_behave_value(page_name, None) cur_platform = g_context.platform if cur_platform.strip().lower() == "web": log.info('[app_start] cur_platform is web, run web_start.') web_start(page_value) return device_id = gr.get_device_id() package_name = gr.get_app_package_name() log.info("device_id:{},".format(device_id)) log.info("page_name:{},".format(page_name)) log.info("package_name:{}".format(package_name)) if not (package_name is None or page_value is None or device_id is None): if "restartApp" == page_value: app.shut_app(package_name) wait_time = gr.get_frame_config_value("app_start_time", 6) app.wake_app(package_name, wait_time) log.info("complete restartApp and sleep {}".format(wait_time)) elif "startApp" == page_value: wait_time = gr.get_frame_config_value("app_start_time", 6) app.wake_app(package_name, wait_time) log.info("complete startApp and sleep {}".format(wait_time)) elif "stopApp" == page_value: app.shut_app(package_name) log.info("stop app before running") elif "backupPage" == page_value: ake.key_event("4")
5,347,029
def plot_experiment3_1(): """Figure 3 (a) of the paper.""" lat128 = np.load('./results/experiment3/experiment3_full_anomal-rec_ae_lat128_best_aps.npy') lat32 = np.load('./results/experiment3/experiment3_full_anomal-rec_ae_lat32_best_aps.npy') spatial1 = np.load('./results/experiment3/experiment3_full_anomal-rec_spatial-ae_lat1_best_aps.npy') spatial2 = np.load('./results/experiment3/experiment3_full_anomal-rec_spatial-ae_lat2_best_aps.npy') vqvae = np.load('./results/experiment3/experiment3_full_anomal-rec_vq-vae_best_aps.npy') intensities = np.linspace(0, 1, num=len(lat128)) plt.plot(intensities, lat128, label='AE latent 128') plt.plot(intensities, lat32, label='AE latent 32') plt.plot(intensities, spatial1, label='Spatial-AE latent 8x8x1') plt.plot(intensities, spatial2, label='Spatial-AE latent 8x8x2') plt.plot(intensities, vqvae, label='VQ-VAE') plt.legend() plt.xlabel('intensity') plt.ylabel('AP') plt.ylim(0, 1) plt.show()
5,347,030
def _ols_iter(inv_design, sig, min_diffusivity): """ Helper function used by ols_fit_dki - Applies OLS fit of the diffusion kurtosis model to single voxel signals. Parameters ---------- inv_design : array (g, 22) Inverse of the design matrix holding the covariants used to solve for the regression coefficients. sig : array (g,) Diffusion-weighted signal for a single voxel data. min_diffusivity : float Because negative eigenvalues are not physical and small eigenvalues, much smaller than the diffusion weighting, cause quite a lot of noise in metrics such as fa, diffusivity values smaller than `min_diffusivity` are replaced with `min_diffusivity`. Returns ------- dki_params : array (27,) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor """ # DKI ordinary linear least square solution log_s = np.log(sig) result = np.dot(inv_design, log_s) # Extracting the diffusion tensor parameters from solution DT_elements = result[:6] evals, evecs = decompose_tensor(from_lower_triangular(DT_elements), min_diffusivity=min_diffusivity) # Extracting kurtosis tensor parameters from solution MD_square = (evals.mean(0))**2 KT_elements = result[6:21] / MD_square # Write output dki_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2], KT_elements), axis=0) return dki_params
5,347,031
def test_collect_rollout_values() -> None: """ Test the values of the returned RolloutStorage objects from trainer.collect_rollout(). """ # Initialize trainer. settings = dict(DEFAULT_SETTINGS) settings["env_name"] = "unique-env" trainer = RLTrainer(settings) # Run rollout. _, _ = trainer.collect_rollout() rollout = trainer.rollout env = trainer.env # Check if rollout info came from UniqueEnv. for step in range(rollout.rollout_step): obs = rollout.obs[step] value_pred = rollout.value_preds[step] action = rollout.actions[step] action_log_prob = rollout.action_log_probs[step] reward = rollout.rewards[step] # Check shapes. assert obs.shape == torch.Size([settings["num_processes"], 1]) assert value_pred.shape == torch.Size([settings["num_processes"], 1]) assert action.shape == torch.Size([settings["num_processes"], 1]) assert action_log_prob.shape == torch.Size([settings["num_processes"], 1]) assert reward.shape == torch.Size([settings["num_processes"], 1]) # Check consistency of values. assert float(obs) == float(step + 1) assert float(action) - int(action) == 0 and int(action) in env.action_space assert float(obs) == float(reward) trainer.close()
5,347,032
def unarchive_collector(collector): """ This code is copied from `Collector.delete` method """ # sort instance collections for model, instances in collector.data.items(): collector.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. collector.sort() # number of objects deleted for each model label unarchived_counter = Counter() with transaction.atomic(using=collector.using, savepoint=False): # reverse instance collections for instances in collector.data.values(): instances.reverse() # delete instances for model, instances in collector.data.items(): if not is_archivable_cls(model): continue pk_list = [obj.pk for obj in instances] queryset = model.all_objects.filter(pk__in=pk_list) count = queryset.update(archived_at=None) unarchived_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: # user post archive instead of post delete signals.post_unarchive.send( sender=model, instance=obj, using=collector.using ) for obj in instances: setattr(obj, "archived_at", None) return sum(unarchived_counter.values()), dict(unarchived_counter)
5,347,033
def create_dataset_content(datasetName=None): """ Creates the content of a data set by applying a "queryAction" (a SQL query) or a "containerAction" (executing a containerized application). See also: AWS API Documentation Exceptions :example: response = client.create_dataset_content( datasetName='string' ) :type datasetName: string :param datasetName: [REQUIRED]\nThe name of the data set.\n :rtype: dict ReturnsResponse Syntax{ 'versionId': 'string' } Response Structure (dict) -- versionId (string) --The version ID of the data set contents which are being created. Exceptions IoTAnalytics.Client.exceptions.InvalidRequestException IoTAnalytics.Client.exceptions.ResourceNotFoundException IoTAnalytics.Client.exceptions.InternalFailureException IoTAnalytics.Client.exceptions.ServiceUnavailableException IoTAnalytics.Client.exceptions.ThrottlingException :return: { 'versionId': 'string' } """ pass
5,347,034
def remove_tag_from_issues( issues: List[GitHubIssue], tag: str, scope: str = "all", ignore_list: Optional[Union[List[int], List[Dict[str, int]]]] = None, ) -> List[GitHubIssue]: """remove_tag_from_issues Removes all of a tag from the given issues. If scoped to just issues, we still check the first comment as this comment is the issue body. """ if ignore_list is None: ignore_list = [-1] for index, issue in enumerate(issues): if scope in ("all", "issues"): if tag in issue.metadata: # If the issue is one we should ignore, continue. # This is usually due to the issue being empty. if index in ignore_list: continue issue.metadata.remove(tag) if tag in issue.all_comments[0].tags: issue.all_comments[0].tags.remove(tag) if scope in ("all", "comments"): for comment in issue.all_comments: if tag in comment.tags: # If the comment is one we should ignore, continue. # This is usually due to the comment being empty. if {"issue": index, "comment": comment.number} in ignore_list: continue comment.tags.remove(tag) return issues
5,347,035
def days_remaining_context_processor(request):
    """Context processor. Adds days_remaining to context of every view."""
    now = datetime.now()
    return {'days_remaining': (wedding_date - now).days}
5,347,036
def convert_time_range(trange, tz=None): """ Converts freeform time range into a tuple of localized timestamps (start, end). If `tz` is None, uses settings.TIME_ZONE for localizing time range. :param trange: - string representing time-range. The options are: * string in format 'x1|x2', where x1 and x2 are start and end date in the format YYYYmmdd[THH:MM:SS.mmmmm] (in fact, any other format would work well, the function tries its best to determine format and parse timestamps) * string in format 'x1|x2', where x1 and x2 are given in human readable format, as described in the dateparser doc: (see https://github.com/scrapinghub/dateparser) * one of the following keywords: 'today', 'yesterday', 'this week', 'last week', 'this month', 'last month', 'this year', 'last year' :param tz: - timezone (optional). Either string representing a timezone (e.g. "America/Lima") or a pytz object. :return: tuple of two TZ-aware timestamps. """ # Form time range as a tuple of naive datetimes. assert isinstance(trange, str), "Value is not a string: %s" % trange trange = trange.strip().lower() _time = lambda d: datetime.combine(d, time()) today = date.today() if trange == 'today': ts_from = _time(today) ts_to = ts_from + timedelta(days=1, seconds=-1) elif trange == 'yesterday': ts_from = _time(today+timedelta(days=-1)) ts_to = ts_from + timedelta(days=1, seconds=-1) elif trange == 'this week': ts_from = _time(today-timedelta(days=today.weekday())) ts_to = ts_from + timedelta(days=7, seconds=-1) elif trange == 'last week': this_week = _time(today-timedelta(days=today.weekday())) ts_to = this_week + timedelta(seconds=-1) ts_from = _time(ts_to - timedelta(days=ts_to.weekday())) elif trange == 'this month': ts_from = _time(today.replace(day=1)) next_month = ts_from.replace(day=28) + timedelta(days=4) this_month_last_day = next_month - timedelta(days=next_month.day) ts_to = this_month_last_day + timedelta(days=1, seconds=-1) elif trange == 'last month': ts_to = _time(today.replace(day=1)) + timedelta(seconds=-1) ts_from = _time(ts_to.replace(day=1)) elif trange == 'this year': ts_from = _time(today.replace(month=1, day=1)) this_year_last_day = _time(today.replace(month=12, day=31)) ts_to = this_year_last_day + timedelta(days=1, seconds=-1) elif trange == 'last year': ts_to = _time(today.replace(month=1, day=1)) + timedelta(seconds=-1) ts_from = _time(ts_to.replace(month=1, day=1)) else: try: ts_from, ts_to = [dateparser.parse(t) for t in trange.split('|')] except ValueError: raise MalformedValueError( 'Cannot parse datetime range: wrong format!\n' + \ 'Datetime range should be two date[time] values divided by vertical bar (|)' ) if (ts_from is None) or (ts_to is None): raise MalformedValueError('Cannot parse datetime range: wrong format!') # Stretch date values (without time) to the end of day # (ignore microseconds). if ts_to.minute == 0 and ts_to.second == 0: ts_to += timedelta(days=1, seconds=-1) # Figure out desired timezone. time_zone = get_tz(tz) # Add timezone info to the result. ts_from = ts_from.replace(tzinfo=time_zone) ts_to = ts_to.replace(tzinfo=time_zone) if ts_from > ts_to: raise MalformedValueError( 'Start date cannot be greater than the end date!' ) return (ts_from, ts_to)
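Usage sketches for the helper above; the timezone string is just an example, and the explicit range follows the YYYYmmdd[THH:MM:SS] format described in the docstring.

start, end = convert_time_range("last week", tz="America/Lima")
start, end = convert_time_range("20200101|20200131T23:59:59")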
5,347,037
def process_waiting_time(kernel_data, node_id, phase_id, norm_vehs=False): """Processes batched waiting time computation""" cycle_time = 60 def fn(x): if (x / 13.89) < 0.1: return 1.0 else: return 0.0 wait_times = [] for t in kernel_data: qt = defaultdict(lambda : 0) for veh in t[node_id][phase_id]: key = (veh.edge_id, veh.lane) qt[key] += fn(veh.speed) if len(qt) == 0: wait_times.append(0.0) else: if norm_vehs: wait_times.append( sum([v / MAX_VEHS_PER_LANE[k] for k, v in qt.items()])) else: wait_times.append(sum(qt.values())) ret = round(sum(wait_times) / cycle_time, 2) return ret
5,347,038
def setup_group(dpath, mod, ofctl): """ Default Group. """ _LOG.debug("Setup Group: %d %s %s", dpath.id, mod, ofctl)
5,347,039
def get_version(): """ Do this so we don't have to import lottery_ticket_pruner which requires keras which cannot be counted on to be installed when this package gets installed. """ with open('lottery_ticket_pruner/__init__.py', 'r') as f: for line in f.readlines(): if line.startswith('__version__'): version = line.split('=')[1].strip().replace('"', '').replace('\'', '') return version return ''
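A typical setup.py usage sketch for the helper above.

from setuptools import setup

setup(
    name="lottery_ticket_pruner",
    version=get_version(),
)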
5,347,040
def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None, plot_feature_importance=False, model=None, verbose=10000, early_stopping_rounds=200, n_estimators=50000): """ A function to train a variety of regression models. Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances. :params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing) :params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing) :params: y - target :params: folds - folds to split data :params: model_type - type of model to use :params: eval_metric - metric to use :params: columns - columns to use. If None - use all columns :params: plot_feature_importance - whether to plot feature importance of LGB :params: model - sklearn model, works only for "sklearn" model type """ columns = X.columns if columns is None else columns X_test = X_test[columns] # to set up scoring parameters metrics_dict = {'mae': {'lgb_metric_name': 'mae', 'catboost_metric_name': 'MAE', 'sklearn_scoring_function': metrics.mean_absolute_error}, 'group_mae': {'lgb_metric_name': 'mae', 'catboost_metric_name': 'MAE', 'scoring_function': group_mean_log_mae}, 'mse': {'lgb_metric_name': 'mse', 'catboost_metric_name': 'MSE', 'sklearn_scoring_function': metrics.mean_squared_error} } result_dict = {} # out-of-fold predictions on train data oof = np.zeros(len(X)) # averaged predictions on train data prediction = np.zeros(len(X_test)) # list of scores on folds scores = [] feature_importance = pd.DataFrame() # split and train on folds for fold_n, (train_index, valid_index) in enumerate(folds.split(X)): print(f'Fold {fold_n + 1} started at {time.ctime()}') if type(X) == np.ndarray: X_train, X_valid = X[columns][train_index], X[columns][valid_index] y_train, y_valid = y[train_index], y[valid_index] else: X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] if model_type == 'lgb': model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1) model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric=metrics_dict[eval_metric]['lgb_metric_name'], verbose=verbose, early_stopping_rounds=early_stopping_rounds) y_pred_valid = model.predict(X_valid) y_pred = model.predict(X_test, num_iteration=model.best_iteration_) if model_type == 'xgb': train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns) valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns) watchlist = [(train_data, 'train'), (valid_data, 'valid_data')] model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=verbose, params=params) y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns), ntree_limit=model.best_ntree_limit) y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit) if model_type == 'sklearn': model = model model.fit(X_train, y_train) y_pred_valid = model.predict(X_valid).reshape(-1, ) score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid) print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.') print('') y_pred = model.predict(X_test).reshape(-1, ) if model_type == 'cat': model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'], **params, loss_function=metrics_dict[eval_metric]['catboost_metric_name']) model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False) y_pred_valid = model.predict(X_valid) y_pred = model.predict(X_test) oof[valid_index] = y_pred_valid.reshape(-1, ) if eval_metric != 'group_mae': scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)) else: scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type'])) prediction += y_pred if model_type == 'lgb' and plot_feature_importance: # feature importance fold_importance = pd.DataFrame() fold_importance["feature"] = columns fold_importance["importance"] = model.feature_importances_ fold_importance["fold"] = fold_n + 1 feature_importance = pd.concat([feature_importance, fold_importance], axis=0) prediction /= folds.n_splits print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores))) result_dict['oof'] = oof result_dict['prediction'] = prediction result_dict['scores'] = scores # if model_type == 'lgb': # if plot_feature_importance: # feature_importance["importance"] /= folds.n_splits # cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values( # by="importance", ascending=False)[:50].index # # best_features = feature_importance.loc[feature_importance.feature.isin(cols)] # # plt.figure(figsize=(16, 12)); # sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)); # plt.title('LGB Features (avg over folds)'); # # result_dict['feature_importance'] = feature_importance return result_dict
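A usage sketch with a plain KFold and illustrative LightGBM parameters; X, X_test and y are assumed to be prepared pandas objects.

from sklearn.model_selection import KFold

folds = KFold(n_splits=5, shuffle=True, random_state=42)
params = {"objective": "regression", "learning_rate": 0.05, "num_leaves": 63}
result = train_model_regression(X, X_test, y, params, folds,
                                model_type="lgb", eval_metric="mae",
                                plot_feature_importance=False)
oof, test_preds = result["oof"], result["prediction"]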
5,347,041
def extract(d, keys):
    """ Extract a value from a dict by trying keys in priority order.

    :param d: The dict.
    :param keys: A list of keys, in order of priority.
    :return: The value of the first (highest-priority) key that holds a truthy value,
        or None if no such key exists.
    """
    if not d:
        return

    for key in keys:
        tmp = d.get(key)
        if tmp:
            return tmp
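# A minimal usage example; the dict below is purely illustrative.
record = {"title": "", "name": "Widget", "label": "Old widget"}
assert extract(record, ["title", "name", "label"]) == "Widget"  # "" is falsy, so "title" is skipped
assert extract({}, ["title"]) is None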
5,347,042
def test_combinations(): """Tests reading and updating the combinations txt file""" assert "temp.txt" not in os.listdir(".") assert not generator.check(filename="temp.txt", combination=OUTPUT) generator.update(filename="temp.txt", combination=OUTPUT) assert generator.check(filename="temp.txt", combination=OUTPUT) os.remove("temp.txt")
5,347,043
def create_file_list(files, suffices, file_type, logger, root_path=None): ############################################################################### """Create and return a master list of files from <files>. <files> is either a comma-separated string of pathnames or a list. If a pathname is a directory, all files with extensions in <suffices> are included. Wildcards in a pathname are expanded. <suffices> is a list of allowed file types. Filenames in <files> with an allowed suffix will be added to the master list. Filenames with a '.txt' suffix will be parsed to look for allowed filenames. <file_type> is a description of the allowed file types. <logger> is a logger used to print warnings (unrecognized filename types) and debug messages. If <root_path> is not None, it is used to create absolute paths for <files>, otherwise, the current working directory is used. """ master_list = list() txt_files = list() # Already processed txt files pathname = None if isinstance(files, str): file_list = [x.strip() for x in files.split(',') if x.strip()] elif isinstance(files, (list, tuple)): file_list = files else: raise ParseInternalError("Bad input, <files> = {}".format(files)) # end if if root_path is None: root_path = os.getcwd() # end if master_list, errors = _create_file_list_int(file_list, suffices, file_type, logger, txt_files, pathname, root_path, master_list) if errors: emsg = 'Error processing list of {} files:\n {}' raise CCPPError(emsg.format(file_type, '\n '.join(errors))) # end if return master_list
5,347,044
def register_urls(app): """ Register hapapi URLs """ RootView.register(app) ProxyView.register(app) BackendView.register(app)
5,347,045
def get_raster(layer, bbox, path=None, update_cache=False, check_modified=False, mosaic=False): """downloads National Elevation Dataset raster tiles that cover the given bounding box for the specified data layer. Parameters ---------- layer : str dataset layer name. (see get_available_layers for list) bbox : (sequence of float|str) bounding box of in geographic coordinates of area to download tiles in the format (min longitude, min latitude, max longitude, max latitude) path : ``None`` or path if ``None`` default path will be used update_cache: ``True`` or ``False`` (default) if ``False`` and output file already exists use it. check_modified: ``True`` or ``False`` (default) if tile exists in path, check if newer file exists online and download if available. mosaic: ``True`` or ``False`` (default) if ``True``, mosaic and clip downloaded tiles to the extents of the bbox provided. Requires rasterio package and GDAL. Returns ------- raster_tiles : geojson FeatureCollection metadata as a FeatureCollection. local url of downloaded data is in feature['properties']['file'] """ _check_layer(layer) raster_tiles = _download_tiles(get_raster_availability(layer, bbox), path=path, check_modified=check_modified) if mosaic: if path is None: path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH) util.mkdir_if_doesnt_exist(os.path.join(path, 'by_boundingbox')) xmin, ymin, xmax, ymax = [float(n) for n in bbox] uid = util.generate_raster_uid(layer, xmin, ymin, xmax, ymax) output_path = os.path.join(path, 'by_boundingbox', uid + '.tif') if os.path.isfile(output_path) and not update_cache: return output_path raster_files = [tile['properties']['file'] for tile in raster_tiles['features']] util.mosaic_and_clip(raster_files, xmin, ymin, xmax, ymax, output_path) return [output_path] return raster_tiles
5,347,046
def edit_distance(y, y_hat):
    """Edit distance between two sequences.

    Parameters
    ----------
    y : str
        The groundtruth.
    y_hat : str
        The recognition candidate.

    Returns
    -------
    int
        The minimum number of symbol edits (i.e. insertions, deletions or
        substitutions) required to change one sequence into the other.

    """
    return _edit_distance_matrix(y, y_hat)[-1, -1]
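# A minimal usage sketch, assuming the module's private helper _edit_distance_matrix is
# available alongside the function; the word pairs are illustrative of standard
# Levenshtein distances.
assert edit_distance("kitten", "sitting") == 3  # two substitutions and one insertion
assert edit_distance("flaw", "lawn") == 2       # one deletion and one insertion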
5,347,047
def sort_predictions(classes, predictions, bboxes): """ Sorts predictions from most probable to least, generate extra metadata about them. """ results = [] for idx, pred in enumerate(predictions): results.append({ "class_idx": np.argmax(pred), "class": classes[np.argmax(pred)], "prob": pred[np.argmax(pred)], "fname": get_region_filename(idx), "coords": bboxes[idx], }) results.sort(key=itemgetter("prob"), reverse=True) return results
5,347,048
def task_bootstrap_for_adming():
    """doit-style task: run the bootstrap_for_adming.py script on the target node via
    clushUtils.exec_script, logging output to bootstrap_for_adming.log."""
    return {'actions': [(clushUtils.exec_script, [targetNode, "bootstrap_for_adming.py"],
                         {
                             'dependsFiles': [".passwords", f"{homeDir}/.ssh/id_rsa.pub"],
                             'user': "root",
                             'manageEnv': False,
                             'dependsPkgs': ['py3-pip', 'py3-psutil', 'curl'],
                             'logOutput': 'bootstrap_for_adming.log'
                         }
                         )
                        ],
            'targets': [f'{logDir}/bootstrap_for_adming.log.{targetNode}'],
            'file_dep': ["deployConfig.py"],
            }
5,347,049
def reassign_clustered(
    knn: List[Tuple[npt.NDArray, npt.NDArray]],
    clusters: List[Tuple[str, int]],
    min_sim_threshold: float = 0.6,
    n_iter: int = 20,
    epsilon: float = 0.05,
) -> Tuple[List[Tuple[str, int]], npt.NDArray, npt.NDArray]:
    """Reassigns companies to new clusters based on the average similarity to nearest
    neighbours belonging to clusters.

    Args:
        knn: A list of pairs of nearest neighbour index IDs and their similarities.
        clusters: A list of cluster ID and org ID pairs.
        min_sim_threshold: Minimum cosine similarity for a cluster reassignment to
            be accepted.
        n_iter: Number of times to iteratively reassign companies to clusters.
        epsilon: Minimum fraction of companies required for an iteration of
            reassignment to happen. If the fraction of companies being reassigned
            falls below this value, then there will be no more reassignment
            iterations, even if n_iter has not been reached.

    Returns:
        clusters: A list of reassigned cluster ID and org ID pairs.
        agg_clusters: An array of the candidate cluster IDs considered per company.
        agg_cluster_sims: An array of the mean similarities to those candidate clusters.
    """
    org_ids = [c[1] for c in clusters]

    shift = epsilon
    complete = 0

    while (shift >= epsilon) and (n_iter > complete):
        index_id_cluster_lookup = np.array([c[0] for c in clusters])
        changed = 0
        _clusters = []
        agg_clusters = []
        agg_cluster_sims = []

        for org_id, (knn_ids, sims) in zip(org_ids, knn):
            knn_ids, sims, source_id = decompose_knn(
                knn_ids,
                sims,
                source=True,
            )
            knn_cluster_ids = index_id_cluster_lookup[knn_ids]

            unique_clusters, agg_sims = mean_cluster_similarities(knn_cluster_ids, sims)
            best_cluster, best_sim = get_best_cluster(unique_clusters, agg_sims)

            original_cluster = index_id_cluster_lookup[source_id]
            same_cluster = best_cluster == original_cluster

            if same_cluster:
                _clusters.append((original_cluster, org_id))
            else:
                if best_sim >= min_sim_threshold:
                    _clusters.append((best_cluster, org_id))
                    changed += 1
                else:
                    _clusters.append((original_cluster, org_id))

            agg_clusters.append(unique_clusters)
            agg_cluster_sims.append(agg_sims)

        clusters = _clusters
        complete += 1
        shift = changed / len(knn)

    return clusters, np.array(agg_clusters), np.array(agg_cluster_sims)
5,347,050
def boys(n, t):
    """Boys function for the calculation of coulombic integrals.

    Parameters
    ----------
    n : int
        Order of boys function

    t : float
        Variable for boys function.

    Raises
    ------
    TypeError
        If boys function order is not an integer.
    ValueError
        If boys function order n is not a non-negative number.
    """
    if not isinstance(n, int):
        raise TypeError("Boys function order n must be an integer")
    if n < 0:
        raise ValueError("Boys function order n must be a non-negative number")
    if not isinstance(t, (int, float)):
        raise TypeError("Boys function variable t must be integer or float")
    return sc.hyp1f1(n + 0.5, n + 1.5, -t) / (2.0 * n + 1.0)
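# A quick sanity check on the zeroth-order value, assuming scipy.special is imported as
# "sc" in this module (as the hyp1f1 call above suggests). The closed form used here is
# F_0(t) = 0.5 * sqrt(pi / t) * erf(sqrt(t)).
import math
t_val = 1.0
assert math.isclose(boys(0, t_val),
                    0.5 * math.sqrt(math.pi / t_val) * math.erf(math.sqrt(t_val)),
                    rel_tol=1e-9)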
5,347,051
def show_next_action_with_min_prio(context): """Check that the next actions have the minimum priority.""" for line in context.next_action().strip().split("\n"): assert_in("(A)", line)
5,347,052
def get_diameter_by_sigma(sigma, proba): """ Get diameter of nodule given sigma of normal distribution and probability of diameter coverage area. Transforms sigma parameter of normal distribution corresponding to cancerous nodule to its diameter using probability of diameter coverage area. Parameters ---------- sigma : float square root of normal distribution variance. proba : float probability of diameter coverage area. Returns ------- float equivalent diameter. """ return 2 * sigma * stats.norm.ppf((1 + proba) / 2)
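# A minimal usage sketch, assuming scipy.stats is imported as "stats" in this module
# (as the norm.ppf call above suggests). For a 95% coverage area the diameter is
# roughly 2 * 1.96 * sigma:
d = get_diameter_by_sigma(sigma=5.0, proba=0.95)
print(round(d, 2))  # ~19.6, i.e. 2 * 1.96 * 5.0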
5,347,053
def serve(environment): """Run Marian application based on FLASK_ENV configuration.""" if environment == 'production': os.system('sh ./serve.sh') else: os.system('flask run')
5,347,054
def run_benchmark(mode: str) -> None: """ Run the given model on a benchmark and save the prediction. :param mode: "repos_direct" for using direct embeddings of repos. "repos_by_libraries" for using embeddings of repos as average of libraries. "jaccard" for using Jaccard similarity. :return: None. """ benchmark = defaultdict(list) with open(f"benchmark/benchmark.txt") as fin: # Load the benchmark. for line in fin: repo_full_name = line.rstrip().split(';')[0] benchmark[get_year(repo_full_name)].append(repo_full_name) for config in tqdm(get_configs()): idf_power = config['idf_power'] sim_power = config['sim_power'] num_closest = config['num_closest'] with open(f"benchmark/results/{mode}_{idf_power}_{sim_power}_" f"{num_closest}.txt", "w+") as fout: for year, year_benchmark in benchmark.items(): results = suggest_libraries(mode=mode, names=year_benchmark, single_version=True, config=config) # Get the predictions. for repo in results: fout.write(f"{repo};{','.join([x[0] for x in results[repo]])}\n")
5,347,055
def _colorize(val, color): """Colorize a string using termcolor or colorama. If any of them are available. """ if termcolor is not None: val = termcolor.colored(val, color) elif colorama is not None: val = "{}{}{}".format(TERMCOLOR2COLORAMA[color], val, colorama.Style.RESET_ALL) return val
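# A minimal usage sketch; it relies on the module-level "termcolor"/"colorama" names and
# the TERMCOLOR2COLORAMA mapping referenced above, so the colouring applied depends on
# which of those optional dependencies is installed.
print(_colorize("error", "red"))    # coloured if termcolor or colorama is available
print(_colorize("plain", "green"))  # otherwise the string is returned unmodified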
5,347,056
def get_output_filename(output_folder: str, repository_type: str, repository_name: str, filename: str) -> Path: """Returns the output filename for the file fetched from a repository.""" return ( Path(output_folder) / Path(repository_type.lower()) / Path(Path(repository_name).name) / Path(Path(filename).name) )
5,347,057
def get_data(cpe):
    """Collect a single sensor reading: read one line from the serial device and
    return it as a float."""
    cpe.reset_input_buffer()
    raw_line = cpe.readline()
    light = float(raw_line.decode("ascii"))
    # TODO wrap in TRY?
    return light
5,347,058
def wave_exist_2d_full_v2(b=.8): """ plot zeros of -nu1 + G(nu1,nu2) and -nu2 + G(nu2,nu1) as a function of g use accurate fourier series """ # get data # nc1 bifurcation values bif = np.loadtxt('twod_wave_exist_br1.dat') #bif2 = np.loadtxt('twod_wave_exist_br2.dat') bif_diag1 = np.loadtxt('twod_wave_exist_diag1.dat') bif_diag2 = np.loadtxt('twod_wave_exist_diag2.dat') # clean bifx,bify = clean(bif[:,3],bif[:,7],tol=.47) bifx2,bify2 = clean(bif[:,3],bif[:,8],tol=.47) bif_diag1x,bif_diag1y = clean(bif_diag1[:,0],np.abs(bif_diag1[:,1]),tol=.2) bif_diag2x,bif_diag2y = clean(bif_diag2[:,0],np.abs(bif_diag2[:,1]),tol=.2) # remove nans for calculating minima (usually nans are taken to be max/min vals, which is bad) bifx_nonan = bifx[(~np.isnan(bifx))*(~np.isnan(bify))] bify_nonan = bify[(~np.isnan(bifx))*(~np.isnan(bify))] bifx2_nonan = bifx2[(~np.isnan(bifx2))*(~np.isnan(bify2))] bify2_nonan = bify2[(~np.isnan(bifx2))*(~np.isnan(bify2))] bif_diag1x_nonan = bif_diag1x[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))] bif_diag1y_nonan = bif_diag1y[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))] bif_diag2x_nonan = bif_diag2x[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))] bif_diag2y_nonan = bif_diag2y[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))] fig = plt.figure(figsize=(10,5)) ax1 = fig.add_subplot(121, projection='3d') ax2 = fig.add_subplot(122) plane1_z = .895 plane2_z = 1.17 # get plane intersection idx bifx_int_p1 = np.argmin(np.abs(bifx_nonan-plane1_z)) bifx_int_p2 = np.argmin(np.abs(bifx_nonan-plane2_z)) bifx2_int_p1 = np.argmin(np.abs(bifx2_nonan-plane1_z)) bifx2_int_p2 = np.argmin(np.abs(bifx2_nonan-plane2_z)) bif_diagx_int_p1 = np.argmin(np.abs(bif_diag1x_nonan-plane1_z)) bif_diagx_int_p2 = np.argmin(np.abs(bif_diag1x_nonan-plane2_z)) bif_diagx2_int_p1 = np.argmin(np.abs(bif_diag2x_nonan-plane1_z)) bif_diagx2_int_p2 = np.argmin(np.abs(bif_diag2x_nonan-plane2_z)) ## plot curves in 3d # plot off diagonal and axial curves v1a = bify2[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] v2a = bify[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] ga = bifx[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] #v1b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)] #v2b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)] gb = np.linspace(np.amin(bif_diag1x[~np.isnan(bif_diag1x)]),np.amax(bif_diag1x[~np.isnan(bif_diag1x)]),20) # clean ga,v1a,v2a = clean3d(ga,v1a,v2a,tol=.47) # remove nans for linewidth stuff later. 
ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] # prep for plotting with different line widths sol = np.zeros((len(ga),3)) sol[:,0] = v1a sol[:,1] = ga sol[:,2] = v2a sol = np.transpose(sol) points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs = np.concatenate([points[:-1],points[1:]],axis = 1) line3d = Line3DCollection(segs,linewidths=(1.+(v1a_nonan)/np.amax(v1a_nonan)*3.),colors='k') # add modified curves to figure ax1.add_collection3d(line3d) # repleat above to capture remaining axial branch(es) # prep for plotting with different line widths sol = np.zeros((len(ga),3)) sol[:,0] = v2a sol[:,1] = ga sol[:,2] = v1a sol = np.transpose(sol) points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs = np.concatenate([points[:-1],points[1:]],axis = 1) line3d = Line3DCollection(segs,linewidths=(1.+(v2a_nonan)/np.amax(v2a_nonan)*3.),colors='k') # add modified curves to figure ax1.add_collection3d(line3d) # plot diagonal guys # prep for plotting with different line widths diagx = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagy = bif_diag2x[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagz = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagx_nonan = diagx[~np.isnan(diagx)] sol = np.zeros((len(diagx),3)) sol[:,0] = diagx sol[:,1] = diagy sol[:,2] = diagz sol = np.transpose(sol) points2 = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs2 = np.concatenate([points2[:-1],points2[1:]],axis = 1) line3d2 = Line3DCollection(segs2,linewidths=(1.+(diagx_nonan)/np.amax(diagx_nonan)*3.),colors='k') ax1.add_collection3d(line3d2) # plot zero solution ax1.plot([.0,0],[.5,plane1_z],[.0,0],color='black',lw=1) # plot bifurcation planes X,Y = np.meshgrid(np.linspace(0,1,10),np.linspace(0,1,10)) ax1.plot_surface(X,0.*X+plane1_z,Y,alpha=.5,color='gray') ax1.plot_surface(X,0.*X+plane2_z,Y,alpha=.5,color='red') # plot plane intersections ax1.scatter(bify[bifx_int_p1],bifx[bifx_int_p1],bify2[bifx_int_p1],color='black',s=20) #ax1.scatter(bify[bifx_int_p2],bifx[bifx_int_p2],bify2[bifx_int_p2],color='black',s=20) #ax1.scatter(bif_diag2y_nonan[bif_diagx_int_p2],bif_diag1x_nonan[bif_diagx_int_p2],bif_diag1y_nonan[bif_diagx_int_p2],color='black',s=20) ax1.scatter(0,1.17,.51,color='red',s=20,zorder=10) ax1.scatter(.5,1.17,0.,color='red',s=40,zorder=10) ax1.scatter(.37,1.17,.37,color='red',s=50,zorder=10) """ ax1.scatter(L1[g_int_p2],g[g_int_p2],M1[g_int_p2],color='black',s=20) ax1.scatter(L2[g_int_p1],g[g_int_p1],M2[g_int_p1],color='black',s=20) ax1.scatter(L2[g_int_p2],g[g_int_p2],M2[g_int_p2],color='black',s=20) ax1.scatter(L3[g_int_p1],g[g_int_p1],M3[g_int_p1],color='black',s=20) ax1.scatter(L3[g_int_p2],g[g_int_p2],M3[g_int_p2],color='black',s=20) ax1.scatter(L4[g_int_p1],g[g_int_p1],M4[g_int_p1],color='black',s=20) ax1.scatter(L4[g_int_p2],g[g_int_p2],M4[g_int_p2],color='black',s=20) """ ## plot curves in 2d # bifurcation lines ax2.plot([plane1_z,plane1_z],[-1,1.8],color='black',alpha=.5,lw=2) ax2.plot([plane2_z,plane2_z],[-1,1.8],color='red',alpha=.5,lw=2) ax2.plot(bifx,bify,color='black') ax2.plot(bifx2,bify2,color='black') ax2.plot(bif_diag1x,bif_diag1y,color='black') ax2.plot(bif_diag2x,bif_diag2y,color='black') ax2.plot([0,5],[0,0],color='black') # label curves ax2.annotate(r'$x$-axis direction', xy=(1.04,.37),xycoords='data',textcoords='data', xytext=(.6,.6), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) 
ax2.annotate(r'$y$-axis direction', xy=(1.0,.0),xycoords='data',textcoords='data', xytext=(.55,.33), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate(r'$g^*$', xy=(.9,.0),xycoords='data',textcoords='data', xytext=(.8,.05), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Diagonal', xy=(1.1,.32),xycoords='data',textcoords='data', xytext=(1.4,.2), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Off-diagonal', xy=(1.4,.41),xycoords='data',textcoords='data', xytext=(1.5,.34), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Off-diagonal', alpha=0., xy=(1.4,.62),xycoords='data',textcoords='data', xytext=(1.5,.34), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) # plot params ax1.view_init(20,-8) # set labels ax1.set_xlabel(r'$\nu_2$') ax2.set_xlabel(r'$g$') ax1.set_ylabel(r'$g$') ax2.set_ylabel(r'$\nu_1$') ax1.set_zlabel(r'$\nu_1$') ax1.set_xlim(0.,1.) ax2.set_xlim(.5,2.) ax1.set_ylim(.5,2.) ax2.set_ylim(-.05,1.) ax1.set_zlim(0.,1.) #plt.show() return fig
5,347,059
def _parse_variables(vars_list):
    """Transform the list of vars stored in a module definition into a dictionary"""
    vars = {}
    for var in vars_list:
        key = var['name']
        value = None
        for var_type in ATTRIBUTE_TYPE:
            if var_type in var:
                value = var[var_type]
                break
        vars[key] = value
    return vars
5,347,060
def get_vertical_axes(nc_file): """ Scan input netCDF file and return a list of vertical axis variables, requiring specific axis names """ vertical_axes = [] for var_name, var in nc_file.variables.items(): if var_name in ('full_levels', 'half_levels'): vertical_axes.append(var) logging.info('Found %i vertical axes.', len(vertical_axes)) return vertical_axes
5,347,061
def update_workload_volumes(workload,config,spec_config): """ Return True if some env is updated;otherwise return False """ volumemount_configs = get_property(spec_config,("containers",0,"volumeMounts")) if not volumemount_configs: del_objs = models.WorkloadVolume.objects.filter(workload=workload).delete() if del_objs[0]: logger.debug("Delete the volumes for workload({}),deleted objects = {}".format(workload,del_objs)) return True else: return False updated = False name = None del_objs = models.WorkloadVolume.objects.filter(workload=workload).exclude(name__in=[c["name"] for c in volumemount_configs]).delete() if del_objs[0]: logger.debug("Delete the volumes for workload({}),deleted objects = {}".format(workload,del_objs)) updated = True #exact all volumes from yaml file volume_configs = {} for volume_config in get_property(spec_config,"volumes") or []: volume_configs[volume_config["name"]] = volume_config for volumemount_config in volumemount_configs: name = volumemount_config["name"] try: obj = models.WorkloadVolume.objects.get(workload=workload,name=name) except ObjectDoesNotExist as ex: obj = models.WorkloadVolume(workload=workload,name=name) writable = get_property(volumemount_config,"readOnly",lambda val: False if val else True) update_fields = set_fields_from_config(obj,volumemount_config,[ ("mountpath","mountPath",None), ("subpath","subPath",None) ]) if name not in volume_configs: continue volume_config = volume_configs[name] if "persistentVolumeClaim" in volume_config: #reference the volume from volume claim claimname = volume_config["persistentVolumeClaim"]["claimName"] set_field(obj,"volume_claim", models.PersistentVolumeClaim.objects.get(cluster=workload.cluster,namespace=workload.namespace,name=claimname),update_fields) set_field(obj,"volume", obj.volume_claim.volume,update_fields) set_field(obj,"volumepath", obj.volume_claim.volume.volumepath if obj.volume_claim.volume else None ,update_fields) set_field(obj,"other_config", None,update_fields) if writable: writable = obj.volume_claim.writable elif "hostPath" in volume_config: hostpath = volume_config["hostPath"]["path"] set_field(obj,"volume_claim", None,update_fields) set_field(obj,"volumepath", hostpath,update_fields) set_field(obj,"volume", models.PersistentVolume.objects.filter(cluster=workload.cluster,volumepath=hostpath).first(),update_fields) set_field(obj,"other_config", None,update_fields) if writable and obj.volume: writable = obj.volume.writable else: set_field(obj,"other_config", volume_config,update_fields) set_field(obj,"writable",writable,update_fields) if obj.pk is None: obj.modified = workload.modified obj.created = workload.modified obj.save() updated = True logger.debug("Create deployment workload volume({})".format(obj)) elif update_fields: obj.modified = workload.modified update_fields.append("modified") update_fields.append("updated") obj.save(update_fields=update_fields) updated = True logger.debug("Update the deployment workload volume({}),update_fields={}".format(obj,update_fields)) else: logger.debug("The deployment workload volume({}) is not changed".format(obj)) return updated
5,347,062
def b_2_d(x):
    """
    Convert a bit list (least-significant bit first) to its decimal value
    :param x: bit list
    :return: decimal
    """
    s = 0
    for i in range(0, len(x)):
        s += x[i] * 2**i
    return s
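# A minimal usage example: the list is little-endian (index 0 is the least-significant
# bit), so [1, 0, 1] encodes 1*2**0 + 0*2**1 + 1*2**2 = 5.
assert b_2_d([1, 0, 1]) == 5
assert b_2_d([0, 0, 0, 1]) == 8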
5,347,063
def test_subscript_imports_exports(): """ If we look at an element of an object such as a dataframe(e.g. x[0]), that dataframe needs to be defined so should be an import. If the element is an assignment target it should also be an export. """ code = "df.x[0] = 3\n" frames = ["df"] testdata = {"code": code, "frames": frames, "hash": "irrelevant"} result = handle_exports(testdata) assert(len(result["imports"])==1) assert(result["imports"][0] == "df") assert(len(result["exports"])==1) assert(result["exports"][0] == "df")
5,347,064
def get(s, delimiter='', format="diacritical"): """Return pinyin of string, the string must be unicode """ return delimiter.join(_pinyin_generator(u(s), format=format))
5,347,065
def test_multiple_insert(client, auth_header: dict, random_depots: List[dict]): """Test with multiple objects in array""" input_data: dict = {"stack_id": 1, "depots": random_depots} logging.debug(f"Number of depots : {len(random_depots)}") logging.debug(f"Input data: {input_data}") HEADERS: dict = dict(auth_header, **{"Content-Type": "application/json"}) res: Response = client.post(ENDPOINT, headers=HEADERS, json=input_data) logging.debug(f"Response : {res}") logging.debug(f"Response Data : {res.data}") assert res.status_code == 400 assert res.headers["Content-Type"] == "application/json" assert "contains more than one object" in res.json["message"]
5,347,066
def deposit(**kwargs):
    """
    Deposit CifData object
    """
    from aiida.orm.data.cif import CifData
    node = kwargs.pop('node')
    deposition_type = kwargs.pop('deposition_type')
    parameter_data = kwargs.pop('parameter_data')

    # if kwargs['database'] is None:
    #    echo.echo_critical("Default database is not defined, please specify.")
    kwargs.pop('database')  # looks like a bug, but deposit function called inside deposit_tcod
    # complains about the 'database' keywords argument

    # Iterate over a copy of the items: popping keys from a dict while iterating over it
    # raises a RuntimeError.
    for key, value in list(kwargs.items()):
        if value is None:
            kwargs.pop(key)

    if not isinstance(node, CifData):
        echo.echo_critical("Node {} is of class {} instead of {}".format(node, type(node), CifData))

    echo.echo(deposit_tcod(node, deposition_type, parameter_data, **kwargs))
5,347,067
def top_dist(g1, g2, name='weight', topology_type=0): """ :param g1: graph 1 :param g2: graph 2 :param name: compared edge attribute :param topology_type: topology distance normalization method :return: topology distance """ max_v = max_edge(g1, name, max_edge(g2, name, 0)) # find max value in a graph v = 0 nodes_list = set(g1.nodes()) | set(g2.nodes()) # define nodes list in g1 or g2 degree1 = g1.degree(weight=name) # define degree of g1 degree2 = g2.degree(weight=name) # define degree of g2 for node in nodes_list: # consider each node if node in g1.nodes() and node in g2.nodes(): # node appears in both graphs nodes1 = set(g1.neighbors(node)) # adjacent nodes in g1 nodes2 = set(g2.neighbors(node)) - nodes1 # distinct adjacent nodes in g2 for node2 in nodes1: if node2 in g2.neighbors(node): v += abs(g1[node][node2][name]-g2[node][node2][name]) else: v += g1[node][node2][name] for node2 in nodes2: v += g2[node][node2][name] else: if node in g1.nodes(): # node appears only in g1 v += degree1[node] else: v += degree2[node] # node appears only in g2 v /= max_v if topology_type == 0: return v/len(nodes_list)/len(nodes_list) else: num_edges = len(set(g1.edges()) | set(g2.edges())) return v/num_edges/num_edges
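# A minimal usage sketch, assuming networkx is imported as nx and that the module-level
# helper max_edge(graph, name, current_max) used above is available; the two toy graphs
# are hypothetical.
import networkx as nx
g1 = nx.Graph()
g1.add_edge("a", "b", weight=1.0)
g1.add_edge("b", "c", weight=2.0)
g2 = nx.Graph()
g2.add_edge("a", "b", weight=1.5)
g2.add_edge("c", "d", weight=0.5)
print(top_dist(g1, g2, name="weight"))                   # normalised by (number of nodes)**2
print(top_dist(g1, g2, name="weight", topology_type=1))  # normalised by (number of union edges)**2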
5,347,068
async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable[[list], None], ) -> bool: """Set up the ISY994 sensor platform.""" hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id] devices = [] for node in hass_isy_data[ISY994_NODES][SENSOR]: _LOGGER.debug("Loading %s", node.name) devices.append(ISYSensorEntity(node)) for vname, vobj in hass_isy_data[ISY994_VARIABLES]: devices.append(ISYSensorVariableEntity(vname, vobj)) await migrate_old_unique_ids(hass, SENSOR, devices) async_add_entities(devices)
5,347,069
def analyze(options, args=None): """This function runs analysis on simulation data. What analysis and what to output is specified in the options object. """ run_analysis(options)
5,347,070
def refresh_track(): """ For now the interface isn't refreshed :return: """ try: url = request.form["url"] except KeyError: return "nok" with app.database_lock: Track.refresh_by_url(app.config["DATABASE_PATH"], url) return "ok"
5,347,071
def test_user_mention_with_dictionary(): """This function tests creating a user mention with all required dictionary key value pairs provided. .. versionadded:: 2.4.0 :returns: None """ user_id, login = get_user_test_data() user_info = {'id': user_id, 'login': login} response = messages.format_user_mention(user_info=user_info) assert expected_user_response(response) return
5,347,072
def get_weights(): """ Loads uni-modal text and image CNN model weights. Returns: tuple: text and image weights. """ text_weight_file = open("models/unimodal_text_CNN_weights.pickle", "rb") text_weights = pickle.load(text_weight_file) text_weight_file.close() image_weight_file = open("models/unimodal_image_CNN_LSTM_weights.pickle", "rb") image_weights = pickle.load(image_weight_file) image_weight_file.close() return text_weights, image_weights
5,347,073
def get_data(start_runno, start_fileno, hall, fields): # pylint: disable=too-many-locals,too-many-branches """Pull the data requested, starting from first VALID run/file after/including the specified one""" val_dict = lambda: {'values': []} ad_dict = lambda: {f'AD{det}': val_dict() for det in dets_for(hall, start_runno)} wp_dict = lambda: {f'WP{det}': val_dict() for det in ['I', 'O']} result = {'runnos': [], 'filenos': [], 'metrics': { field_desc(field): wp_dict() if field.endswith('WP') else ad_dict() for field in fields }, # Send 'latest' so that frontend knows whether to disable END button 'latest': all_latest()} focus = focus_sql(hall, start_runno) try: end_runno, end_fileno = get_shifted(start_runno, start_fileno, hall, 1, skipfirst=False) except EndOfDataException: # return empty result, let caller decide how to proceed return result ad_fields = [f for f in fields if not f.endswith('WP')] wp_fields = [f[:-2] for f in fields if f.endswith('WP')] uniq_fields = list(set(ad_fields + wp_fields)) if any(f.endswith('counts') for f in uniq_fields): livetimes = {} rows = get_livetimes(start_runno, start_fileno, end_runno, end_fileno, hall) for runno, fileno, lt_ms in rows: livetimes[(runno, fileno)] = lt_ms / 1000 default_livetime = sum(livetimes.values()) / len(livetimes) field_sel = f', {",".join(uniq_fields)}' if uniq_fields else '' loc = loc_pred(start_runno, start_fileno, end_runno, end_fileno) query = f'''SELECT runno, fileno, detectorid {field_sel} FROM DqDetectorNew NATURAL JOIN DqDetectorNewVld vld LEFT JOIN runno_fileno_sitemask USING (runno, fileno) WHERE ({loc}) AND ({focus}) AND vld.sitemask = {sitemask(hall)} AND streamtype = 'Physics' ORDER BY runno, fileno, detectorid, insertdate''' rows = dq_exec(query).fetchall() def val_arr(field, det): if det >= 5: prefix = 'WP' det = 'O' if det == 6 else 'I' else: prefix = 'AD' return result['metrics'][field_desc(field)][f'{prefix}{det}']['values'] last_runno, last_fileno = None, None for row in rows: runno, fileno, det = row[:3] if runno != last_runno or fileno != last_fileno: result['runnos'].append(runno) result['filenos'].append(fileno) for each_ad in dets_for(hall, start_runno): for field in ad_fields: val_arr(field, each_ad).append(-2) # default value for each_wp in [5, 6]: for field in wp_fields: val_arr(field+'WP', each_wp).append(-2) for i, field in enumerate(uniq_fields): val = row[i+3] if field.endswith('counts'): try: norm = livetimes[(runno, fileno)] except KeyError: print(f'WARNING: Missing livetime for {runno}, {fileno}') norm = default_livetime if val is not None: # in case we got a NULL in this row val /= norm if val is None: val = -3 # NOTE If the loc_pred queries are slow due to IN, consider # simplifying those and instead doing a more precise AD check # here if field in ad_fields and det <= 4: val_arr(field, det)[-1] = val # replace default/older elif field in wp_fields and det >= 5: val_arr(field+'WP', det)[-1] = val last_runno, last_fileno = runno, fileno result['xs'] = scale_xs(result['runnos'], result['filenos'], (start_runno, start_fileno), (end_runno, end_fileno), hall) return result
5,347,074
def timelength_label_to_seconds( timelength_label: spec.TimelengthLabel, ) -> spec.TimelengthSeconds: """convert TimelengthLabel to seconds""" number = int(timelength_label[:-1]) letter = timelength_label[-1] base_units = timelength_units.get_base_units() base_seconds = base_units['1' + letter] seconds = number * base_seconds return seconds
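# A minimal usage sketch, assuming the module's timelength_units.get_base_units() maps
# unit labels to second counts (e.g. '1m' -> 60, '1h' -> 3600, '1d' -> 86400):
#
#     timelength_label_to_seconds('30m')  # -> 30 * 60    = 1800
#     timelength_label_to_seconds('2h')   # -> 2 * 3600   = 7200
#     timelength_label_to_seconds('7d')   # -> 7 * 86400  = 604800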
5,347,075
def _save_node_ip_address(task_id, node): """Helper function for saving IP address and creating DNS records of a new compute node""" assert node.address try: ip_address = node.create_ip_address() except IPAddress.DoesNotExist as exc: logger.warning('Could not save node %s IP address "%s" into admin network (%s)', node, node.address, exc) return logger.info('Saving node %s IP address "%s" into admin network', node, node.ip_address) ip_address.save() admin_net = node.ip_address.subnet # The network was updated by init_mgmt() # Reload Subnet object because it is cached inside node instance admin_net = admin_net.__class__.objects.get(pk=admin_net.pk) # We need a request object request = get_dummy_request(DefaultDc(), 'POST', system_user=True) record_cls = RecordView.Record if admin_net.dns_domain and admin_net.dns_domain == node.domain_name: logger.info('Creating forward A DNS record for node %s', node) # This will fail silently RecordView.add_or_update_record(request, record_cls.A, admin_net.dns_domain, node.hostname, node.address, task_id=task_id, related_obj=node) if admin_net.ptr_domain: logger.info('Creating reverse PTR DNS record for node %s', node) # This will fail silently RecordView.add_or_update_record(request, record_cls.PTR, admin_net.ptr_domain, record_cls.get_reverse(node.address), node.hostname, task_id=task_id, related_obj=node)
5,347,076
def test_directory_new(tmp_path): """ Test file.directory when the directory does not exist Should just return "New Dir" """ path = os.path.join(tmp_path, "test") ret = file.directory( name=path, makedirs=True, win_perms={"Administrators": {"perms": "full_control"}}, win_deny_perms={"Guest": {"perms": "full_control"}}, ) expected = {path: {"directory": "new"}} assert ret["changes"] == expected permissions = win_dacl.get_permissions(path) expected = { "Inherited": { "Administrators": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, "SYSTEM": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, CURRENT_USER: { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, }, "Not Inherited": { "Administrators": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, "SYSTEM": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, CURRENT_USER: { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, "Guest": { "deny": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } }, }, } assert permissions == expected
5,347,077
def check(src, perm, dest, cmds, comp, verbose=False): """ Report if src and dest are different. Arguments src: Location of the source file. perm: Permissions of the destination file (ignored). dest: Location of the destination file. cmds: Post-install commands (ignored). comp: Cmp enum verbose: Report if files are the same. """ if comp == Cmp.differ: ansiprint(f"The file '{src}' differs from '{dest}'.", fg=Color.red, i=True) elif comp == Cmp.nodest: ansiprint( f"The destination file '{dest}' does not exist", fg=Color.black, bg=Color.red, ) elif comp == Cmp.nosrc: ansiprint( f"The source file '{src}' does not exist.", fg=Color.black, bg=Color.red ) elif comp == Cmp.same and verbose: ansiprint(f"The files '{src}' and '{dest}' are the same.", fg=Color.green)
5,347,078
def getUrlsAlias() -> List[str]:
    """Get the aliases of all urls.py files."""
    obj = getEnvXmlObj()
    return obj.get_childnode_lists('alias/file[name=urls]')
5,347,079
def upload_model(): """ Upload the model that is serialized to disk to the server. This method does NOT communicate with Excel at all. @return: """ model = PersistenceHandler(os.path.dirname(__file__)).load_model() uploader = UploadHandler() uploader.upload_database(model) uploader.upload_sponsors(model)
5,347,080
def project_xarray(run: BlueskyRun, *args, projection=None, projection_name=None): """Produces an xarray Dataset by projecting the provided run. EXPERIMENTAL: projection code is experimental and could change in the near future. Projections come with multiple types: linked, and caclulated. Calculated fields are only supported in the data (not at the top-level attrs). Projected fields will be inserted into the resulting xarray.Dataset Parameters ---------- run : BlueskyRun run to project projection_name : str, optional name of a projection to select in the run, by default None projection : dict, optional projection not from the run to use, by default None Returns ------- xarray.Dataset The return Dataset will contain: - single value meta data (from the run start) in the return Dataset's attrs dict, keyed on the projection key. These are projections marked "location": "start" - single value meta data (from a streams configuration field) in the return Dataset's xarray's dict, keyed on the projection key. These are projections marked "location": "configuration" - multi-value data (from a stream). Keys for the dict-like xarray.Dataset match keys in the passed-in projection. These are projections with "location": "linked"...note that every xarray for a field froma given stream will contain a reference to the same set of configuration attrs for as all fields from the same stream Dataset |_attrs |_'projection_start_field': value |_data |_ 'projection_event_field': xarray |_ attrs |_'projection_configuration_field': value Raises ------ ProjectionError """ attrs = {} # will populate the return Dataset attrs field data_vars = {} # will populate the return Dataset DataArrays stream_configurations = {} # will populate a collection of dicts of stream configurations def metadata_cb(field, value): attrs[field] = value def event_configuration_cb( projection_field, stream, config_index, config_device, config_field, value): if stream not in stream_configurations: stream_configurations[stream] = [] if len(stream_configurations[stream]) == 0: stream_configurations[stream].append({}) if config_device not in stream_configurations[stream][config_index]: stream_configurations[stream][config_index][config_device] = {} stream_configurations[stream][config_index][config_device][config_field] = value def event_field_cb(projection_field, stream, field, xarray: xarray.DataArray): if projection_field not in stream_configurations: stream_configurations[stream] = [] # associate the stream configuration to the xarrays's atrtrs xarray.attrs['configuration'] = stream_configurations[stream] data_vars[projection_field] = xarray # Use the callbacks defined above to project the run and build up a return xarray.Dataset projector = Projector( metadata_cb=metadata_cb, event_configuration_cb=event_configuration_cb, event_field_cb=event_field_cb) projector.project(run, projection=projection, projection_name=projection_name) dataset = xarray.Dataset(data_vars, attrs=attrs) return dataset, projector.issues
5,347,081
def collect_users(): """Collect a list of all Santas from the user""" list_of_santas = [] while 1: item = input("Enter a name\n") if not item: break list_of_santas.append(item) return list_of_santas
5,347,082
def check_role_exists(role_name, access_key, secret_key):
    """
    Check whether the given IAM role already exists in the AWS Account

    Args:
        role_name (str): Role name
        access_key (str): AWS Access Key
        secret_key (str): AWS Secret Key

    Returns:
        Boolean: True if role exists else False
    """
    iam_client = get_iam_client(access_key, secret_key)
    try:
        role = iam_client.get_role(RoleName=role_name)
        return True if role else False
    except Exception:
        return False
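# A minimal usage sketch; the role name and credentials below are placeholders, and the
# module-level get_iam_client(access_key, secret_key) helper used above is assumed to
# return a boto3 IAM client.
if check_role_exists("my-app-role", "AKIA-PLACEHOLDER", "SECRET-PLACEHOLDER"):
    print("Role already present, skipping creation")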
5,347,083
def group_delay(group_key, flights): """ Group the arrival delay flights based on keys. :param group_key: Group key to use for categorization. :param flights: List of flights matching from an origin airport. :return: Dictionary containing the list of flights grouped. """ dict_of_group_flights = defaultdict(list) if group_key == 'distance': global distance_range # segmentation every distance range # Remove duplicate value & Get the maximum distance distance_set = set() for flight in flights: distance_set.add(int(flight['distance'])) distance_list = sorted(list(distance_set)) max_distance = max(distance_list) # Segment into Ranges temp_dict = defaultdict(list) for flight in flights: distance_limit = 0 while distance_limit <= max_distance: if int(flight[group_key]) in range(distance_limit, distance_limit + distance_range): time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: distance_ranges = str(distance_limit) + " - " + str(distance_limit + distance_range) + " miles" temp_dict[distance_ranges].append(time_of_arrival) distance_limit += distance_range elif group_key == 'day_of_week': temp_dict = defaultdict(list) for flight in flights: time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: name_of_day = get_day_name(int(flight[group_key])) temp_dict[name_of_day].append(time_of_arrival) else: temp_dict = defaultdict(list) for flight in flights: time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: temp_dict[flight[group_key]].append(time_of_arrival) # Overall Arrival Delay in "<minimum> - <maximum> minute(s) late" format for key, delay_list in temp_dict.iteritems(): fastest_delay = str(abs(max(delay_list))) longest_delay = str(abs(min(delay_list))) if fastest_delay == longest_delay: dict_of_group_flights[key].append(fastest_delay + " minute(s) late") else: dict_of_group_flights[key].append(fastest_delay + " - " + longest_delay + " minute(s) late") return dict_of_group_flights
5,347,084
def create_matrix( score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots, add_score_for_seq_match, mode='simple'): """ Function that create matrix that can be used for further analysis, please take note, that mode must be the same in case of matrix and multiple sequence alignment, otherwise random-like effects will occur :param score_same_brackets: int, score for the same tye of brackets like ( and ( :param score_other_brackets: int, score for different type of brackets like ( and [ :param score_reverse_brackets: int, socre for reverse brackets like ( and ) :param score_brackets_dots: int, socre for brakcet and dot like ( and . :param score_two_dots: int, score for two dots like . and . :param add_score_for_seq_match: int, value to add if sequence letter is the same :param mode: string, simple - only level one pseudoknots, pseudo - multiplelevel of pseudoknots :return: string containing matrix that can be saved """ header = " A C D E F G H I K L M " \ "N P Q R S T V W Y" matrix = defaultdict(dict) if mode == 'simple': for letter1 in LETTERS: nucleotide1 = None dot_bracket1 = None for nucleotide in SIMPLE_CONVERSION: for dot_bracket in SIMPLE_CONVERSION[nucleotide]: if SIMPLE_CONVERSION[nucleotide][dot_bracket] == letter1: nucleotide1 = nucleotide dot_bracket1 = dot_bracket for letter2 in LETTERS: nucleotide2 = None dot_bracket2 = None for nucleotide in SIMPLE_CONVERSION: for dot_bracket in SIMPLE_CONVERSION[nucleotide]: if SIMPLE_CONVERSION[nucleotide][dot_bracket] == \ letter2: nucleotide2 = nucleotide dot_bracket2 = dot_bracket score = score_brackets( dot_bracket1, dot_bracket2, score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots) if nucleotide1 == nucleotide2: score += add_score_for_seq_match matrix[letter1][letter2] = score elif mode == 'pseudo': for letter1 in LETTERS: dot_bracket1 = None for dot_bracket in PSEUDOKNOT_CONVERSION: if PSEUDOKNOT_CONVERSION[dot_bracket] == letter1: dot_bracket1 = dot_bracket for letter2 in LETTERS: score = 0 dot_bracket2 = None for dot_bracket in PSEUDOKNOT_CONVERSION: if PSEUDOKNOT_CONVERSION[dot_bracket] == letter2: dot_bracket2 = dot_bracket if dot_bracket2 is not None and dot_bracket1 is not None: score = score_brackets( dot_bracket1, dot_bracket2, score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots) matrix[letter1][letter2] = score else: print('Wrong mode') text = [header] for letter1 in LETTERS: string = [letter1, ' '] for letter2 in LETTERS: score = matrix[letter1][letter2] string.append(str(score).rjust(5)) text.append("".join(string)) return "\n".join(text)
5,347,085
def view_inv(inventory_list):
    """list -> str

    Build a display string describing each rental item in the inventory
    (rental name, quantity, deposit, price per week and replacement value).
    """
    inventory_string = ''
    for item in inventory_list:
        inventory_string += ('\nRental: ' + str(item[0]) +
                             '\nQuantity: ' + str(item[1]) +
                             '\nDeposit: ' + "$" + str(item[2]) +
                             "\nPrice Per Week: " + "$" + str(item[3]) +
                             '\nReplacement Value: ' + "$" + str(int(item[4])) + "\n")
    return inventory_string
5,347,086
def generate_smb_proto_payload(*protos):
    """Generate SMB Protocol. Packet protos in order.
    """
    hexdata = []
    for proto in protos:
        hexdata.extend(proto)
    return "".join(hexdata)
5,347,087
def FeatureGrad_LogDet(grad_feature): """Part of the RegTerm inside the integral It calculates the logarithm of the determinant of the matrix [N_y x N_y] given by the scalar product of the gradients along the N_x axis. Args: grad_feature (array_like): [N_samples, N_y, N_x], where N_x is the input space and N_y the feature space. Returns: (array_like): [N_samples] """ # Case of 1d feature if len(grad_feature.shape) == 2: grad_feature = grad_feature[:, np.newaxis, :] matrix_j = grad_feature@grad_feature.swapaxes(1, -1) s, d = np.linalg.slogdet(matrix_j) # return s*d # We remove terms with zero s (i.e. errors) return s[s != 0]*d[s != 0]
5,347,088
def get_every_second_indexes(ser: pd.Series, even_index=True) -> pd.core.series.Series: """Return all rows where the index is either even or odd. If even_index is True return every index where idx % 2 == 0 If even_index is False return every index where idx % 2 != 0 Assume default indexing i.e. 0 -> n """ idx = 0 if even_index else 1 return ser.iloc[idx::2]
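# A minimal usage example with a default-indexed Series:
import pandas as pd
ser = pd.Series(["a", "b", "c", "d", "e"])
print(get_every_second_indexes(ser).tolist())                    # ['a', 'c', 'e']  (indices 0, 2, 4)
print(get_every_second_indexes(ser, even_index=False).tolist())  # ['b', 'd']      (indices 1, 3)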
5,347,089
def EVLAApplyCal(uv, err, SNver=0, CLin=0, CLout=0, maxInter=240.0, \ doSelf=False, logfile=None, check=False, debug=False): """ Applies an SN table to a CL table and writes another Returns task error code, 0=OK, else failed * uv = UV data object to clear * err = Obit error/message stack * SNver = SN table to apply, 0=>highest * CLin = input CL table, 0=>highest * CLout = output CL table, 0=>create new * maxInter = Max time (min) over which to interpolate * doSelf = If true only apply calibrations to same source * logfile = logfile for messages * check = Only check script, don't execute tasks * debug = show input, ObitTasks debug """ ################################################################ # Open/close UV to update header if not check: uv.Open(UV.READONLY,err) uv.Close(err) if err.isErr: OErr.printErr(err) mess = "Update UV header failed" printMess(mess, logfile) return 1 if not check: if SNver<=0: SNver = uv.GetHighVer("AIPS SN") if CLin<=0: CLin = uv.GetHighVer("AIPS CL") if CLout<=0: CLout = uv.GetHighVer("AIPS CL")+1 if CLin<1: mess = "No input CL table to update" printMess(mess, logfile) uv.Header(err) return 1 mess = "Update CL "+str(CLin)+" with SN "+str(SNver)+" to CL "+str(CLout) printMess(mess, logfile) clcal = ObitTask.ObitTask("CLCal") try: clcal.userno = OSystem.PGetAIPSuser() # This sometimes gets lost except Exception as exception: pass if not check: setname(uv,clcal) clcal.solnVer = SNver clcal.calIn = CLin clcal.calOut = CLout clcal.maxInter = maxInter if doSelf: clcal.interMode = "SELF" clcal.taskLog = logfile clcal.debug = debug if debug: clcal.i # Trap failure try: if not check: clcal.g except Exception as exception: print(exception) mess = "CLCal Failed retCode="+str(clcal.retCode) printMess(mess, logfile) return 1 else: pass # End CLCal # Open/close UV to update header if not check: uv.Open(UV.READONLY,err) uv.Close(err) if err.isErr: OErr.printErr(err) mess = "Update UV header failed" printMess(mess, logfile) return 1 return 0 # end EVLAApplyCal
5,347,090
def parse_imports_from_import_statement( statement: Statement, strict: bool = False ) -> typing.Iterable[ImportStatement]: """ Parse import statements """ # import statement standalone_comments are the only possible possible standalone_comments = statement.standalone_comments # import statement has only inline comments # in all nodes within a statment so we can immediatly # add all of them to inline_comments inline_comments = list(itertools.chain(*[i.comments for i in statement.nodes])) # much simpler to do splits on string and since # comments are already handled this is much simpler route # compared to inspecting tree structure # go string parsing! data = "".join(n.combinable_value for n in statement.nodes if n.is_combinable) # loop to handle multiple comma delimited imports for imp in data.split(","): leafs: typing.List[ImportLeaf] = [] as_name: typing.Union[str, None] = None try: stem, as_name = imp.split(" as ") except ValueError: stem, as_name = imp, None stem_split = stem.rsplit(".", 1) # if import has "as" name and stem can be split to multiple imports # import can be transformed to from..import # e.g. import a.b.c as d -> from a.b import c as d # no need to handle local imports (e.g. import .a) # since that is invalid python syntax if as_name and all(stem_split) and len(stem_split) > 1: stem = stem_split[0] leafs.append(ImportLeaf(stem_split[1], as_name)) as_name = None yield ImportStatement( stem=stem, as_name=as_name, leafs=leafs, line_numbers=statement.line_numbers, standalone_comments=standalone_comments, inline_comments=inline_comments, strict=strict, )
5,347,091
def set_exports(pub, cg): """In file pub, mark the include for cg with IWYU pragma: export""" lines = [] for line in open(pub).read().splitlines(): if line.startswith('#include %s' % to_inc(cg)): lines.append('#include %s // IWYU pragma: export' % to_inc(cg)) else: lines.append(line) open(pub, 'w').write('\n'.join(lines) + '\n')
5,347,092
def preprocess_input(text):
    """
    Tokenize the cleaned text.

    :param text: the cleaned text
    :return: the text tokenized into sentences and words, ready to be fed into the analysis
    """
    sentences = nltk.sent_tokenize(text)
    tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
    return tokens
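# A minimal usage sketch; it assumes nltk is installed and the "punkt" tokenizer data
# has been downloaded (e.g. via nltk.download("punkt")).
sample = "The model converged. Training took two hours."
print(preprocess_input(sample))
# [['The', 'model', 'converged', '.'], ['Training', 'took', 'two', 'hours', '.']]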
5,347,093
def create_count_dictionaries_for_letter_placements(all_words_list): """Returns a tuple of dictionaries where the index of the tuple is the counts for that index of each word >>> create_count_dictionaries_for_letter_placements(all_words_list) (dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4) For example: dictPosition0 has the counts of all characters (a-z) in the first position of all the words. dictPosition3 has the counts of all characters (a-z) in the fourth position of all the words. """ dictPosition0 = create_dictionary_of_characters_at_word_index(0, all_words_list) dictPosition1 = create_dictionary_of_characters_at_word_index(1, all_words_list) dictPosition2 = create_dictionary_of_characters_at_word_index(2, all_words_list) dictPosition3 = create_dictionary_of_characters_at_word_index(3, all_words_list) dictPosition4 = create_dictionary_of_characters_at_word_index(4, all_words_list) return dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4
5,347,094
def parse_known(key, val) -> str: """ maps string from html to to function for parsing Args: key: string from html val: associated value in html Returns: str """ key_to_func = {} key_to_func["left"] = parse_number key_to_func["top"] = parse_number key_to_func["width"] = parse_number key_to_func["font-size"] = parse_number key_to_func["color"] = parse_color if key in key_to_func: return key_to_func[key](key, val) else: return val
5,347,095
def getRecordsPagination(page, filterRecords=''): """ get all the records created by users to list them in the backend welcome page """ newpage = int(page)-1 offset = str(0) if int(page) == 1 \ else str(( int(conf.pagination) *newpage)) queryRecordsPagination = """ PREFIX prov: <http://www.w3.org/ns/prov#> PREFIX base: <"""+conf.base+"""> SELECT DISTINCT ?g ?title ?userLabel ?modifierLabel ?date ?stage WHERE { GRAPH ?g { ?s ?p ?o . OPTIONAL { ?g rdfs:label ?title; prov:wasAttributedTo ?user; prov:generatedAtTime ?date ; base:publicationStage ?stage. ?user rdfs:label ?userLabel . OPTIONAL {?g prov:wasInfluencedBy ?modifier. ?modifier rdfs:label ?modifierLabel .} } OPTIONAL {?g rdfs:label ?title; prov:generatedAtTime ?date ; base:publicationStage ?stage . } BIND(COALESCE(?date, '-') AS ?date ). BIND(COALESCE(?stage, '-') AS ?stage ). BIND(COALESCE(?userLabel, '-') AS ?userLabel ). BIND(COALESCE(?modifierLabel, '-') AS ?modifierLabel ). BIND(COALESCE(?title, 'none', '-') AS ?title ). filter not exists { ?g prov:generatedAtTime ?date2 filter (?date2 > ?date) } } """+filterRecords+""" FILTER( str(?g) != '"""+conf.base+"""vocabularies/' ) } ORDER BY DESC(?date) LIMIT """+conf.pagination+""" OFFSET """+offset+""" """ records = list() sparql = SPARQLWrapper(conf.myEndpoint) sparql.setQuery(queryRecordsPagination) sparql.setReturnFormat(JSON) results = sparql.query().convert() for result in results["results"]["bindings"]: records.append( (result["g"]["value"], result["title"]["value"], result["userLabel"]["value"], result["modifierLabel"]["value"], result["date"]["value"], result["stage"]["value"] )) return records
5,347,096
async def async_setup_entry(hass, config_entry, async_add_entities): """Perform the setup for Gardena sensor devices.""" entities = [] for sensor in hass.data[DOMAIN][GARDENA_LOCATION].find_device_by_type("SENSOR"): for sensor_type in SENSOR_TYPES: entities.append(GardenaSensor(sensor, sensor_type)) for sensor in hass.data[DOMAIN][GARDENA_LOCATION].find_device_by_type("SOIL_SENSOR"): for sensor_type in SOIL_SENSOR_TYPES: entities.append(GardenaSensor(sensor, sensor_type)) for mower in hass.data[DOMAIN][GARDENA_LOCATION].find_device_by_type("MOWER"): # Add battery sensor for mower entities.append(GardenaSensor(mower, ATTR_BATTERY_LEVEL)) for water_control in hass.data[DOMAIN][GARDENA_LOCATION].find_device_by_type("WATER_CONTROL"): # Add battery sensor for water control entities.append(GardenaSensor(water_control, ATTR_BATTERY_LEVEL)) _LOGGER.debug("Adding sensor as sensor %s", entities) async_add_entities(entities, True)
5,347,097
def g16_vs_cnn(): """ Examine the DLAs in G16 against those in CNNo Returns ------- """ # Load cnn_dlas, dr12_dla, g16_abs, g16_dlas = load_dr12() # DLA coords dr12_dla_coord = SkyCoord(ra=dr12_dla['RA'], dec=dr12_dla['DEC'], unit='deg') g16_coord = SkyCoord(ra=g16_dlas['RAdeg'], dec=g16_dlas['DEdeg'], unit='deg') # Match dr12_to_g16 = match_boss_catalogs(dr12_dla, g16_dlas) matched = dr12_to_g16 >= 0 g16_idx = dr12_to_g16[matched] # DLAs in both pdb.set_trace()
5,347,098
def integrate_intensity(data_sets, id, nθ, iN, NCO2, color1, color2): """Integrate intensity ove angle theta Arguments: data_sets {[type]} -- [description] id {[type]} -- [description] nθ {[type]} -- [description] iN {[type]} -- [description] NCO2 {[type]} -- [description] color1 {[type]} -- [description] color2 {[type]} -- [description] Returns: [type] -- [description] """ import scipy.integrate θ_0 = np.deg2rad(data_sets.get(id, iN, 0, iθ)[0]) # theta θ_1 = np.deg2rad(data_sets.get(id, iN, 1, iθ)[0]) # theta θ_2 = np.deg2rad(data_sets.get(id, iN, 2, iθ)[0]) # theta I_0 = data_sets.get(id, iN, 0, iI)[-1] # intensity at TOA I_1 = data_sets.get(id, iN, 1, iI)[-1] # intensity at TOA I_2 = data_sets.get(id, iN, 2, iI)[-1] # intensity at TOA # qubic approximation of I(θ) R1 = I_1 - I_0 R2 = I_2 - I_0 a0 = I_0 det = θ_1**2 * θ_2**3 - θ_1**3 * θ_2**2 a2 = (R1 * θ_2**3 - R2 * θ_1**3) / det a3 = (R2 * θ_1**2 - R1 * θ_2**2) / det c1 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x), 0.0, np.pi*0.5)# θ_2) c2 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**2, 0.0, np.pi*0.5)# θ_2) c3 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**3, 0.0, np.pi*0.5)# θ_2) θ = np.mgrid[0.0:np.pi*0.5:100j] I = a0 + a2*θ**2 + a3*θ**3 # plot plt.plot(θ, I, color1, label='%d ppm, cubic approximation' % NCO2) plt.plot([θ_0, θ_1, θ_2], [I_0, I_1, I_2], color2+'o', label="%d ppm, computed" % NCO2) plt.xlabel("angle θ [rad]") plt.ylabel("TOA flux I(θ) [W/m²]") plt.legend(loc='best') # integrated intensity Iint = 2.0*np.pi * (a0*c1[0] + a2*c2[0] + a3*c3[0]) return Iint
5,347,099