content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def textarea(): """ Returns a textarea parser. Example:: ...[5] The number defines the number of rows. """ rows = number_enclosed_in('[]')('rows') textarea = Suppress('...') + Optional(rows) textarea.setParseAction(tag(type='textarea')) return textarea
5,348,200
def run(task): """Run the train/predict flow for `task`.""" st.markdown(f'<h1 align="center">{task}</h1>', unsafe_allow_html=True) train_button = st.sidebar.button("Train") sidebar_train_message = st.sidebar.empty() main_train_message = st.empty() slug = slugify(task) trained = is_trained(slug) if trained: sidebar_train_message.success(TRAINED_MODEL_MESSAGE) else: sidebar_train_message.warning(NO_TRAINED_MODEL_MESSAGE) main_train_message.warning(NO_TRAINED_MODEL_MESSAGE) if train_button: sidebar_train_message.info(TRAINING_MESSAGE) main_train_message.info(TRAINING_MESSAGE) train(slug, sidebar_train_message, TRAINING_MESSAGE) sidebar_train_message.success(TRAINED_MODEL_MESSAGE) main_train_message.empty() trained = is_trained(slug) if trained: show_metrics = st.sidebar.checkbox("Show metrics") if show_metrics: metrics = get_metrics(slug) for key, value in metrics.items(): st.sidebar.text(f"{key}: {value}") user_input = st.text_area("Input") st.text("Output") if user_input: output = predict(slug, user_input) display_function = DISPLAY_FUNCTIONS[slug] display_function(output)
5,348,201
def union_categoricals( to_union: List[ Union[ pandas.core.indexes.category.CategoricalIndex, pandas.core.series.Series, pandas.core.arrays.categorical.Categorical, ] ] ): """ usage.dask: 15 """ ...
5,348,202
def get_agent(agent_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAgentResult: """ This data source provides details about a specific Agent resource in Oracle Cloud Infrastructure Database Migration service. Display the ODMS Agent configuration. ## Example Usage ```python import pulumi import pulumi_oci as oci test_agent = oci.databasemigration.get_agent(agent_id=oci_database_migration_agent["test_agent"]["id"]) ``` :param str agent_id: The OCID of the agent """ __args__ = dict() __args__['agentId'] = agent_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:databasemigration/getAgent:getAgent', __args__, opts=opts, typ=GetAgentResult).value return AwaitableGetAgentResult( agent_id=__ret__.agent_id, compartment_id=__ret__.compartment_id, defined_tags=__ret__.defined_tags, display_name=__ret__.display_name, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_details=__ret__.lifecycle_details, public_key=__ret__.public_key, state=__ret__.state, stream_id=__ret__.stream_id, system_tags=__ret__.system_tags, time_created=__ret__.time_created, time_updated=__ret__.time_updated, version=__ret__.version)
5,348,203
def darknet(): """Darknet-53 classifier. """ inputs = Input(shape=(416, 416, 3)) x = darknet_base(inputs) x = GlobalAveragePooling2D()(x) x = Dense(1000, activation='softmax')(x) model = Model(inputs, x) return model
5,348,204
def create_process_chain_entry(input_object, python_file_url, udf_runtime, udf_version, output_object): """Create an Actinia command of the process chain that uses t.rast.udf :param input_object: The input strds object :param python_file_url: The URL to the python file that defines the UDF :param udf_runtime: The UDF runtime :param udf_version: The UDF version :param output_object: The output strds object :return: An Actinia process chain description """ # rn = randint(0, 1000000) pc = {"id": "t_rast_udf", "module": "t.rast.udf", "inputs": [{"import_descr": {"source": python_file_url, "type": "file"}, "param": "pyfile", "value": "$file::my_py_func"}, {"param": "input", "value": input_object.grass_name()}, {"param": "output", "value": output_object.grass_name()}]} return pc
5,348,205
def add_user_to_authorization_domain(auth_domain_name, email, permission): """Add group with given permissions to authorization domain.""" # request URL for addUserToGroup uri = f"https://api.firecloud.org/api/groups/{auth_domain_name}/{permission}/{email}" # Get access token and add to headers for requests. # -H "accept: */*" -H "Authorization: Bearer [token]" headers = {"Authorization": "Bearer " + get_access_token(), "accept": "*/*"} # capture response from API and parse out status code response = requests.put(uri, headers=headers) status_code = response.status_code if status_code != 204: # AD update with member fail print(f"WARNING: Failed to update Authorization Domain, {auth_domain_name}, with group: {email}.") print("Check output file for error details.") return False, response.text # AD update with member success print(f"Successfully updated Authorization Domain, {auth_domain_name}, with group: {email}.") return True, None
5,348,206
def test_subpart_decl_build_cls(subpart_decl): """Test that the class creation does get the docs and set the attributes. """ class Test: pass with subpart_decl as ss: ss.a = Feature() subpart_decl._name_ = 'sub' cls = subpart_decl.build_cls(Test, None, {'sub': 'Test docs', 'ss.a': 'A docs'}) assert cls.__doc__ == 'Test docs' assert cls.__name__ == 'Test_Sub' assert not hasattr(cls, '_docs_') assert cls.a.__doc__.split('\n')[0] == 'A docs' assert cls for att in ('_name_', '_parent_', '_bases_', '_aliases_'): assert not hasattr(cls, att) class T: pass cls2 = subpart_decl.build_cls(T, cls, {}) assert cls2.__name__ == 'T_Sub' assert cls2.mro()[1] is cls
5,348,207
def error_032_link_two_pipes(text): """Fix some cases and return (new_text, replacements_count) tuple.""" (text, ignored) = ignore(text, r"\[\[\s*:?\s*{}.*?\]\]".format(IMAGE)) (text, count1) = re.subn(r"\[\[([^|\[\]\n]+)\|\|([^|\[\]\n]+)\]\]", "[[\\1|\\2]]", text) (text, count2) = re.subn(r"\[\[([^|\[\]\n]+)\|([^|\[\]\n]+)\|\]\]", "[[\\1|\\2]]", text) text = deignore(text, ignored) return (text, count1 + count2)
5,348,208
def define_answer(defined_answer): """ Randomly generates the "correct answer". Each digit is decided by drawing a random number between 0 and 15. When the random number (digit_kari) for digit count is drawn, it is checked against the digits up to count-1 for duplicates. If there is no duplicate, the drawn number (digit_kari) is stored in ans_list[count]. If it is a duplicate, the random number for that digit is drawn again. """ global ans_str #,ans_list if type(defined_answer) == str and len(defined_answer) == 5: ans_str = defined_answer return defined_answer else: ans_list = [0, 0, 0, 0, 0] ans_str = "" digit_kari = 0 count = 0 check = 0 while count < 5: if count == 0: ans_list[count] = random.randint(0,15) count += 1 else: digit_kari = random.randint(0,15) for j in range(count): if ans_list[j] == digit_kari: check = -1 if check == 0: ans_list[count] = digit_kari count += 1 else: check = 0 for i in range(5): ans_str += str(hex(ans_list[i]))[2] print("answer:"+ans_str) # enable only when you want to know the answer in advance return ans_str
5,348,209
def cifar10(eps, use_bounds=False): """Example dataloader. For MNIST and CIFAR you can actually use existing ones in utils.py.""" assert eps is not None database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets') # You can access the mean and std stored in config file. mean = torch.tensor(arguments.Config["data"]["mean"]) std = torch.tensor(arguments.Config["data"]["std"]) normalize = transforms.Normalize(mean=mean, std=std) test_data = datasets.CIFAR10(database_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize])) # Load entire dataset. testloader = torch.utils.data.DataLoader(test_data, batch_size=10000, shuffle=False, num_workers=4) X, labels = next(iter(testloader)) if use_bounds: # Option 1: for each example, we return its element-wise lower and upper bounds. # If you use this option, set --spec_type ("specifications"->"type" in config) to 'bound'. absolute_max = torch.reshape((1. - mean) / std, (1, -1, 1, 1)) absolute_min = torch.reshape((0. - mean) / std, (1, -1, 1, 1)) # Be careful with normalization. new_eps = torch.reshape(eps / std, (1, -1, 1, 1)) data_max = torch.min(X + new_eps, absolute_max) data_min = torch.max(X - new_eps, absolute_min) # In this case, the epsilon does not matter here. ret_eps = None else: # Option 2: return a single epsilon for all data examples, as well as clipping lower and upper bounds. # Set data_max and data_min to be None if no clip. For CIFAR-10 we clip to [0,1]. data_max = torch.reshape((1. - mean) / std, (1, -1, 1, 1)) data_min = torch.reshape((0. - mean) / std, (1, -1, 1, 1)) if eps is None: raise ValueError('You must specify an epsilon') # Rescale epsilon. ret_eps = torch.reshape(eps / std, (1, -1, 1, 1)) return X, labels, data_max, data_min, ret_eps
5,348,210
def truncate_dataset_position(filename, joint_type="w", threshold=0.01, directory="./2_smoothed/"): """ Truncates dataset **with raw position data** from last zero value before maximum velocity to following zero value. :param filename: Input filename of position dataset :param joint_type: Chooses which joint type is used to truncate the whole dataset (w, e, gh) :param threshold: factor for maximum velocity, every value below threshold*v_max is set to zero. Threshold=0 uses original dataset. :param directory: directory of files :return: new truncated dataset as dataframe, indexes stay the same """ dataset = open_dataset_pandas(filename, directory=directory) dataset_velocity = generate_velocity_dataframe(filename, directory) [index_left, index_right] = \ find_nearest_minima_from_maximum(dataset_velocity, joint_type=joint_type, threshold=threshold) truncated_dataset = dataset.truncate(before=index_left, after=index_right) if len(truncated_dataset) > 150 and threshold < 0.3: return truncate_dataset_position(filename, joint_type=joint_type, threshold=threshold+0.01, directory=directory) print(f"{filename};{threshold};{len(truncated_dataset)}") truncated_dataset = truncated_dataset.reset_index(drop=True) return truncated_dataset, threshold
5,348,211
def quadric_errors_representative(bucket): """ Quadric errors representative function. :param bucket: bucket to calculate representative from. :type bucket: Bucket :return: bucket's representative vertex coordinates :rtype: tuple(float, float, float) """ A = np.zeros((3, 3)) b = np.zeros((3, 1)) faces_set = set() faces = [] for vertex in bucket.original_vertices: # type: Polyhedron_3_Vertex_handle circulator = vertex.vertex_begin() # type: Polyhedron_3_Halfedge_around_vertex_circulator for i in range(vertex.vertex_degree()): he = circulator.next() # type: Polyhedron_3_Halfedge_handle if he.is_border(): continue f = he.facet() facet_circulator = f.facet_begin() # type: Polyhedron_3_Halfedge_around_facet_circulator vertices = [] for j in range(3): facet_he = facet_circulator.next() # type: Polyhedron_3_Halfedge_handle vertices.append(tuple([float(x) for x in str(facet_he.vertex().point()).split()])) triangle_already_added = False n = len(vertices) for permutation in [[vertices[i - j] for i in range(n)] for j in range(n)]: if tuple(permutation) in faces_set: triangle_already_added = True break faces_set.add(tuple(permutation)) if triangle_already_added: continue face = [] for v in vertices: face.append(v) faces.append(face) for face in faces: p1 = np.array(face[0]) p2 = np.array(face[1]) p3 = np.array(face[2]) normal = np.reshape(np.cross((p2 - p1), (p3 - p1)), (3, 1)) normal_norm = norm(normal) normal /= normal_norm normal_t = normal.transpose() dist = np.dot(normal_t, p1) A += np.dot(normal, normal_t) b += dist * normal pinv_A = pinv(A) representative = np.dot(pinv_A, b) return tuple([representative[0][0], representative[1][0], representative[2][0]])
5,348,212
def getMonthTicks(start, end, increment, offset=0): """ Create a set of matplotlib compatible ticks and tick labels for every `increment` month in the range [start, end], beginning at the month of start + `offset` months. """ xLabels = [] xTicks = [] y, m, d = helpers.yearmonthday(start) def normalize(y, m): if m > 12: m -= 12 y += 1 elif m < 0: m += 12 y -= 1 return y, m def nextyearmonth(y, m): m += increment return normalize(y, m) y, m = normalize(y, m+offset) tick = helpers.mktime(y, m) end = end + C.DAY*120 # Make a few extra months worth. while True: xTicks.append(tick) xLabels.append(time.strftime("%b '%y", time.gmtime(tick))) y, m = nextyearmonth(y, m) tick = helpers.mktime(y, m) if tick > end: break return xTicks, xLabels
5,348,213
def untar(fileobj): """Extract tar archive.""" logger.debug("untar") fileobj = seekable(fileobj) with tarfile.open(fileobj=fileobj) as tar_data: tar_data.extractall()
5,348,214
def entropy_layer(inp, theta, num_samples, sample_init, sample_const, train_vect): """ Entropy PersLay WARNING: this function assumes that padding values are zero """ bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init) L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2] LN = tf.multiply(L, 1. / tf.expand_dims(tf.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1)) entropy_terms = tf.where(LN > 0., -tf.multiply(LN, tf.log(LN)), LN) return tf.multiply(entropy_terms, 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) ))
5,348,215
def set_version_code(data): """ Utility function to set new versionCode """ match = version_code_pattern.search(data) if not match: raise ValueError('Version code not found') version_code = int(match.group('value')) next_version_code = r'\g<key> {}'.format(version_code + 1) return version_code_pattern.sub(next_version_code, data)
5,348,216
def add_resource(label, device_type, address, userid, password, rackid='', rack_location='', ssh_key=None, offline=False): """ Add device to the list of devices in the configuration managed Args: label: label for device device_type: type of device from device enumeration address: IP address of device userid: string with device userid password: string with device password (or password for ssh key) rackid: string identifying rack id, if not specified will default to management rack rack_location: string identifying rack location ssh_key: ssh key string offline: Add the resource even if it can't be contacted Returns: RC: integer return code Message: string with message associated with return code """ _method_ = 'resource_mgr.add_resource' label = label.strip() address = address.strip() session = persistent_mgr.create_database_session() if not offline: ipv4, hostname = _check_address(address) else: ipv4 = address hostname = "" # adding default hostname for cases where dns doesn't resolve the address # we need *some* hostname to use on Nagios configuration if hostname == "": hostname = address rc, message = validate_address(ipv4) if rc != 0: return rc, message rc, message = validate_label(label) if rc != 0: return rc, message if not offline: (validate_ret, device_type, mtm, serialnum, version, architecture) = validate( ipv4, userid, password, device_type, ssh_key) if validate_ret != 0: logging.error( "%s::failed to add device, validate device(%s) return value(%d).", _method_, label, validate_ret) error_message = None if validate_ret == 1: error_message = _("Failed to connect the device.") elif validate_ret == 2: error_message = _("The userid/password combination is not valid.") elif validate_ret == 3: error_message = _("No plugin capable of managing device was found.") elif validate_ret == 109: error_message = _("Connect timeout.") return validate_ret, error_message else: if _check_device_exist_by_props(session, device_type, mtm, serialnum): logging.error("%s::failed to add device, device(machine-type-model=%s, " "serial-number=%s) is already managed.", _method_, mtm, serialnum) error_message = _("The device is not added, a device with the same serial number " "and machine type model is found in the configuration file.") return 110, error_message # figure out the rack ID to add the device under if rackid: rack = persistent_mgr.get_rack_by_id(session, rackid) else: # don't have a rack id. find first the rack and assign it there try: racks_info = persistent_mgr.get_all_racks(session) rack = racks_info[0] except IndexError: # No rack exists, create one rack = Rack() rack.label = "Default" persistent_mgr.add_racks(session, [rack]) device_info = Resource() device_info.rack = rack device_info.eia_location = rack_location device_info.machine_type_model = mtm device_info.serial_number = serialnum device_info.address = ipv4 device_info.hostname = hostname device_info.userid = userid if password and not ssh_key: device_info.password = persistent_mgr.encrypt_data(password) device_info.label = label device_info.resource_type = device_type device_info.version = version device_info.architecture = architecture device_info.status = constants.access_status.SUCCESS.value device_info.statusTime = datetime.utcnow() # we are adding the device after validation, set validated. device_info.validated = True hooks = _load_inventory_device_plugins() hook_name = 'unknown' # keeps pylint happy try: for hook_name, hook_plugin in hooks.items(): hook_plugin.add_device_pre_save(device_info) except Exception as e: logging.exception(e) message = _("Before device was added. Error in plugin (%s): %s") % (hook_name, e) return 102, message persistent_mgr.add_devices(session, [device_info]) if ssh_key: key_info = Key() key_info.resource = device_info key_info.type = "RSA" key_info.value = ssh_key if password: key_info.password = persistent_mgr.encrypt_data(password) persistent_mgr.add_ssh_keys(session, [key_info]) try: for hook_name, hook_plugin in hooks.items(): hook_plugin.add_device_post_save(device_info) except Exception as e: logging.exception(e) message = _("After device was added. Error in plugin (%s): %s") % (hook_name, e) if not message: message = _("Added device successfully.") session.close() return 0, message
5,348,217
def kegg_df_to_smiles(kegg_df, column_name): """ Args: kegg_df : pandas dataframe with SID numbers in the third column Returns: kegg_df : modified with a fourth column containing CID and fifth column containing SMILES unsuccessful_list : list of SIDs for which no CID or SMILES were found """ res = [] cid_list = [] unsuccessful_list = [] for i in range(len(kegg_df)): # cell index of desired SID sid = kegg_df.loc[i, column_name] try: smile_result = sid_to_smiles(sid)[0] res.append(smile_result) cid_result = sid_to_smiles(sid)[1] cid_list.append(cid_result) except BaseException: res.append('none') cid_list.append('none') unsuccessful_list.append(sid) pass kegg_df.insert(0, column='CID', value=cid_list) # Change this 2 to the number where the smiles column should be kegg_df.insert(1, column='SMILES', value=res) # kegg_df.to_csv(r'../datasets/df_cleaned_kegg_with_smiles.csv') return kegg_df, unsuccessful_list
5,348,218
async def get_user(user_id: int) -> User: """Gets user settings. Returns ------- User object Raises ------ sqlite3.Error if something happened within the database. exceptions.NoDataFoundError if no user was found. LookupError if something goes wrong reading the dict. Also logs all errors to the database. """ table = 'settings_user' function_name = 'get_user' sql = 'SELECT * FROM settings_user where user_id=?' try: cur = ARCHMAGE_DB.cursor() cur.row_factory = sqlite3.Row cur.execute(sql, (user_id,)) record = cur.fetchone() except sqlite3.Error as error: await log_error( INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql) ) raise if not record: raise exceptions.NoDataFoundError('User not in database') try: user_settings = User( user_id = record['user_id'], target_enchant = record['target_enchant'], ) except Exception as error: await log_error( INTERNAL_ERROR_LOOKUP.format(error=error, table=table, function=function_name, record=record) ) raise LookupError return user_settings
5,348,219
def _valid_optimizer_args(cfg_user, logger): """ Validates the "optimizer" parameters of a json configuration file used for training. The function returns False if an error has occurred and True if all settings have passed the check. :param cfg_user: EasyDict, json configuration file imported as dictionary :param logger: logger instance :return: boolean, True if no errors have been detected, False otherwise """ error = False if 'optimizer' in cfg_user: if not all_keys_known(cfg_user.optimizer, arguments.OPTIMIZER_KEYS, logger): error = True if 'name' not in cfg_user.optimizer: logger.error(f"The optimizer is not specified. Choose among {arguments.OPTIMIZERS} to specify 'name'.\n") error = True else: if cfg_user.optimizer.name not in arguments.OPTIMIZERS: logger.error(f"Unknown optimizer '{cfg_user.optimizer.name}'. Choose among {arguments.OPTIMIZERS} " "to specify 'name'.\n") error = True if 'learning_rate' in cfg_user.optimizer and cfg_user.optimizer.learning_rate <= 0: logger.error("Invalid value for the argument 'learning_rate': " f"{cfg_user.optimizer.learning_rate}. Specify a positive number.\n") error = True if 'weight_decay' in cfg_user.optimizer and cfg_user.optimizer.weight_decay <= 0: logger.error("Invalid value for the argument 'weight_decay': " f"{cfg_user.optimizer.weight_decay}. Specify a positive number.\n") error = True if error: logger.info('\n') else: logger.info('Settings check: ok.\n\n') return not error
5,348,220
def connect( server_index: int = typer.Argument(None, help="Connect to a server with the index given by 'csgo servers'"), ): """ Start CSGO and connect to a specific game server. """ if server_index is None: typer.echo("Please specify the server index") return if server_index >= 0: servers = get_servers() if server_index >= len(servers): typer.echo(f"No server with index {server_index} exists.") return server = servers[server_index] typer.echo(f"Starting CSGO and joining server {server['NAME']} ...") start_csgo(server['ADDRESS']) return
5,348,221
def test_insert_again(report): """Insert already present data into the database.""" report.insert("London, GB")
5,348,222
def delete_host_network(host_id, host_network_id): """Delete host network.""" data = _get_request_data() return utils.make_json_response( 200, host_api.del_host_network( host_id, host_network_id, user=current_user, **data ) )
5,348,223
def rrms_error(y: np.array, y_hat: np.array) -> float: """ Computes the RRMS error of an estimation. :param y: true parameters as numpy array :param y_hat: estimated parameters as numpy array :return: Frobenius norm of the relative estimation error, as percentage """ return fro_error(y, y_hat) / np.linalg.norm(y, ('fro' if len(y.shape) > 1 else 2)) * 100
5,348,224
def create(): """Creates new quiz and stores information about it in database.""" if request.method == "GET": return render_template("quizzes/create.html") error = None questions = [] quiz_name = None if isinstance(request.json, dict): for quiz in request.json: quiz_name = quiz for question_text in request.json[quiz_name]: question_options = request.json[quiz_name][question_text] questions.append(Question(text=question_text, options=question_options)) else: error = locale.error_wrong_data new_quiz = Quiz(author_id=g.user["id"], name=quiz_name, questions=questions) errors = new_quiz.validate() if error or errors: error_msg = "\n".join(filter(None, [error, *errors])) return jsonify(error=error_msg) else: db = get_db() new_quiz.add_to_db() db.commit() return jsonify(result="success", url=redirect(url_for("quizzes.index")).headers["Location"])
5,348,225
def get_teams_from_account(client: FrameioClient) -> Dict: """ Builds a list of teams for the account. Note: the API offers two strategies to fetch an account's teams, `get_teams` and `get_all_teams`. Using `get_teams`, we'll pull only the teams owned by the account_id, disregarding teams the user belongs to but does not own. More info: https://docs.frame.io/docs/directory-lists-and-file-trees#2-fetch-the-accounts-teams """ acct = client.users.get_me() acct_id = acct["account_id"] team_name_kv = dict() for team in client.teams.list(acct_id): team_name_kv[team["id"]] = team["name"] return team_name_kv
5,348,226
def verify_mfib_vrf_hardware_rate( device, vrf, num_of_igmp_groups, var, rate_pps, max_time=60, check_interval=10): """Verify mfib vrf hardware rate Args: device (`obj`): Device object vrf (`str`): VRF name num_of_igmp_groups (`int`): number of IGMP groups var (`int`): allowed variance around the per-group rate rate_pps (`int`): total expected rate in pps max_time (`int`, optional): Max time, default: 60 check_interval (`int`, optional): Check interval, default: 10 """ res = True timeout = Timeout(max_time, check_interval) while timeout.iterate(): try: output=device.parse("show ip mfib vrf {vrf} active".format(vrf=vrf)) except SubCommandFailure as e: timeout.sleep() continue ip_list,hd_rt=[],0 # Verify whether the learned IPs have the expected hardware rate for ip in output.q.get_values('groups'): hd_rt=output.q.contains(ip).get_values('hw_rate_utilized')[0] rate1 = int(rate_pps)/int(num_of_igmp_groups) max_r = int(rate1)+int(var) min_r = int(rate1)-int(var) if hd_rt>= min_r and hd_rt<=max_r: ip_list.append(ip) else: log.error("The ip {ip} has unexpected hardware rate {hd_rt}, while the expected rate should be between {min_r} and {max_r}".format(ip=ip, hd_rt=hd_rt, min_r=min_r, max_r=max_r)) res = False if res: ips=",".join(ip_list) log.info("ips {ip_list} have the expected hardware rate {hd_rt}".format(ip_list=ips,hd_rt=hd_rt)) return True timeout.sleep() return res
5,348,227
def sieve(n): """ Returns a list with all prime numbers up to n. >>> sieve(50) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] >>> sieve(25) [2, 3, 5, 7, 11, 13, 17, 19, 23] >>> sieve(10) [2, 3, 5, 7] >>> sieve(9) [2, 3, 5, 7] >>> sieve(2) [2] >>> sieve(1) [] """ l = [True] * (n + 1) # noqa: E741 prime = [] start = 2 end = int(math.sqrt(n)) while start <= end: # If start is a prime if l[start] is True: prime.append(start) # Set multiples of start be False for i in range(start * start, n + 1, start): if l[i] is True: l[i] = False start += 1 for j in range(end + 1, n + 1): if l[j] is True: prime.append(j) return prime
5,348,228
def is_trueish(expression: str) -> bool: """True if string and "True", "Yes", "On" (ignorecase), False otherwise""" expression = str(expression).strip().lower() return expression in {'true', 'yes', 'on'}
5,348,229
def u_onequbit_h(qc: qiskit.QuantumCircuit, thetas, wire: int): """Return a simple series of 1 qubit - gate which is measured in X-basis Args: - qc (QuantumCircuit): Init circuit - thetas (Numpy array): Parameters - wire (Int): position that the gate carries on Returns: - QuantumCircuit: The circuit which have added gates """ if isinstance(wire, int) != True: wire = (wire['wire']) qc.rz(thetas[0], wire) qc.rx(thetas[1], wire) qc.rz(thetas[2], wire) qc.h(wire) return qc
5,348,230
def test_union_g_month_day_g_year_month_enumeration_nistxml_sv_iv_union_g_month_day_g_year_month_enumeration_1_1(mode, save_output, output_format): """ Type union/gMonthDay-gYearMonth is restricted by facet enumeration. """ assert_bindings( schema="nistData/union/gMonthDay-gYearMonth/Schema+Instance/NISTSchema-SV-IV-union-gMonthDay-gYearMonth-enumeration-1.xsd", instance="nistData/union/gMonthDay-gYearMonth/Schema+Instance/NISTXML-SV-IV-union-gMonthDay-gYearMonth-enumeration-1-1.xml", class_name="NistschemaSvIvUnionGMonthDayGYearMonthEnumeration1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,348,231
def get_manager() -> ArchiveManager: """ Returns the object storage manager for the archive subsys :return: """ global _manager_singleton if _manager_singleton is None: raise Exception("Not initialized. Call init_archive_manager") return _manager_singleton
5,348,232
def require_password_and_profile_via_email( strategy, backend, user=None, flow=None, current_partial=None, *args, **kwargs ): # pylint: disable=unused-argument """ Sets a new user's password and profile Args: strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate backend (social_core.backends.base.BaseAuth): the backend being used to authenticate user (User): the current user flow (str): the type of flow (login or register) current_partial (Partial): the partial for the step in the pipeline Raises: RequirePasswordAndProfileException: if the user hasn't set password or name """ if backend.name != EmailAuth.name or flow != SocialAuthState.FLOW_REGISTER: return {} data = strategy.request_data() profile = user.profile with transaction.atomic(): if "name" in data: profile = profile_api.ensure_profile(user, {"name": data["name"]}) if "password" in data: user.set_password(data["password"]) user.save() if not user.password or not profile.name: raise RequirePasswordAndProfileException(backend, current_partial) return {"user": user, "profile": profile or user.profile}
5,348,233
def plot_feat_barplot(feat_data: pd.DataFrame, top_x_feats: int = 15, plot_features: dict = None ): """Plots local feature explanations Parameters ---------- feat_data: pd.DataFrame Feature explanations top_x_feats: int The number of feature to display. plot_features: dict Dict containing mapping between model features and display features """ feat_data = copy.deepcopy(feat_data) if plot_features: plot_features['Pruned Events'] = 'Pruned Events' feat_data['Feature'] = feat_data['Feature'].apply(lambda x: plot_features[x]) feat_data['sort_col'] = feat_data['Shapley Value'].apply(lambda x: abs(x)) if top_x_feats is not None and feat_data.shape[0] > top_x_feats: sorted_df = feat_data.sort_values('sort_col', ascending=False) cutoff_contribution = abs(sorted_df.iloc[4]['Shapley Value']) feat_data = feat_data[np.logical_or(feat_data['Explanation'] >= cutoff_contribution, feat_data['Explanation'] <= -cutoff_contribution)] a = alt.Chart(feat_data).mark_bar(size=15, thickness=1).encode( y=alt.Y("Feature", axis=alt.Axis(title="Feature", labelFontSize=15, titleFontSize=15, titleX=-61), sort=alt.SortField(field='sort_col', order='descending')), x=alt.X('Shapley Value', axis=alt.Axis(grid=True, title="Shapley Value", labelFontSize=15, titleFontSize=15), scale=alt.Scale(domain=[-0.1, 0.4])), ) line = alt.Chart(pd.DataFrame({'x': [0]})).mark_rule( color='#798184').encode(x='x') feature_plot = (a + line).properties( width=190, height=225 ) return feature_plot
5,348,234
def RK2(state,arrayTimeIndex,globalTimeStep,dt): """Use this method to solve a function with RK2 in time.""" #globals global gamma, mG gamma = 1.4 mG = 0.4 #step data dx = 1/(state.shape[-1]-4-1) dtdx = dt/dx #Creating pressure vector if globalTimeStep%2==0: P = eqnStateQ(state[arrayTimeIndex,0,:],state[arrayTimeIndex,1,:],state[arrayTimeIndex,3,:]) state[arrayTimeIndex+1] = state[arrayTimeIndex-1]+dtdx*fv5p(state[arrayTimeIndex],P) else: P = eqnStateQ(state[arrayTimeIndex,0,:],state[arrayTimeIndex,1,:],state[arrayTimeIndex,3,:]) state[arrayTimeIndex+1] = state[arrayTimeIndex]+dtdx*0.5*fv5p(state[arrayTimeIndex],P)
5,348,235
async def post_ir_remote_key(device_id: str, remote_id: str, payload: dict) -> dict: # fmt: off """ Trigger key / code on the remote bound to IR device. There are 2 types of keys on Tuya IR devices: * native - out of the box keys, provided with remotes for different brands * custom - DIY keys learned by the IR device Body for "custom" key (e.g. DIY): { "type": "custom", "code": "<value>" } Body for "native" key: { "type": "native", "key": "<value>" } :param device_id: Unique id of the Tuya device (in Tuya's API it is called 'infrared_id') :param remote_id: Unique remote id bound to the IR device, returned by 'get_ir_device_remotes'. :param payload: Request body in JSON format :return: Dictionary with HTTP response """ # fmt: on url = _url_format(id=device_id, endpoint="remotes", url=IR_URL) # {"code" : <value>} for "custom", {"key": <value>} for "native" if payload.get("type", "native") == "custom": lc_url = f"{url}/{remote_id}/learning-codes" response = _request(url=lc_url, payload=payload) else: k_url = f"{url}/{remote_id}/command" response = _request(url=k_url, payload=payload) return response
5,348,236
def login(): """ The function for the front-end client to log in. Use the following command to test: $ curl -d '{"custom_id":"id"}' -H "Content-Type: application/json" -X POST http://0.0.0.0:5000/login/ Parameters ---------- google_id_token : str The token obtained from the Google Sign-In API. client_id : str The client ID string returned by the Google Analytics tracker or created by the front-end client. Returns ------- user_token : str The encoded JWT that stores user information. """ client_id = None request_json = request.get_json() if request_json is not None: if "google_id_token" in request_json: # google_id_token is obtained from the Google Sign-In API google_id_token = request_json["google_id_token"] # Verify the google_id_token using Google Sign-In API try: id_info = id_token.verify_oauth2_token(google_id_token, requests.Request(), config.GOOGLE_SIGNIN_CLIENT_ID) # Token is valid client_id = "google.%s" % id_info["sub"] except ValueError: traceback.print_exc() e = InvalidUsage("Invalid Google ID token.", status_code=401) return handle_invalid_usage(e) except: traceback.print_exc() e = InvalidUsage(traceback.format_exc(), status_code=401) return handle_invalid_usage(e) else: if "client_id" in request_json: # obtained from the Google Analytics tracker or created by the front-end client client_id = request_json["client_id"] # Get user id by client id, and issued an user jwt if client_id is None: e = InvalidUsage("Must have either 'google_id_token' or 'client_id'.", status_code=400) return handle_invalid_usage(e) else: user_token = get_user_token_by_client_id(client_id) if user_token is None: e = InvalidUsage("Permission denied.", status_code=403) return handle_invalid_usage(e) else: return_json = {"user_token": user_token} return jsonify(return_json)
5,348,237
def listThingTypes(): """ Return a list of C{unicode} strings each of which gives the name of a type which can be created with the create command. """ return sorted([type.type for type in getPlugins(IThingType, imaginary.plugins)])
5,348,238
def transform(item_paths, output_dir, experiment_code, compresslevel=0): """Read medable csv and writes gen3 json.""" cases_emitter = emitter('case', output_dir=output_dir) cases = set([]) with open(item_paths[0], newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: if exclude_row(row): continue submitter_id = row['c_public_user._id'] if len(submitter_id) == 0: continue cases.add(submitter_id) for submitter_id in cases: case = {'type': 'case', 'experiments': {'submitter_id': experiment_code}, 'submitter_id': submitter_id, 'project_id': DEFAULT_PROJECT_ID} cases_emitter.write(case) cases_emitter.close()
5,348,239
def _parse_boolean(value): """ :param value: The string to parse :type value: str :returns: The parsed value :rtype: bool """ try: boolean = json.loads(value) if boolean is None or isinstance(boolean, bool): return boolean else: raise DCOSException( 'Unable to parse {!r} as a boolean'.format(value)) except ValueError as error: logger.exception('Error parsing value as a JSON boolean') msg = 'Unable to parse {!r} as a boolean: {}'.format(value, error) raise DCOSException(msg)
5,348,240
def train(args, model, train_data_loader, dev_data_loader, accuracy, device): """ Train the current model Keyword arguments: args: arguments model: model to be trained train_data_loader: pytorch build-in data loader output for training examples dev_data_loader: pytorch build-in data loader output for dev examples accuracy: previous best accuracy device: cpu of gpu """ model.train() optimizer = torch.optim.Adamax(model.parameters()) criterion = nn.CrossEntropyLoss() print_loss_total = 0 epoch_loss_total = 0 start = time.time() #### modify the following code to complete the training funtion for idx, batch in enumerate(train_data_loader): question_text = batch['text'].to(device) question_len = batch['len'] labels = batch['labels'] #### Your code here clip_grad_norm_(model.parameters(), args.grad_clipping) print_loss_total += loss.data.numpy() epoch_loss_total += loss.data.numpy() if idx % args.checkpoint == 0 and idx > 0: print_loss_avg = print_loss_total / args.checkpoint print('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, time.time()- start)) print_loss_total = 0 curr_accuracy = evaluate(dev_data_loader, model, device) if accuracy < curr_accuracy: torch.save(model, args.save_model) accuracy = curr_accuracy return accuracy
5,348,241
def get_category_embeddings(word_table, embeds): """Calculate embeddings from word labels for each category.""" category_words = read_categories_as_json() word_ids = word_table.lookup(tf.constant(category_words)) glove_embeds = tf.nn.embedding_lookup(embeds, word_ids) # Calculate category embedding by summing word vectors in each category # tf.reduce_sum is used as the category embedding will be normalized later category_embeds = tf.reduce_sum(glove_embeds, axis=1) expand_category_embeds = tf.expand_dims(category_embeds, axis=1) return expand_category_embeds
5,348,242
def find_download_links(soup, title, language): """Examine all download links per law document and create respective filepaths.""" vbfile = soup.find("div", "vbFile") fulltext = soup.find("div", "fulltext") # check if file attachment elements exist if vbfile is not None: attach = vbfile.select("ul li a") metadata_list = [] # collect metadata for link and download_path # some laws have multiple doc links, so we want to alter the saved doc's filename to prevent overwriting multiple = len(attach) > 1 if multiple: title += "__" i = 1 # loop through every available file attachment for a in attach: # ignore "Xem nhanh"/Quick View links as they're invalid if "iFrame" in a["href"]: continue # all other links are javascript fpath = re.findall(r"([^']*)" , a["href"])[6] url = BASE_URL + fpath doc = requests.get(url) ext = re.split("\.", fpath)[-1] # some laws have multiple doc links, so we alter the saved doc's filename to prevent overwriting if multiple: title = title[:-1] + str(i) i += 1 fname = create_filename(title, language, ext) with open(fname, "wb") as f: for chunk in doc.iter_content(1024 * 1024): f.write(chunk) print("downloaded", ext, "for", title) metadata_list.append({"link": url, "download_path": fname, "language": language}) # alternative for "download_path": [fname.index("data"):] return metadata_list # if file attachment elements don't exist, scrape the text off the page and save as txt elif fulltext is not None: doc = fulltext.get_text() fname = create_filename(title, language, "txt") with open(fname, "w", encoding = "utf-8") as f: f.write(doc) print("downloaded txt for", title) return [{"download_path": fname, "language": language}] # alternative for "download_path": [fname.index("data"):] # if neither exists, don't save law document else: return None
5,348,243
def ignore(): """An empty function with a big string. Make the compression algorithm work a little harder. """ """ LAERTES O, fear me not. I stay too long: but here my father comes. Enter POLONIUS A double blessing is a double grace, Occasion smiles upon a second leave. LORD POLONIUS Yet here, Laertes! aboard, aboard, for shame! The wind sits in the shoulder of your sail, And you are stay'd for. There; my blessing with thee! And these few precepts in thy memory See thou character. Give thy thoughts no tongue, Nor any unproportioned thought his act. Be thou familiar, but by no means vulgar. Those friends thou hast, and their adoption tried, Grapple them to thy soul with hoops of steel; But do not dull thy palm with entertainment Of each new-hatch'd, unfledged comrade. Beware Of entrance to a quarrel, but being in, Bear't that the opposed may beware of thee. Give every man thy ear, but few thy voice; Take each man's censure, but reserve thy judgment. Costly thy habit as thy purse can buy, But not express'd in fancy; rich, not gaudy; For the apparel oft proclaims the man, And they in France of the best rank and station Are of a most select and generous chief in that. Neither a borrower nor a lender be; For loan oft loses both itself and friend, And borrowing dulls the edge of husbandry. This above all: to thine ownself be true, And it must follow, as the night the day, Thou canst not then be false to any man. Farewell: my blessing season this in thee! LAERTES Most humbly do I take my leave, my lord. LORD POLONIUS The time invites you; go; your servants tend. LAERTES Farewell, Ophelia; and remember well What I have said to you. OPHELIA 'Tis in my memory lock'd, And you yourself shall keep the key of it. LAERTES Farewell. """
5,348,244
def find_replace(input_path, old_string, new_string, name_id, platform, fix_cff, exclude_namerecord, output_dir, recalc_timestamp, overwrite): """Replaces a string in the name table with a new string. If the '-cff' option is passed, the string will be replaced also in the 'CFF' table: ftcli names find-replace MyFont-Black.otf --os "Black" --ns "Heavy" --cff To simply remove a string, use an empty string as the new string: ftcli names find-replace MyFont-Black.otf --os "RemoveMe" --ns "" To replace the string in a specific platform ('win' or 'mac'): ftcli names find-replace MyFont-Black.otf -os "Black" -ns "Heavy" -p win To replace the string in a specific namerecord: ftcli names find-replace MyFont-Black.otf -os "Black" -ns "Heavy" -n 6 The -p / --platform and -n / --name-id options can be combined: ftcli names find-replace MyFont-Black.otf -os "Black" -ns "Heavy" -p win -n 6 To exclude one or more namerecords, use the -ex / --exclude-namerecord option: ftcli names find-replace MyFont-Black.otf -os "Black" -ns "Heavy" -ex 1 -ex 6 If a namerecord is explicitly included but also explicitly excluded, it won't be changed: ftcli names find-replace MyFont-Black.otf -os "Black" -ns "Heavy" -n 1 -ex 1 -ex 6 The above command will replace the string only in nameID 6 in both platforms. """ files = getFontsList(input_path) for f in files: try: font = Font(f, recalcTimestamp=recalc_timestamp) fix_count = font.findReplace( old_string, new_string, fixCFF=fix_cff, nameID=name_id, platform=platform, namerecords_to_ignore=exclude_namerecord) if fix_count > 0: output_file = makeOutputFileName(f, outputDir=output_dir, overWrite=overwrite) font.save(output_file) click.secho(f'{os.path.basename(output_file)} --> saved', fg='green') else: click.secho(f'{os.path.basename(f)} --> no changes made', fg='yellow') except Exception as e: click.secho(f'ERROR: {e}', fg='red')
5,348,245
def fake_feature_date(days=365): """Generate fake feature_date.""" start_date = date.today() random_number_of_days = random.randrange(days) _date = start_date + timedelta(days=random_number_of_days) return _date.strftime("%Y-%m-%d")
5,348,246
def freq_dom(inf, outf): """ Dominance filter: When there are multiple annotations of the exact same anchor, (position + content) keep only the one with the highest count, breaking ties with alphabetical-ness. TODO: Could use some other measure of frequency -- or lemma numbering/graph centrality measures """ return FreqRankDom().proc_stream(inf, outf)
5,348,247
def download_recurse(url, path, filenames): """download files from url :param str url: the url to download from, ending with a '/' :param str path: the directory to save the files to :param list filenames: list of filenames to download """ path = Path(path) with click.progressbar(filenames, label='downloading drawing dataset:') as files: for file in files: site = url + file.replace(' ', '%20') + '.bin' fpath = download(site, file + '.bin', path)
5,348,248
def export_graph(checkpoint_path, output_nodes): """ Export a graph stored in a checkpoint as a *.pb file. :param checkpoint_path: The checkpoint path which should be frozen. :param output_nodes: The output nodes you care about as a list of strings (their names). :return: """ if not tf.gfile.Exists(checkpoint_path): raise AssertionError( "Export directory doesn't exists. Please specify an export " "directory: %s" % checkpoint_path) if not output_nodes: print("You need to supply the name of a node to --output_node_names.") return -1 # We retrieve our checkpoint fullpath checkpoint = tf.train.get_checkpoint_state(checkpoint_path) input_checkpoint = checkpoint.model_checkpoint_path # We precise the file fullname of our freezed graph output_graph = checkpoint_path + "/frozen_model.pb" # We clear devices to allow TensorFlow to control on which device it will load operations clear_devices = True # We start a session using a temporary fresh Graph with tf.Session(graph=tf.Graph()) as sess: # We import the meta graph in the current default Graph saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices) # We restore the weights saver.restore(sess, input_checkpoint) # We use a built-in TF helper to export variables to constants output_graph_def = tf.graph_util.convert_variables_to_constants( sess, # The session is used to retrieve the weights tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes output_nodes # The output node names are used to select the useful nodes ) # Finally we serialize and dump the output graph to the filesystem with tf.gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) print("%d ops in the final graph." % len(output_graph_def.node)) return output_graph_def
5,348,249
def scss(**conf): """ Render all SCSS/SASS files into CSS. The input directory will be searched for *.scss files, which will be compiled to corresponding *.css files in the output directory. """ compiler = pyScss.Scss(scss_opts={'compress': 0}) logging.getLogger("scss").addHandler(logging.StreamHandler()) def compile(path, source): css = compiler.compile(source) outfile = open(path, 'w') outfile.write(css) outfile.close() files = readfiles(conf['indir'], "scss") pyScss.config.LOAD_PATHS = [ conf['indir'], os.path.join(conf['indir'], 'lib'), # FIXME: Why aren't these paths automatically picked up on Windows? os.path.join(conf['indir'], 'lib', 'compass'), os.path.join(conf['indir'], 'lib', 'compass', 'css3'), ] for name, source in files.items(): if isinstance(source, dict): continue path = "%s/%s.css" % (conf['outdir'], name) compile(path, source) click.echo("%s compiled from %s/%s.scss" % (path, conf['indir'], name))
5,348,250
def GetTrackingBranch(git_repo, branch=None, for_checkout=True, fallback=True, manifest=None, for_push=False): """Gets the appropriate push branch for the specified directory. This function works on both repo projects and regular git checkouts. Assumptions: 1. We assume the manifest defined upstream is desirable. 2. No manifest? Assume tracking if configured is accurate. 3. If none of the above apply, you get 'origin', 'master' or None, depending on fallback. Args: git_repo: Git repository to operate upon. branch: Find the tracking branch for this branch. Defaults to the current branch for |git_repo|. for_checkout: Whether to return localized refspecs, or the remotes view of it. fallback: If true and no remote/branch could be discerned, return 'origin', 'master'. If False, you get None. Note that depending on the remote, the remote may differ if for_push is True or set to False. for_push: Controls whether the remote and refspec returned is explicitly for pushing. manifest: A Manifest instance if one is available, else a ManifestCheckout is created and used. Returns: A RemoteRef, or None. """ result = GetTrackingBranchViaManifest(git_repo, for_checkout=for_checkout, manifest=manifest, for_push=for_push) if result is not None: return result if branch is None: branch = GetCurrentBranch(git_repo) if branch: result = GetTrackingBranchViaGitConfig(git_repo, branch, for_checkout=for_checkout) if result is not None: if (result.ref.startswith('refs/heads/') or result.ref.startswith('refs/remotes/')): return result if not fallback: return None if for_checkout: return RemoteRef('origin', 'refs/remotes/origin/master') return RemoteRef('origin', 'master')
5,348,251
def test_isupport_getitem_case_insensitive(): """Test access to parameters is case insensitive.""" instance = isupport.ISupport(awaylen=50) assert 'AWAYLEN' in instance assert 'awaylen' in instance assert instance['AWAYLEN'] == 50 assert instance['awaylen'] == 50
5,348,252
def inception_crop(image, **kw): """Perform an "inception crop", without resize.""" begin, size, _ = tf.image.sample_distorted_bounding_box( tf.shape(image), tf.zeros([0, 0, 4], tf.float32), use_image_if_no_bounding_boxes=True, **kw) crop = tf.slice(image, begin, size) # Unfortunately, the above operation loses the depth-dimension, so we need # to restore it manually. crop.set_shape([None, None, image.shape[-1]]) return crop
5,348,253
def _clear_port_access_clients_limit_v1(port_name, **kwargs): """ Perform GET and PUT calls to clear a port's limit of maximum allowed number of authorized clients. :param port_name: Alphanumeric name of Port :param kwargs: keyword s: requests.session object with loaded cookie jar keyword url: URL in main() function :return: True if successful, False otherwise """ port_name_percents = common_ops._replace_special_characters(port_name) port_data = port.get_port(port_name_percents, depth=0, selector="configuration", **kwargs) port_data.pop('port_access_clients_limit', None) port_data.pop('name', None) port_data.pop('origin', None) port_data.pop('vrf', None) target_url = kwargs["url"] + "system/ports/%s" % port_name_percents put_data = json.dumps(port_data, sort_keys=True, indent=4) response = kwargs["s"].put(target_url, data=put_data, verify=False) if not common_ops._response_ok(response, "PUT"): logging.warning("FAIL: Removing maximum allowable clients limit on Port '%s' failed with status code %d: %s" % (port_name, response.status_code, response.text)) return False else: logging.info("SUCCESS: Removing maximum allowable clients limit on Port '%s' succeeded" % port_name) return True
5,348,254
def yield_abspath_from_fofn(fofn_fn): """Yield each filename. Relative paths are resolved from the FOFN directory. """ try: basedir = os.path.dirname(fofn_fn) for line in open(fofn_fn): fn = line.strip() if not os.path.isabs(fn): fn = os.path.abspath(os.path.join(basedir, fn)) yield fn except Exception: LOG.error('Problem resolving paths in FOFN {!r}'.format(fofn_fn)) raise
5,348,255
def cvGetHistValue_1D(hist, i1): """Returns pointer to histogram bin""" return cast(cvPtr1D(hist.bins, i1), c_float_p)
5,348,256
def contains_order_by(query): """Returns true if the query contains an 'order by' clause""" return re.search( r'order\s+by\b', query, re.M|re.I) is not None
5,348,257
def handler(event, context): """Called by Lambda""" try: send( event, context, 'SUCCESS', main(event), event['LogicalResourceId'] ) except Exception as e: send(event, context, "FAILED", {"Message": str(e)})
5,348,258
def detach(l): """\ Set a layer as detached, excluding it from gradient computation. :param l: layer or list of layers to detach :return: detached layer(s) """ # core module has multiple overloads for this: # 1. detach(l) where l is a Layer and the return value is a Layer # 2. detach(l) where l is a [Layer] and the return value is a [Layer] return _eddl.detach(l)
5,348,259
def validate_params_int(params: dict) -> None: """Validates the parameters for the chart based on the integer value :param params: Dictionary of parameters :type params: dict :raises ValueError: """ variables = ["line_width", "point_size", "bucket_size"] for var in variables: if var in params.keys() and params[var]: if type(params[var]) != int or params[var] < 0: raise ValueError( f"{params[var]} is not a valid parameter for {var}. " f"Accepted values are any integer greater than 0" )
5,348,260
def update_data(p_state, idx_chain=-1): """Updates various data of the chain, including: - Energies of images - Reaction coordinates of images - Interpolated energy and reaction coordinate values """ _Update_Data(ctypes.c_void_p(p_state), ctypes.c_int(idx_chain))
5,348,261
def test_R1(): """ Test R for dependent variables """ d = D(['00', '11'], [1/2, 1/2]) assert R(d) == pytest.approx(0)
5,348,262
def __shorten_floats(source): """ Use short float notation whenever possible :param source: The source GLSL string :return: The GLSL string with short float notation applied """ # Strip redundant leading digits source = re.sub(re.compile(r'(?<=[^\d.])0(?=\.)'), '', source) # Strip redundant trailing digits return re.sub(re.compile(r'(?<=\d\.)0(?=\D)'), '', source)
5,348,263
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28, channels=1, nb_filters=64, nb_classes=10): """ Defines a CNN model using Keras sequential model :param logits: If set to False, returns a Keras model, otherwise will also return logits tensor :param input_ph: The TensorFlow tensor for the input (needed if returning logits) ("ph" stands for placeholder but it need not actually be a placeholder) :param img_rows: number of row in the image :param img_cols: number of columns in the image :param channels: number of color channels (e.g., 1 for MNIST) :param nb_filters: number of convolutional filters per layer :param nb_classes: the number of output classes :return: """ model = Sequential() # Define the layers successively (convolution layers are version dependent) if keras.backend.image_dim_ordering() == 'th': input_shape = (channels, img_rows, img_cols) else: input_shape = (img_rows, img_cols, channels) layers = [conv_2d(nb_filters, (5, 5), (1, 1), "same", input_shape=input_shape), Activation('relu'), conv_2d(nb_filters, (5, 5), (1, 1), "valid"), Activation('relu'), Flatten(), Dropout(0.25), Dense(128), Activation('relu'), Dropout(0.5), Dense(nb_classes)] for layer in layers: model.add(layer) if logits: logits_tensor = model(input_ph) model.add(Activation('softmax')) if logits: return model, logits_tensor else: return model
5,348,264
def _mvnormal_halton(sample_shape, mean, randomized, seed=None, covariance_matrix=None, scale_matrix=None, validate_args=False, dtype=None, **kwargs): """Returns normal draws using Halton low-discrepancy sequences.""" random_type = (RandomType.HALTON_RANDOMIZED if randomized else RandomType.HALTON) return _mvnormal_quasi(sample_shape, mean, random_type, seed=seed, covariance_matrix=covariance_matrix, scale_matrix=scale_matrix, validate_args=validate_args, dtype=dtype, **kwargs)
5,348,265
def get_meals(bouts_sec: np.ndarray, max_gap_sec: float = 60.0, min_overlap: float = 0.25): """ Computes a sequence of meal intervals from a sequence of chewing-bout intervals. :param bouts_sec: The sequence of chewing-bout intervals (see ``get_bouts`` output) :param max_gap_sec: Maximum gap-duration that is merged between consecutive chewing-bouts :param min_overlap: Minimum allowed overlap of chewing-bout duration with meal duration :return: The 2-column (start & stop, in seconds) matrix of meals """ assert is_numpy_matrix(bouts_sec, cols=2) assert isinstance(max_gap_sec, float) assert isinstance(min_overlap, float) meals_sec, orig_durations_sec = merge_gaps(bouts_sec, max_gap_sec, True) overlap = orig_durations_sec / (meals_sec[:, 1] - meals_sec[:, 0]) return meals_sec[overlap >= min_overlap, :]
5,348,266
def certificate_request_delete(handle, name): """ Deletes a certificate request from keyring Args: handle (UcsHandle) name (string): KeyRing name Returns: None Raises: UcsOperationError: If PkiCertReq is not present Example: certificate_request_delete(handle, name="mykeyring") """ mo = certificate_request_get(handle, name, caller="certificate_request_delete") handle.remove_mo(mo) handle.commit()
5,348,267
def test_evaluate_1d_data(input_data_file, output_data_file, index): """ Test ability to evaluate one dimensional data. """ # Initialize model from spring-mass example data files: data_model = ModelFromData(input_data_file, output_data_file, 1.) input_data = np.genfromtxt(input_data_file) output_data = np.genfromtxt(output_data_file) # Model expects arrays as inputs/outputs. model_output = data_model.evaluate(input_data[index]) true_output = output_data[index] assert np.all(np.isclose(model_output, true_output))
5,348,268
def _fail(msg): """Output failure message when auto configuration fails.""" red = "\033[0;31m" no_color = "\033[0m" fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg))
5,348,269
def ThreePaneView4DFrames(It): """ Pass in a series of fImages, fArray3Ds, python lists, or numpy array and we'll show a three-pane view with a slider on the bottom for visualizing 4D datasets as 3D frames """ print("4D frame-based threepane view not implemented yet, just an idea.")
5,348,270
def test_input_sanity(): """ Check incorrect input do fail """ with assert_raises(NotImplementedError) as exception: MonteCarlo(temperature=0e0) with assert_raises(ValueError) as exception: MonteCarlo(temperature=-1e0) mc = MonteCarlo() with assert_raises(TypeError) as exception: mc(lambda x: 0, [1.0, 2, 3]) with assert_raises(ValueError) as exception: mc(lambda x: 0, [-1, 2, 3]) with assert_raises(ValueError) as exception: mc(lambda x: 0, [[1, 2, 3], [3, 4, 5]]) with assert_raises(ValueError) as exception: mc(lambda x: 0, [3]) with assert_raises(ValueError) as exception: mc(lambda x: 0, [0, 0])
5,348,271
def torch_load(path, model): """Load torch model states. Args: path (str): Model path or snapshot file path to be loaded. model (torch.nn.Module): Torch model. """ if "snapshot" in os.path.basename(path): model_state_dict = torch.load(path, map_location=lambda storage, loc: storage)[ "model" ] else: model_state_dict = torch.load(path, map_location=lambda storage, loc: storage) if hasattr(model, "module"): model.module.load_state_dict(model_state_dict) else: model.load_state_dict(model_state_dict) del model_state_dict
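A hedged usage sketch; the checkpoint path below is a placeholder and the model is an arbitrary torch.nn.Module, not part of the original code.

import torch
import torch.nn as nn

# Placeholder model; any torch.nn.Module works here.
model = nn.Linear(10, 2)
# Hypothetical path: "snapshot" in the filename triggers the snapshot branch above.
torch_load("exp/results/snapshot.ep.10", model)
model.eval()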
5,348,272
def setPerformanceLevel(source, level):
    """Sets a given performance level for the GPU Core and Memory.

    Args:
        source: string containing word "core" or "mem"
        level: an integer between 0-7 for core and 0-3 for memory

    Returns:
        True - if action is successful.
        False - not possible to apply configuration.
    """
    if source == "core":
        assert level in list(range(0, 8)), "Core Performance Level between 0 and 7."
        result = runDVFSscript("-P " + str(level))
        if "ERROR" in result:
            return False
    elif source == "mem":
        assert level in list(range(0, 4)), "Memory Performance Level between 0 and 3."
        result = runDVFSscript("-p " + str(level))
        if "ERROR" in result:
            return False
    else:
        print("Invalid source - use 'core' or 'mem'")
        return False

    return True
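Illustrative calls, as a sketch: runDVFSscript must exist in this module and the host needs the corresponding GPU driver tooling and permissions.

# Request the highest performance levels for core and memory.
ok_core = setPerformanceLevel("core", 7)
ok_mem = setPerformanceLevel("mem", 3)
if not (ok_core and ok_mem):
    print("Could not apply the requested DVFS configuration")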
5,348,273
def test_n_submodels(): """ Test that CompoundModel.n_submodels properly returns the number of components. """ g2 = Gaussian1D() + Gaussian1D() assert g2.n_submodels() == 2 g3 = g2 + Gaussian1D() assert g3.n_submodels() == 3 g5 = g3 | g2 assert g5.n_submodels() == 5 g7 = g5 / g2 assert g7.n_submodels() == 7 # make sure it works as class method p = Polynomial1D + Polynomial1D assert p.n_submodels() == 2
5,348,274
def elem_props_template_init(templates, template_type): """ Init a writing template of given type, for *one* element's properties. """ ret = OrderedDict() tmpl = templates.get(template_type) if tmpl is not None: written = tmpl.written[0] props = tmpl.properties ret = OrderedDict((name, [val, ptype, anim, written]) for name, (val, ptype, anim) in props.items()) return ret
5,348,275
def to_density(x, bins=5, bounds=None):
    """Turn a sample into a discrete density based on the number of bins."""
    p_x = np.histogram(x, bins=bins, density=True, range=bounds)[0]
    p_x = p_x / np.sum(p_x)
    return p_x
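A quick sketch of the intended use: 1000 samples reduced to a 5-bin probability vector that sums to one.

import numpy as np

x = np.random.default_rng(0).normal(size=1000)
p = to_density(x, bins=5, bounds=(-3.0, 3.0))
print(p, p.sum())   # five probabilities summing to 1.0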
5,348,276
def update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None): """ Updates a partition. See also: AWS API Documentation Exceptions :example: response = client.update_partition( CatalogId='string', DatabaseName='string', TableName='string', PartitionValueList=[ 'string', ], PartitionInput={ 'Values': [ 'string', ], 'LastAccessTime': datetime(2015, 1, 1), 'StorageDescriptor': { 'Columns': [ { 'Name': 'string', 'Type': 'string', 'Comment': 'string', 'Parameters': { 'string': 'string' } }, ], 'Location': 'string', 'InputFormat': 'string', 'OutputFormat': 'string', 'Compressed': True|False, 'NumberOfBuckets': 123, 'SerdeInfo': { 'Name': 'string', 'SerializationLibrary': 'string', 'Parameters': { 'string': 'string' } }, 'BucketColumns': [ 'string', ], 'SortColumns': [ { 'Column': 'string', 'SortOrder': 123 }, ], 'Parameters': { 'string': 'string' }, 'SkewedInfo': { 'SkewedColumnNames': [ 'string', ], 'SkewedColumnValues': [ 'string', ], 'SkewedColumnValueLocationMaps': { 'string': 'string' } }, 'StoredAsSubDirectories': True|False }, 'Parameters': { 'string': 'string' }, 'LastAnalyzedTime': datetime(2015, 1, 1) } ) :type CatalogId: string :param CatalogId: The ID of the Data Catalog where the partition to be updated resides. If none is provided, the AWS account ID is used by default. :type DatabaseName: string :param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n :type TableName: string :param TableName: [REQUIRED]\nThe name of the table in which the partition to be updated is located.\n :type PartitionValueList: list :param PartitionValueList: [REQUIRED]\nA list of the values defining the partition.\n\n(string) --\n\n :type PartitionInput: dict :param PartitionInput: [REQUIRED]\nThe new partition object to update the partition to.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. 
By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Glue.Client.exceptions.EntityNotFoundException Glue.Client.exceptions.InvalidInputException Glue.Client.exceptions.InternalServiceException Glue.Client.exceptions.OperationTimeoutException Glue.Client.exceptions.GlueEncryptionException :return: {} :returns: (dict) -- """ pass
5,348,277
def pro_bar(ts_code='', api=None, start_date='', end_date='', freq='D', asset='E',
            exchange='',
            adj=None,
            ma=[],
            factors=None,
            adjfactor=False,
            contract_type='',
            retry_count=3):
    """
    BAR data
    Parameters:
    ------------
    ts_code: security code; supports stocks, ETF/LOF, futures/options, HK stocks and digital currencies
    start_date: start date, YYYYMMDD
    end_date: end date, YYYYMMDD
    freq: supports 1/5/15/30/60-minute bars and weekly/monthly/quarterly/yearly bars
    asset: security type. E: stocks and exchange-traded funds, I: SSE/SZSE indexes, C: digital currencies,
           FT: futures, FD: funds / O: options / H: HK stocks, US-listed China concept stocks,
           CSI indexes, international indexes
    exchange: exchange code, used for digital-currency quotes
    adj: price-adjustment type. None: unadjusted, qfq: forward-adjusted, hfq: backward-adjusted
    ma: moving averages; custom windows are supported, e.g. ma5/ma10/ma20/ma60/maN
    factors: factor data; currently the following two are supported:
        vr: volume ratio, not returned by default; request it with factors=['vr']
        tor: turnover rate, not returned by default; request it with factors=['tor']
        to get both, use factors=['vr', 'tor']
    retry_count: number of network retries

    Return
    ----------
    DataFrame
    code: security code
    open: opening price, close/high/low, vol: volume, amount: turnover, maN: average price,
    vr: volume ratio, tor: turnover rate

    Futures (asset='X')
    code/open/close/high/low, avg_price: average price, position: open interest, vol: total volume
    """
    today = datetime.datetime.today().date()
    today = str(today)[0:10]
    start_date = '' if start_date is None else start_date
    end_date = today if end_date == '' or end_date is None else end_date
    ts_code = ts_code.strip().upper() if asset != 'C' else ts_code.strip().lower()
    start_date = start_date.replace('-', '')
    end_date = end_date.replace('-', '')
    if len(freq.strip()) >= 3:
        freq = freq.strip().lower()
    else:
        freq = freq.strip().upper() if asset != 'C' else freq.strip().lower()
    asset = asset.strip().upper()
    api = api if api is not None else pro_api()
    for _ in range(retry_count):
        try:
            if asset == 'E':
                if freq == 'D':
                    data = api.daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
                    if factors is not None and len(factors) > 0:
                        ds = api.daily_basic(ts_code=ts_code, start_date=start_date, end_date=end_date)[['trade_date', 'turnover_rate', 'volume_ratio']]
                        ds = ds.set_index('trade_date')
                        data = data.set_index('trade_date')
                        data = data.merge(ds, left_index=True, right_index=True)
                        data = data.reset_index()
                        if ('tor' in factors) and ('vr' not in factors):
                            data = data.drop('volume_ratio', axis=1)
                        if ('vr' in factors) and ('tor' not in factors):
                            data = data.drop('turnover_rate', axis=1)
                if freq == 'W':
                    data = api.weekly(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if freq == 'M':
                    data = api.monthly(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if 'min' in freq:
                    data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
                    data['trade_date'] = data['trade_time'].map(lambda x: x.replace('-', '')[0:8])
                    data['pre_close'] = data['close'].shift(-1)
                if adj is not None:
                    fcts = api.adj_factor(ts_code=ts_code, start_date=start_date, end_date=end_date)[['trade_date', 'adj_factor']]
                    data = data.set_index('trade_date', drop=False).merge(fcts.set_index('trade_date'), left_index=True, right_index=True, how='left')
                    if 'min' in freq:
                        data = data.sort_values('trade_time', ascending=False)
                    data['adj_factor'] = data['adj_factor'].fillna(method='bfill')
                    for col in PRICE_COLS:
                        if adj == 'hfq':
                            data[col] = data[col] * data['adj_factor']
                        if adj == 'qfq':
                            data[col] = data[col] * data['adj_factor'] / float(fcts['adj_factor'][0])
                        data[col] = data[col].map(FORMAT)
                    for col in PRICE_COLS:
                        data[col] = data[col].astype(float)
                    if adjfactor is False:
                        data = data.drop('adj_factor', axis=1)
                    if 'min' not in freq:
                        data['change'] = data['close'] - data['pre_close']
                        data['pct_chg'] = data['change'] / data['pre_close'] * 100
                        data['pct_chg'] = data['pct_chg'].map(lambda x: FORMAT(x)).astype(float)
                    else:
                        data = data.drop(['trade_date', 'pre_close'], axis=1)
            elif asset == 'I':
                if freq == 'D':
                    data = api.index_daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if freq == 'W':
                    data = api.index_weekly(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if freq == 'M':
                    data = api.index_monthly(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if 'min' in freq:
                    data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
            elif asset == 'FT':
                if freq == 'D':
                    data = api.fut_daily(ts_code=ts_code, start_date=start_date, end_date=end_date, exchange=exchange)
                if 'min' in freq:
                    data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
            elif asset == 'O':
                if freq == 'D':
                    data = api.opt_daily(ts_code=ts_code, start_date=start_date, end_date=end_date, exchange=exchange)
                if 'min' in freq:
                    data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
            elif asset == 'FD':
                if freq == 'D':
                    data = api.fund_daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
                if 'min' in freq:
                    data = api.mins(ts_code=ts_code, start_time=start_date, end_time=end_date, freq=freq)
            if asset == 'C':
                if freq == 'd':
                    freq = 'daily'
                elif freq == 'w':
                    freq = 'week'
                data = api.coinbar(exchange=exchange, symbol=ts_code, freq=freq, start_dae=start_date,
                                   end_date=end_date, contract_type=contract_type)
            if ma is not None and len(ma) > 0:
                for a in ma:
                    if isinstance(a, int):
                        data['ma%s' % a] = MA(data['close'], a).map(FORMAT).shift(-(a - 1))
                        data['ma%s' % a] = data['ma%s' % a].astype(float)
                        data['ma_v_%s' % a] = MA(data['vol'], a).map(FORMAT).shift(-(a - 1))
                        data['ma_v_%s' % a] = data['ma_v_%s' % a].astype(float)
            data = data.reset_index(drop=True)
        except Exception as e:
            return None
        else:
            return data
    raise IOError('ERROR.')
5,348,278
def convert(comment, mode): """Convert documentation from a supported syntax into reST.""" # FIXME: try to preserve whitespace better if mode == 'javadoc-basic' or mode == 'javadoc-liberal': # @param comment = re.sub(r"(?m)^([ \t]*)@param([ \t]+)([a-zA-Z0-9_]+|\.\.\.)([ \t]+)", "\n\\1:param\\2\\3:\\4", comment) # @param[direction] comment = re.sub(r"(?m)^([ \t]*)@param\[([^]]*)\]([ \t]+)([a-zA-Z0-9_]+|\.\.\.)([ \t]+)", "\n\\1:param\\3\\4: *(\\2)* \\5", comment) # @return comment = re.sub(r"(?m)^([ \t]*)@returns?([ \t]+|$)", "\n\\1:return:\\2", comment) # @code/@endcode blocks. Works if the code is indented. comment = re.sub(r"(?m)^([ \t]*)@code([ \t]+|$)", "\n::\n", comment) comment = re.sub(r"(?m)^([ \t]*)@endcode([ \t]+|$)", "\n", comment) # Ignore @brief. comment = re.sub(r"(?m)^([ \t]*)@brief[ \t]+", "\n\\1", comment) # Ignore groups comment = re.sub(r"(?m)^([ \t]*)@(defgroup|addtogroup)[ \t]+[a-zA-Z0-9_]+[ \t]*", "\n\\1", comment) comment = re.sub(r"(?m)^([ \t]*)@(ingroup|{|}).*", "\n", comment) if mode == 'javadoc-liberal': # Liberal conversion of any @tags, will fail for @code etc. but don't # care. comment = re.sub(r"(?m)^([ \t]*)@([a-zA-Z0-9_]+)([ \t]+)", "\n\\1:\\2:\\3", comment) if mode == 'kernel-doc': # Basic kernel-doc convert, will document struct members as params, etc. comment = re.sub(r"(?m)^([ \t]*)@(returns?|RETURNS?):([ \t]+|$)", "\n\\1:return:\\3", comment) comment = re.sub(r"(?m)^([ \t]*)@([a-zA-Z0-9_]+|\.\.\.):([ \t]+)", "\n\\1:param \\2:\\3", comment) return comment
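An illustrative call, as a sketch: converting a small Javadoc-style block with the 'javadoc-basic' mode.

doc = """@brief Compute the checksum.
@param data input buffer
@param[in] length number of bytes
@return the CRC32 value"""
print(convert(doc, mode='javadoc-basic'))
# The output contains reST field lists such as ':param data:',
# ':param length: *(in)* ...' and ':return:'.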
5,348,279
def test_trigger_endpoint_uses_existing_dagbag(admin_client): """ Test that Trigger Endpoint uses the DagBag already created in views.py instead of creating a new one. """ url = 'trigger?dag_id=example_bash_operator' resp = admin_client.post(url, data={}, follow_redirects=True) check_content_in_response('example_bash_operator', resp)
5,348,280
def output_using_scrapingtoolkit( analysis_output: AnalysisOutput, ) -> None: """Outputs the analysis output using ScrapingToolKit.""" for component_type in analysis_output["component_types"]: Evidence.Add.COMPONENT_TYPE( identifier=component_type, ) for format_ in analysis_output["formats"]: Evidence.Add.FORMAT( identifier=format_.identifier, ) files = analysis_output["files"] for file_identifier in files: file: ontology.File = files[file_identifier] Evidence.Add.FILE( fileFormat_identifier=format_.identifier, filename=file.name, identifier=file_identifier, ) components = analysis_output["components"] for component_identifier in components: component: ontology.SoftwareComponent = components[component_identifier] Evidence.Add.SWCOMPONENT( identifier=component_identifier, componentType_identifier=component.component_type, title=escape(component.title), ) for callee in component.mentions: Evidence.Add.SWCOMPONENT( identifier=component_identifier, mentions_identifier=callee.identifier, )
5,348,281
def ticket_qr_code(request, ticket_id): """ Generates a qr code data url to validate a ticket with the id passed """ return segno.make( validate_ticket_url(request, ticket_id), micro=False ).svg_data_uri(scale=2)
5,348,282
def add_to_histogram(key, histogram):
    """ Increment the count stored for ``key`` in ``histogram``, starting at 1 for unseen keys. """
    if key in histogram:
        histogram[key] = int(histogram[key]) + 1
    else:
        histogram[key] = 1
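A tiny usage sketch: counting word frequencies with a plain dict.

histogram = {}
for word in ["red", "blue", "red"]:
    add_to_histogram(word, histogram)
print(histogram)   # {'red': 2, 'blue': 1}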
5,348,283
def test_prefix_printer_prefix_whitespace(prefix_string, capsys, text_string): """Test if prefix_printer has the correct prefix.""" p_test = prefix_printer(prefix=f"{prefix_string}", whitespace=4) p_test(text_string) captured = capsys.readouterr() assert captured.out == f"[{prefix_string.upper()}]: {text_string}\n"
5,348,284
def _iou(box_a, box_b):
    """
    :param box_a: [c, A, 4]
    :param box_b: [c, B, 4]
    :return: [c, A, B] pairwise IoU between every box in box_a and every box in box_b
    """
    # Convert from (cx, cy, w, h) to top-left and bottom-right corner coordinates.
    boxes1 = tf.concat([box_a[..., :2] - box_a[..., 2:] * 0.5,
                        box_a[..., :2] + box_a[..., 2:] * 0.5], axis=-1)
    boxes2 = tf.concat([box_b[..., :2] - box_b[..., 2:] * 0.5,
                        box_b[..., :2] + box_b[..., 2:] * 0.5], axis=-1)

    c = tf.shape(boxes1)[0]
    A = tf.shape(boxes1)[1]
    B = tf.shape(boxes2)[1]

    box_a = tf.reshape(boxes1, (c, A, 1, 4))
    box_b = tf.reshape(boxes2, (c, 1, B, 4))
    expand_box_a = tf.tile(box_a, [1, 1, B, 1])
    expand_box_b = tf.tile(box_b, [1, A, 1, 1])

    # Areas of the two sets of boxes.
    boxes1_area = (expand_box_a[..., 2] - expand_box_a[..., 0]) * (
        expand_box_a[..., 3] - expand_box_a[..., 1])
    boxes2_area = (expand_box_b[..., 2] - expand_box_b[..., 0]) * (
        expand_box_b[..., 3] - expand_box_b[..., 1])

    # Top-left and bottom-right corners of the intersection rectangles.
    left_up = tf.maximum(expand_box_a[:, :, :, :2], expand_box_b[:, :, :, :2])
    right_down = tf.minimum(expand_box_a[:, :, :, 2:], expand_box_b[:, :, :, 2:])

    # Intersection area and the resulting IoU.
    inter_section = tf.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = boxes1_area + boxes2_area - inter_area
    iou = inter_area / (union_area + 1e-9)
    return iou
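A small sketch of the expected call shape (boxes are [cx, cy, w, h]), run eagerly with TensorFlow 2.

import tensorflow as tf

box_a = tf.constant([[[0.5, 0.5, 1.0, 1.0],
                      [1.5, 1.5, 1.0, 1.0]]])   # [c=1, A=2, 4]
box_b = tf.constant([[[0.5, 0.5, 1.0, 1.0],
                      [5.0, 5.0, 1.0, 1.0]]])   # [c=1, B=2, 4]
iou = _iou(box_a, box_b)                        # shape [1, 2, 2]
# iou[0, 0, 0] is ~1 (identical boxes); iou[0, 0, 1] is 0 (disjoint boxes).
print(iou)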
5,348,285
def subsequent_mask(size, device=device): """ Mask out subsequent positions. upper diagonal elements should be zero :param size: :return: mask where positions are filled with zero for subsequent positions """ # upper diagonal elements are 1s, lower diagonal and the main diagonal are zeroed triu = torch.triu(torch.ones(size, size, dtype=torch.int8, device=device), diagonal=1) # invert it mask = triu == 0 mask = mask.unsqueeze(0) return mask
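For illustration, a sketch of the mask produced for a length-3 sequence; the device is passed explicitly here since the default comes from the surrounding module.

import torch

m = subsequent_mask(3, device=torch.device("cpu"))
print(m)
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])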
5,348,286
def read_gcs_file_if_exists(gcs_client: storage.Client, gsurl: str) -> Optional[str]: """return string of gcs object contents or None if the object does not exist """ try: return read_gcs_file(gcs_client, gsurl) except google.cloud.exceptions.NotFound: return None
5,348,287
def collapse_multigraph_to_nx( graph: Union[gr.MultiDiGraph, gr.OrderedMultiDiGraph]) -> nx.DiGraph: """ Collapses a directed multigraph into a networkx directed graph. In the output directed graph, each node is a number, which contains itself as node_data['node'], while each edge contains a list of the data from the original edges as its attribute (edge_data[0...N]). :param graph: Directed multigraph object to be collapsed. :return: Collapsed directed graph object. """ # Create the digraph nodes. digraph_nodes: List[Tuple[int, Dict[str, nd.Node]]] = ([None] * graph.number_of_nodes()) node_id = {} for i, node in enumerate(graph.nodes()): digraph_nodes[i] = (i, {'node': node}) node_id[node] = i # Create the digraph edges. digraph_edges = {} for edge in graph.edges(): src = node_id[edge.src] dest = node_id[edge.dst] if (src, dest) in digraph_edges: edge_num = len(digraph_edges[src, dest]) digraph_edges[src, dest].update({edge_num: edge.data}) else: digraph_edges[src, dest] = {0: edge.data} # Create the digraph result = nx.DiGraph() result.add_nodes_from(digraph_nodes) result.add_edges_from(digraph_edges) return result
5,348,288
def main(path_images, dimension, overwrite, nb_workers): """ main entry point :param path_images: path to images :param int dimension: for 2D inages it is 0 or 1 :param bool overwrite: whether overwrite existing image on output :param int nb_workers: nb jobs running in parallel """ image_paths = sorted(glob.glob(path_images)) if not image_paths: logging.info('No images found on "%s"', path_images) return _wrap_split = partial(split_image, cut_dim=dimension, overwrite=overwrite) list(iterate_mproc_map(_wrap_split, image_paths, desc='Cut image tissues', nb_workers=nb_workers))
5,348,289
def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn): """ Get inputs from database and validate the inputs :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ pass # Validation to be added
5,348,290
def get_encoder_type(encoder_name): """ gets the class of the encoer of the given name """ if encoder_name == 'Dense': return DenseEncoder elif encoder_name == 'CNN': return CNNEncoder else: raise ValueError(encoder_name)
5,348,291
def stateless_truncated_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None): """Outputs deterministic pseudorandom values, truncated normally distributed. This is a stateless version of `tf.random.truncated_normal`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values. """ with ops.name_scope(name, "stateless_truncated_normal", [shape, seed, mean, stddev]) as name: shape = tensor_util.shape_tensor(shape) mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") rnd = gen_stateless_random_ops.stateless_truncated_normal( shape, seed, dtype) result = math_ops.add(rnd * stddev, mean, name=name) tensor_util.maybe_set_static_shape(result, shape) return result
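A short determinism check using the public wrapper (commonly exposed as tf.random.stateless_truncated_normal): the same seed always yields the same draws.

import tensorflow as tf

seed = tf.constant([1, 2], dtype=tf.int32)
a = tf.random.stateless_truncated_normal([3], seed=seed, mean=0.0, stddev=1.0)
b = tf.random.stateless_truncated_normal([3], seed=seed, mean=0.0, stddev=1.0)
assert bool(tf.reduce_all(a == b))   # identical draws for identical seeds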
5,348,292
def mean(l):
    """ Returns the mean value of the given list """
    total = 0
    for x in l:
        total = total + x
    return total / float(len(l))
5,348,293
def send_arduinos(packet : bytes): """Sends packets to light controllers.""" for ip in config.ARDUINO_IPS: try: _sock.sendto(packet,(ip, config.UDP_PORT)) except Exception: continue
5,348,294
def daqmx_tsm_s(tsm, tests_pins): """Returns LabVIEW Cluster equivalent data""" print(tests_pins) daqmx_tsms = [] sessions = [] for test_pin_group in tests_pins: print(test_pin_group) data = ni_daqmx.pins_to_session_sessions_info(tsm, test_pin_group) daqmx_tsms.append(data) sessions += data.sessions print(sessions) test = (tsm, daqmx_tsms) yield test
5,348,295
def tryLoadingFrom(tryPath,moduleName='swhlab'): """if the module is in this path, load it from the local folder.""" if not 'site-packages' in swhlab.__file__: print("loaded custom swhlab module from", os.path.dirname(swhlab.__file__)) return # no need to warn if it's already outside. while len(tryPath)>5: sp=tryPath+"/swhlab/" # imaginary swhlab module path if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"): if not os.path.dirname(tryPath) in sys.path: sys.path.insert(0,os.path.dirname(tryPath)) print("#"*80) print("# WARNING: using site-packages swhlab module") print("#"*80) tryPath=os.path.dirname(tryPath) return
5,348,296
def test_show_fiff(): """Test show_fiff.""" # this is not exhaustive, but hopefully bugs will be found in use info = show_fiff(fname_evoked) keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM', 'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE', 'FIFF_EPOCH'] assert (all(key in info for key in keys)) info = show_fiff(fname_raw, read_limit=1024) assert ('COORD_TRANS' in show_fiff(fname_fsaverage_trans))
5,348,297
def create( trial_def: Type[det.Trial], config: Optional[Dict[str, Any]] = None, local: bool = False, test: bool = False, context_dir: str = "", command: Optional[List[str]] = None, master_url: Optional[str] = None, ) -> Any: # TODO: Add a reference to the local development tutorial. """ Create an experiment. Arguments: trial_def: A class definition implementing the :class:`determined.Trial` interface. config: A dictionary representing the experiment configuration to be associated with the experiment. local: A boolean indicating if training should be done locally. When ``False``, the experiment will be submitted to the Determined cluster. Defaults to ``False``. test: A boolean indicating if the experiment should be shortened to a minimal loop of training on a small amount of data, performing validation, and checkpointing. ``test=True`` is useful for quick iteration during model porting or debugging because common errors will surface more quickly. Defaults to ``False``. context_dir: A string filepath that defines the context directory. All model code will be executed with this as the current working directory. When ``local=False``, this argument is required. All files in this directory will be uploaded to the Determined cluster. The total size of this directory must be under 96 MB. When ``local=True``, this argument is optional and defaults to the current working directory. command: A list of strings that is used as the entrypoint of the training script in the Determined task environment. When executing this function via a Python script, this argument is inferred to be ``sys.argv`` by default. When executing this function via IPython or Jupyter notebook, this argument is required. Example: When creating an experiment by running ``python train.py --flag value``, the default command is inferred as ``["train.py", "--flag", "value"]``. master_url: An optional string to use as the Determined master URL when ``local=False``. If not specified, will be inferred from the environment variable ``DET_MASTER``. """ if local and not test: raise NotImplementedError( "det.create(local=True, test=False) is not yet implemented. Please set local=False " "or test=True." ) determined.common.set_logger( util.debug_mode() or det.ExperimentConfig(config or {}).debug_enabled() ) if local: # Local test mode. with det._local_execution_manager(pathlib.Path(context_dir).resolve()): return test_one_batch( trial_class=trial_def, config=config, ) elif not load.in_runpy: # Cluster mode, but still running locally; submit the experiment. _submit_experiment( config=config, test=test, context_dir=context_dir, command=command, master_url=master_url, ) else: # Cluster mode, now on the cluster; actually train. load.runpy_trial_class = trial_def raise det.errors.StopLoadingImplementation()
5,348,298
def collapse_quadratic(velax, data, rms): """ Collapse the cube using the quadratic method presented in `Teague & Foreman-Mackey (2018)`_. Will return the line center, ``v0``, and the uncertainty on this, ``dv0``, as well as the line peak, ``Fnu``, and the uncertainty on that, ``dFnu``. This provides the sub-channel precision of :func:`bettermoments.collapse_cube.collapse_first` with the robustness to noise from :func:`bettermoments.collapse_cube.collapse_ninth`. .. _Teague & Foreman-Mackey (2018): https://iopscience.iop.org/article/10.3847/2515-5172/aae265 Args: velax (ndarray): Velocity axis of the cube. data (ndarray): Flux density or brightness temperature array. Assumes that the zeroth axis is the velocity axis. rms (float): Noise per pixel in same units as ``data``. Returns: ``v0`` (`ndarray`), ``dv0`` (`ndarray`), ``Fnu`` (`ndarray`), ``dFnu`` (`ndarray`): ``v0``, the line center in the same units as ``velax`` with ``dv0`` as the uncertainty on ``v0`` in the same units as ``velax``. ``Fnu`` is the line peak in the same units as the ``data`` with associated uncertainties, ``dFnu``. """ from bettermoments.quadratic import quadratic chan = np.diff(velax).mean() return np.squeeze(quadratic(data, x0=velax[0], dx=chan, uncertainty=rms))
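A synthetic-cube sketch, assuming the bettermoments package is installed (it is imported inside the function): inject a Gaussian line centred at +1 km/s in every pixel and recover its centre.

import numpy as np

velax = np.linspace(-5.0, 5.0, 101)                                 # velocity axis [km/s]
line = np.exp(-0.5 * ((velax[:, None, None] - 1.0) / 0.5) ** 2)     # (101, 1, 1)
rng = np.random.default_rng(42)
data = line * np.ones((1, 16, 16)) + rng.normal(0.0, 0.01, (101, 16, 16))
v0, dv0, Fnu, dFnu = collapse_quadratic(velax, data, rms=0.01)
print(v0.mean())   # should sit close to the injected centre of 1.0 km/s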
5,348,299