content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def load_spectr_folder(path, result_format="xy"):
    """
    Load a folder containing demod scope files.

    Return a list of 6 elements (one per demod), which are either ``None``,
    if there's no data for this demod, or contain that demod's trace.
    """
    data = []
    for demod in range(1, 7):
        file_path = os.path.join(path, "Freq{}.csv".format(demod))
        if os.path.exists(file_path):
            data.append(load_spectr_file(file_path, result_format=result_format))
        else:
            data.append(None)
    return data
5,354,600
def play_game(player_count: int, board: BattleshipBoard) -> None:
    """Play a game of Battleship with a given number of players."""
    os.system("clear")
    total_guesses = 0
    won_game = False
    while total_guesses < GUESSES_COUNT * player_count:
        # determine the current player and the remaining guesses
        current_player = (total_guesses % player_count) + 1
        remaining_guesses = GUESSES_COUNT - total_guesses // player_count
        print(f"Player {current_player}'s turn: {remaining_guesses} guesses left.")
        if turn(board):
            print(f"Congratulations! Player {current_player} sank the ship!")
            won_game = True
            break
        else:
            print("Sorry, you missed!")
        total_guesses += 1

    # print the board one last time, showing the ship
    if not won_game:
        print("Game over, you didn't find the ship in time.")
        print(board.to_string(show_ship=True))
5,354,601
def make_row(filename, num_cols, col_names):
    """
    Given a genome file, create and return a row of kmer counts to be
    inserted into the kmer matrix.
    """
    # Filepath
    thefile = str(filename[0])

    # Get the genome id from the filepath
    genomeid = filename[0].split('/')[-1]
    genomeid = genomeid.split('.')[-2]

    # Create a temp row to fill and return (later placed in the kmer_matrix)
    temp_row = [0] * num_cols

    # Walk through the file
    for record in SeqIO.parse(thefile, "fasta"):
        # Retrieve the sequence as a string
        kmerseq = record.seq
        #kmerseq = kmerseq._get_seq_str_and_check_alphabet(kmerseq)
        kmerseq = str(kmerseq)

        # Retrieve the kmer count as an int
        kmercount = record.id
        kmercount = int(kmercount)
        if kmercount > 255:
            kmercount = 255

        # Lookup the seq in the column list for the index
        col_index = col_names[kmerseq]

        # Put the kmercount in the right spot in the row
        temp_row[col_index] = kmercount

    return genomeid, temp_row
5,354,602
def analytic_overlap_NM(
    DQ: float, w1: float, w2: float, n1: int, n2: int
) -> float:
    """Compute the overlap between two displaced harmonic oscillators.

    This function computes the overlap integral between two harmonic
    oscillators with frequencies w1, w2 that are displaced by DQ for the
    quantum numbers n1, n2. The integral is computed using an analytic formula
    for the overlap of two displaced harmonic oscillators. The method comes
    from B.P. Zapol, Chem. Phys. Lett. 93, 549 (1982).

    Parameters
    ----------
    DQ : float
        displacement between harmonic oscillators in amu^{1/2} Angstrom
    w1, w2 : float
        frequencies of the harmonic oscillators in eV
    n1, n2 : integer
        quantum number of the overlap integral to calculate

    Returns
    -------
    np.longdouble
        overlap of the two harmonic oscillator wavefunctions
    """
    w = np.double(w1 * w2 / (w1 + w2))
    rho = np.sqrt(factor) * np.sqrt(w / 2) * DQ
    sinfi = np.sqrt(w1) / np.sqrt(w1 + w2)
    cosfi = np.sqrt(w2) / np.sqrt(w1 + w2)

    Pr1 = (-1)**n1 * np.sqrt(2 * cosfi * sinfi) * np.exp(-rho**2)
    Ix = 0.
    k1 = n2 // 2
    k2 = n2 % 2
    l1 = n1 // 2
    l2 = n1 % 2
    for kx in range(k1+1):
        for lx in range(l1+1):
            k = 2 * kx + k2
            l = 2 * lx + l2  # noqa: E741
            Pr2 = (fact(n1) * fact(n2))**0.5 / \
                (fact(k)*fact(l)*fact(k1-kx)*fact(l1-lx)) * \
                2**((k + l - n2 - n1) / 2)
            Pr3 = (sinfi**k)*(cosfi**l)
            # f = hermval(rho, [0.]*(k+l) + [1.])
            f = herm(np.float64(rho), k+l)
            Ix = Ix + Pr1*Pr2*Pr3*f
    return Ix
5,354,603
def init_manager(mocker):
    """Fixture to initialize a style constant."""
    mocker.patch.object(manager.StyleManager, "__init__", lambda x: None)

    def _create():
        return manager.StyleManager()

    return _create
5,354,604
def GaussLegendre(f, n):
    """Gauss-Legendre integration on [-1, 1] with n points."""
    x, w = numint.GaussLegendre(n)
    I = np.dot(f(x), w)
    return I
5,354,605
def directory_item_groups(
    items: List[Item], level: int
) -> Dict[str, List[Item]]:
    """Split items into groups per directory at the given level.

    The level is relative to the root directory, which is at level 0.
    """
    module_items = OrderedDict()
    for item in items:
        module_items.setdefault(item.parent_path(level), []).append(item)
    return module_items
5,354,606
def mergeSort(x):
    """ Function to sort an array using merge sort algorithm """
    if len(x) == 0 or len(x) == 1:
        return x
    else:
        middle = len(x) // 2
        a = mergeSort(x[:middle])
        b = mergeSort(x[middle:])
        return merge(a, b)
5,354,607
async def join(
    db,
    query: Union[dict, str],
    document: Optional[Dict[str, Any]] = None,
    session: Optional[AsyncIOMotorClientSession] = None,
) -> Optional[Dict[str, Any]]:
    """
    Join the otu associated with the supplied ``otu_id`` with its sequences.

    If an OTU is passed, the document will not be pulled from the database.

    :param db: the application database client
    :param query: the id of the otu to join or a Mongo query.
    :param document: use this otu document as a basis for the join
    :param session: a Motor session to use for database operations
    :return: the joined otu document
    """
    # Get the otu entry if a ``document`` parameter was not passed.
    document = document or await db.otus.find_one(query, session=session)

    if document is None:
        return None

    cursor = db.sequences.find({"otu_id": document["_id"]}, session=session)

    # Merge the sequence entries into the otu entry.
    return virtool.otus.utils.merge_otu(document, [d async for d in cursor])
5,354,608
def cp_solve(V, E, lb, ub, col_cov, cuts=[], tl=999999): """Solves a partial problem with a CP model. Args: V: List of vertices (columns). E: List of edges (if a transition between two columns is allowed). col_cov: Matrix of the zone coverages of the columns (c[i][j] == 1 if zone i is covered by column j). Returns: - Objective value of the best Hamiltonian path, -1 if there is no Hamiltonian path within the LB/UB limits, -2 if the graph is not connected (this latter case has been removed). - A feasible solution for this objective value. """ cp_start_time = time.time() num_cols = len(V) num_zones = len(col_cov) # First, check if the graph is disconnected (in which case no # Hamiltonian path exists). G = networkx.Graph() G.add_nodes_from(V) G.add_edges_from(E) # # If the graph is not connected, no Hamiltonian path can exist. # if not networkx.is_connected(G): # return -2, [] # Variables. model = cp_model.CpModel() x = [model.NewIntVar(0, num_cols-1, 'x'+str(i)) for i in range(num_rounds)] # Alternative for GCC, since the constraint is not available in OR-Tools. x_occs = [] for i in range(num_cols): occs = [] for j in range(num_rounds): boolvar = model.NewBoolVar('') model.Add(x[j] == i).OnlyEnforceIf(boolvar) model.Add(x[j] != i).OnlyEnforceIf(boolvar.Not()) occs.append(boolvar) x_occs.append(sum(occs)) # if mp_integer: # model.AddLinearConstraint(x_occs[i], 1, num_rounds-num_cols+1) # Add the CP cuts. for cut in cuts: model.Add(sum(x_occs[i] for i in range(num_cols) if i in cut) <= num_rounds-1) # Objective. if ub == 9999: ub = num_rounds+1 phi = model.NewIntVar(int(lb), math.floor(ub)-1, 'phi') coverages = [model.NewIntVar(0, num_rounds, 'c'+str(i)) for i in range(num_zones)] for i in range(num_zones): model.Add(cp_model.LinearExpr.ScalProd(x_occs, col_cov[i]) == coverages[i]) phi_low = model.NewIntVar(0, num_rounds, 'phi_low') phi_high = model.NewIntVar(0, num_rounds, 'phi_high') model.AddMinEquality(phi_low, coverages) model.AddMaxEquality(phi_high, coverages) model.Add(phi == phi_high-phi_low) model.Minimize(phi) # Regular constraint (Hamiltonian path). # For the initial state, we use a dummy node which is connected to # all other nodes. dummy = max(V)+1 start = dummy end = V arcs = [(dummy, i, i) for i in V] for e in E: arcs.append((e[0], e[1], e[1])) # Node self-loops for v in V: arcs.append((v, v, v)) # If there is only one vertex then a Hamiltonian path exists. if len(V) > 1: model.AddAutomaton(x, start, end, arcs) # Solve the model. solver = cp_model.CpSolver() solver.parameters.max_time_in_seconds = tl status = solver.Solve(model) #assert status == cp_model.OPTIMAL or status == cp_model.INFEASIBLE or status == cp_model.FEASIBLE if status == cp_model.OPTIMAL: solution = [solver.Value(x[i]) for i in range(num_rounds)] return solver.ObjectiveValue(), solution, time.time()-cp_start_time elif status == cp_model.INFEASIBLE or status == cp_model.UNKNOWN: return -1, [], time.time()-cp_start_time elif status == cp_model.FEASIBLE: return solver.ObjectiveValue(), [], time.time()-cp_start_time
5,354,609
def file_exists(path: Text):
    """
    Returns true if file exists at path.

    Args:
        path (str): Local path in filesystem.
    """
    return file_io.file_exists_v2(path)
5,354,610
def _get_page_num_detail():
    """
    Eastmoney - Data Center - Featured Data - Institutional Research - Institutional Research Details
    http://data.eastmoney.com/jgdy/xx.html
    :return: int, the total number of pages of the institutional research details
    """
    url = "http://data.eastmoney.com/DataCenter_V3/jgdy/xx.ashx"
    params = {
        "pagesize": "5000",
        "page": "1",
        "js": "var SZGpIhFb",
        "param": "",
        "sortRule": "-1",
        "sortType": "0",
        "rt": "52581407",
    }
    res = requests.get(url, params=params)
    data_json = json.loads(res.text[res.text.find("={") + 1:])
    return data_json["pages"]
5,354,611
def different_name(name: str) -> Iterator[str]:
    """Look for new names that don't conflict with existing names."""
    yield name
    for n in itertools.count(2):
        yield name + str(n)
5,354,612
def freeze_session( session, keep_var_names=None, output_names=None, clear_devices=True): """ Freezes the state of a session into a pruned computation graph. """ graph = session.graph with graph.as_default(): freeze_var_names = list(set(v.op.name for v in tf.global_variables()) .difference(keep_var_names or [])) output_names = output_names or [] output_names += [v.op.name for v in tf.global_variables()] # Graph -> GraphDef ProtoBuf input_graph_def = graph.as_graph_def() if clear_devices: for node in input_graph_def.node: node.device = "" frozen_graph = convert_variables_to_constants( session, input_graph_def, output_names, freeze_var_names) frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph) return frozen_graph
5,354,613
def send_mail(subject, body, recipient_list, bcc_list=None, from_email=None, connection=None, attachments=None, fail_silently=False, headers=None, cc_list=None, dc1_settings=None, content_subtype=None): """ Like https://docs.djangoproject.com/en/dev/topics/email/#send-mail Attachment is a list of tuples (filename, content, mime_type), where mime_type can be None. """ if not dc1_settings: dc1_settings = DefaultDc().settings shadow_email = dc1_settings.SHADOW_EMAIL # Global bcc if shadow_email: if bcc_list: bcc_list = list(bcc_list) bcc_list.append(shadow_email) else: bcc_list = [shadow_email] bcc_list = set(bcc_list) # Default "From:" header if not from_email: from_email = dc1_settings.DEFAULT_FROM_EMAIL # Compose message msg = EmailMessage(subject, body, from_email, recipient_list, bcc_list, connection=connection, attachments=attachments, headers=headers, cc=cc_list) if content_subtype: msg.content_subtype = content_subtype # Send mail if attachments: logger.info('Sending mail to "%s" with subject "%s" and attachments "%s"', recipient_list, subject, [i[0] for i in attachments]) else: logger.info('Sending mail to "%s" with subject "%s"', recipient_list, subject) return msg.send(fail_silently=fail_silently)
5,354,614
def distinct_by_t(func):
    """
    Transformation for Sequence.distinct_by
    :param func: distinct_by function
    :return: transformation
    """
    def distinct_by(sequence):
        distinct_lookup = {}
        for element in sequence:
            key = func(element)
            if key not in distinct_lookup:
                distinct_lookup[key] = element
        return distinct_lookup.values()

    return Transformation("distinct_by({0})".format(name(func)), distinct_by, None)
5,354,615
def test_parse_sections_exception(iniparse_tester, info, expected):
    """
    Tests our base function used to parse sections before we do any processing

    Ensures:
    * we can handle comments around sections.
    * caps issues
    """
    iniparse_tester.run_parsing_test(parse_sections, info, expected, Exception)
5,354,616
def are_datasets_created(path, number_of_datasets, suffix='parts'):
    """Checks existence and reads the dataset ids from the datasets file in
    the path directory
    """
    dataset_ids = []
    try:
        with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file:
            for line in datasets_file:
                dataset = line.strip()
                try:
                    dataset_id = bigml.api.get_dataset_id(dataset)
                    dataset_ids.append(dataset_id)
                except ValueError:
                    return False, dataset_ids
        if len(dataset_ids) == number_of_datasets:
            return True, dataset_ids
        else:
            return False, dataset_ids
    except IOError:
        return False, dataset_ids
5,354,617
def _get_partition_info(freq_unit):
    """
    Get the TDW cycle unit and partition format from the platform frequency unit.
    :param freq_unit: frequency (cycle) unit
    :return: TDW cycle unit, partition format
    """
    if freq_unit == "m":
        # minute task
        cycle_unit = "I"
        partition_value = ""
    elif freq_unit == "H":
        # hourly task
        cycle_unit = "H"
        partition_value = "YYYYMMDDHH"
    elif freq_unit == "d":
        # daily task
        cycle_unit = "D"
        partition_value = "YYYYMMDD"
    elif freq_unit == "w":
        # weekly task
        cycle_unit = "W"
        partition_value = "YYYYMMDD"
    elif freq_unit == "M":
        # monthly task
        cycle_unit = "M"
        partition_value = "YYYYMM"
    elif freq_unit == "O":
        # one-off task
        cycle_unit = "O"
        partition_value = ""
    else:
        # other tasks
        cycle_unit = "R"
        partition_value = ""
    return cycle_unit, partition_value
5,354,618
def metadata_property(k: str) -> property:
    """
    Make metadata fields available directly on a base class.
    """
    def getter(self: MetadataClass) -> Any:
        return getattr(self.metadata, k)

    def setter(self: MetadataClass, v: Any) -> None:
        return setattr(self.metadata, k, v)

    return property(getter, setter)
5,354,619
def write_case_to_yaml(yamFile, data):
    """
    Write the test case to a yaml file.

    :param yamFile: Yaml file path.
    :return: None.
    """
    with io.open(yamFile, 'w', encoding='utf-8') as fp:
        ordered_dump(data, fp, Dumper=yaml.SafeDumper, allow_unicode=True,
                     default_flow_style=False, indent=4)
5,354,620
def test_empty_pop():
    """ test for pop on empty stack"""
    with pytest.raises(IndexError):
        empty = Stack()
        empty.pop()
5,354,621
def _single_saver(save, pack, outfile, binary, ext, **kwargs): """Call a font saving function, providing a stream.""" # use standard streams if none provided if not outfile or outfile == '-': outfile = sys.stdout.buffer if len(pack) == 1: # we have only one font to deal with, no need to create container _multi_saver(save, [*pack][0], outfile, binary, **kwargs) else: # create container and call saver for each font in the pack with _open_container(outfile, 'w', binary) as out: # save fonts one-by-one for font in pack: # generate unique filename name = font.name.replace(' ', '_') filename = unique_name(out, name, ext) try: with _open_stream(out, filename, 'w', binary) as stream: save(font, stream, **kwargs) except Exception as e: logging.error('Could not save %s: %s', filename, e) raise
5,354,622
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an RGB image.

    Args:
        img (Tensor): Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        Tensor: Contrast adjusted image.
    """
    if not F._is_tensor_image(img):
        raise TypeError('tensor is not a torch image.')

    mean = torch.mean(rgb_to_grayscale(img).to(torch.float))

    return _blend(img, mean, contrast_factor)
5,354,623
async def handle_private_message_only(ctx, exc, calls=0): """ Exception handler for :exc:`~discord.ext.commands.PrivateMessageOnly`. Informs the user that the command can only be used in private messages. Has a cooldown of 10 seconds per user. """ if calls > 0: return _ = ctx.locale # NOTE: Title of the error message for commands that can only be used in DMs. title = _("{e:error} Private Messages Only") # NOTE: Text of the error message for commands that can only be used in DMs. text = _( "**{user}**, the `{command}` command can only be used in " "private messages." ) command = ctx.command.get_qualified_name(_) # Format text title = ctx.bot.emotes.format(title) text = text.format(user=ctx.display_name, command=command) await ctx.embed( title=title, description=text, colour=senko.Colour.error(), delete_after=15 )
5,354,624
def license_wtfpl():
    """
    Create a license object called WTF License.
    """
    return mixer.blend(cc.License, license_name="WTF License")
5,354,625
def _add_embedding_column_map_fn( k_v, original_example_key, delete_audio_from_output, audio_key, label_key, speaker_id_key): """Combine a dictionary of named embeddings with a tf.train.Example.""" k, v_dict = k_v if original_example_key not in v_dict: raise ValueError( f'Original key not found: {original_example_key} vs {v_dict.keys()}') ex_l = v_dict[original_example_key] assert len(ex_l) == 1, (len(ex_l), k_v[0], ex_l) ex = copy.deepcopy(ex_l[0]) # Beam does not allow modifying the input. assert isinstance(ex, tf.train.Example), type(ex) for name, embedding_l in v_dict.items(): if name == original_example_key: continue assert len(embedding_l) == 1, embedding_l embedding = embedding_l[0] assert isinstance(embedding, np.ndarray) assert embedding.ndim == 2, embedding.ndim # Store the embedding 2D shape and store the 1D embedding. The original # embedding can be recovered with `emb.reshape(feature['shape'])`. ex = _add_embedding_to_tfexample(ex, embedding, f'embedding/{name}') if delete_audio_from_output: ex.features.feature.pop(audio_key, None) # Assert that the label is present. If it's a integer, convert it to bytes. if label_key: if label_key not in ex.features.feature: raise ValueError(f'Label not found: {label_key} vs {ex.features.feature}') lbl_feat = ex.features.feature[label_key] if lbl_feat.int64_list.value: lbl_val_as_bytes = str(lbl_feat.int64_list.value[0]).encode('utf-8') ex.features.feature.pop(label_key, None) ex.features.feature[label_key].bytes_list.value.append(lbl_val_as_bytes) # If provided, assert that the speaker_id field is present, and of type # `bytes`. if speaker_id_key: feats = ex.features.feature assert speaker_id_key in feats, (speaker_id_key, feats.keys()) assert feats[speaker_id_key].bytes_list.value, feats[speaker_id_key] return k, ex
5,354,626
def modelf(input_shape): """ Function creating the model's graph in Keras. Argument: input_shape -- shape of the model's input data (using Keras conventions) Returns: model -- Keras model instance """ X_input = Input(shape = input_shape) ### START CODE HERE ### # Step 1: CONV layer (≈4 lines) X = Conv1D(196, kernel_size = 15, strides = 4)(X_input) # CONV1D X = BatchNormalization()(X) # Batch normalization X = Activation("relu")(X) # ReLu activation X = Dropout(0.8)(X) # dropout (use 0.8) # Step 2: First GRU Layer (≈4 lines) X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences) X = Dropout(0.8)(X) # dropout (use 0.8) X = BatchNormalization()(X) # Batch normalization # Step 3: Second GRU Layer (≈4 lines) X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences) X = Dropout(0.8)(X) # dropout (use 0.8) X = BatchNormalization()(X) # Batch normalization X = Dropout(0.8)(X) # dropout (use 0.8) # Step 4: Time-distributed dense layer (≈1 line) X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid) ### END CODE HERE ### model = Model(inputs = X_input, outputs = X) return model
5,354,627
def test_perform_prevalidation_option(postgres, db_conn, tmpdir, monkeypatch, mocked_config): """Test pre-validation is not performed if option is turned off in the config.""" files_to_zip = ['unittest_data/operator/operator1_with_rat_info_20160701_20160731.csv'] zip_files_to_tmpdir(files_to_zip, tmpdir) catalog_config_dict = { 'prospectors': [ { 'file_type': 'operator', 'paths': [str(tmpdir.join('operator1_with_rat_info_20160701_20160731.zip'))], 'schema_filename': 'OperatorImportSchema_v2.csvs' } ], 'perform_prevalidation': False } catalog_config = CatalogConfig(ignore_env=True, **catalog_config_dict) monkeypatch.setattr(mocked_config, 'catalog_config', catalog_config) # Run dirbs-catalog using db args from the temp postgres instance runner = CliRunner() result = runner.invoke(dirbs_catalog_cli, obj={'APP_CONFIG': mocked_config}) assert result.exit_code == 0 # This test basically checks that when pre-validation is disabled, then it is skipped during catalog. # The is_valid_format field would be NULL is that scenario as tested below. The scenario with # pre-validation enabled is implicitly tested in test_all_files_are_harvested test case. with db_conn.cursor() as cursor: cursor.execute('SELECT is_valid_format FROM data_catalog WHERE filename = ' '\'operator1_with_rat_info_20160701_20160731.zip\'') assert cursor.fetchone().is_valid_format is None
5,354,628
def merge_parameters(col_orig, col_new, klass, name_attr="name", value_attr="value"): """This method updates col_orig removing any that aren't in col_new, updating those that are, and adding new ones using klass as the constructor col_new is a dict col_orig is a list klass is a type """ working = col_new.copy() to_del = [] for obj in col_orig: if getattr(obj,name_attr) in working: # Update setattr(obj, value_attr, working[obj.name]) del working[obj.name] else: # Delete pending to_del.append(obj) # Delete for obj in to_del: col_orig.remove(obj) # Add for k in working: obj = klass() setattr(obj, name_attr, k) setattr(obj, value_attr, working[k]) col_orig.append(obj)
5,354,629
def get_mbed_official_psa_release(target=None):
    """
    Creates a list of PSA targets with default toolchain and
    artifact delivery directory.

    :param target: Ask for specific target, None for all targets.
    :return: List of tuples (target, toolchain, delivery directory).
    """
    psa_targets_release_list = []
    psa_secure_targets = [t for t in TARGET_NAMES
                          if Target.get_target(t).is_PSA_secure_target]
    if target is not None:
        if target not in psa_secure_targets:
            raise Exception("{} is not a PSA secure target".format(target))
        psa_targets_release_list.append(_get_target_info(target))
    else:
        # collect info for every PSA secure target, not the (None) argument
        for t in psa_secure_targets:
            psa_targets_release_list.append(_get_target_info(t))

    return psa_targets_release_list
5,354,630
def diff_files(expectfile, actualfile):
    """Diff the files, and display detailed output if any"""
    cmd = ['diff', '-wB', expectfile, actualfile]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=sys.stderr)
    (stdout, _unused) = proc.communicate()
    if isinstance(stdout, bytes):
        stdout = stdout.decode('utf-8')
    if not stdout:
        print('no difference')
        return
    print('Expect: %s' % expectfile)
    print('Actual: %s' % actualfile)
    print('command: diff -w %s %s' % (expectfile, actualfile))
    print(stdout)
5,354,631
def assignSentenceIds(tokLayer: TokenLayer):
    """
    propagate sentence IDs to the given layer from a higher layer
    """
    if not tokLayer:
        return

    for tok in tokLayer:
        currSentId = None
        if tok.linksHigher and not isinstance(tok.linksHigher[0], DeletionToken):
            currSentId = tok.linksHigher[0].sentenceId
        tok.sentenceId = currSentId

    firstOkIdx = None  # in case there were unlinked nodes at the beginning
    for i, tok in enumerate(tokLayer):
        if tok.sentenceId:
            firstSentenceId = tok.sentenceId
            firstOkIdx = i
            break

    if firstOkIdx is None:
        print('could not assign sentence ID to tokens',
              ', '.join(t.tid for t in tokLayer), file=sys.stderr)
        for tok in tokLayer:
            tok.sentenceId = 'unknown'
        return

    for i in range(firstOkIdx):
        tokLayer.tokens[i].sentenceId = firstSentenceId

    for i, tok in enumerate(tokLayer):
        if tok.sentenceId is None:
            tok.sentenceId = tokLayer.tokens[i - 1].sentenceId
        assert tok.sentenceId
5,354,632
def match_l2(X, Y, match_rows=False, normalize=True):
    """Return the minimum Frobenius distance between X and Y over
    permutations of columns (or rows)."""
    res = _match_factors(X, Y, l2_similarity, match_rows)
    res['score'] = np.sqrt(-res['score'])
    if normalize:
        res['score'] = res['score'] / np.linalg.norm(X, 'fro')
    return res
5,354,633
def subparser(subparsers):
    """Define the `kevlar mutate` command-line interface."""
    desc = 'Apply the specified mutations to the genome provided.'
    subparser = subparsers.add_parser('mutate', description=desc)
    subparser.add_argument('-o', '--out', metavar='FILE',
                           help='output file; default is terminal (stdout)')
    subparser.add_argument('mutations', help='mutations file')
    subparser.add_argument('genome', help='genome to mutate')
5,354,634
def doNMFDriedger(V, W, L, r = 7, p = 10, c = 3, plotfn = None, plotfnw = None): """ Implement the technique from "Let It Bee-Towards NMF-Inspired Audio Mosaicing" :param V: M x N target matrix :param W: An M x K matrix of template sounds in some time order\ along the second axis :param L: Number of iterations :param r: Width of the repeated activation filter :param p: Degree of polyphony; i.e. number of values in each column\ of H which should be un-shrunken :param c: Half length of time-continuous activation filter """ N = V.shape[1] K = W.shape[1] tic = time.time() H = np.random.rand(K, N) print("H.shape = ", H.shape) print("Time elapsed H initializing: %.3g"%(time.time() - tic)) errs = np.zeros(L+1) errs[0] = getKLError(V, W.dot(H)) if plotfnw: plt.figure(figsize=(12, 3)) plotfnw(W) plt.savefig("Driedger_W.svg", bbox_inches='tight') if plotfn: res=4 plt.figure(figsize=(res*2, res*2)) for l in range(L): print("NMF Driedger iteration %i of %i"%(l+1, L)) iterfac = 1-float(l+1)/L tic = time.time() #Step 1: Avoid repeated activations print("Doing Repeated Activations...") MuH = scipy.ndimage.filters.maximum_filter(H, size=(1, r)) H[H<MuH] = H[H<MuH]*iterfac #Step 2: Restrict number of simultaneous activations print("Restricting simultaneous activations...") #Use partitions instead of sorting for speed colCutoff = -np.partition(-H, p, 0)[p, :] H[H < colCutoff[None, :]] = H[H < colCutoff[None, :]]*iterfac #Step 3: Supporting time-continuous activations if c > 0: print("Supporting time-continuous activations...") di = K-1 dj = 0 for k in range(-H.shape[0]+1, H.shape[1]): z = np.cumsum(np.concatenate((np.zeros(c), np.diag(H, k), np.zeros(c)))) x2 = z[2*c::] - z[0:-2*c] H[di+np.arange(len(x2)), dj+np.arange(len(x2))] = x2 if di == 0: dj += 1 else: di -= 1 #KL Divergence Version WH = W.dot(H) WH[WH == 0] = 1 VLam = V/WH WDenom = np.sum(W, 0) WDenom[WDenom == 0] = 1 H = H*((W.T).dot(VLam)/WDenom[:, None]) print("Elapsed Time H Update %.3g"%(time.time() - tic)) errs[l+1] = getKLError(V, W.dot(H)) #Output plots every 20 iterations if plotfn and ((l+1)==L or (l+1)%20 == 0): plt.clf() plotfn(V, W, H, l+1, errs) plt.savefig("NMFDriedger_%i.png"%(l+1), bbox_inches = 'tight') return H
5,354,635
def extract_red(image):
    """ Returns the red channel of the input image.

    It is highly recommended to make a copy of the input image in order to
    avoid modifying the original array. You can do this by calling:
    temp_image = np.copy(image)

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: Output 2D array containing the red channel.
    """
    # Since Red is last index, we want all rows, columns, and the last channel.
    return np.copy(image[:, :, 2])
5,354,636
def _cleaned_data_to_key(cleaned_data):
    """
    Return a tuple representing a unique key for the cleaned data of an
    InteractionCSVRowForm.
    """
    # As an optimisation we could just track the pk for model instances,
    # but that is omitted for simplicity
    key = tuple(cleaned_data.get(field) for field in DUPLICATE_FIELD_MAPPING)
    if all(key):
        return key

    # Some of the fields are missing (this happens if they did not pass validation)
    return None
5,354,637
def add_xspice_to_circuit(part, circuit): """ Add an XSPICE part to a PySpice Circuit object. Args: part: SKiDL Part object. circuit: PySpice Circuit object. """ # The device reference is always the first positional argument. args = [_get_spice_ref(part)] # Add the pins to the argument list. for pin in part.pins: if isinstance(pin, Pin): # Add a non-vector pin. Use _xspice_node() in case pin is unconnected. args.append(_xspice_node(pin)) elif isinstance(pin, PinList): # Add pins from a pin vector. args.append("[" + " ".join([node(p) for p in pin]) + "]") else: logger.error("Illegal XSPICE argument: {}".format(pin)) # The XSPICE model name should be the only keyword argument. kwargs = {"model": part.model.name} # Add the part to the PySpice circuit. getattr(circuit, part.pyspice["name"])(*args, **kwargs)
5,354,638
def schedule_for_cleanup(request, syn):
    """Returns a closure that takes an item that should be scheduled for cleanup.
    The cleanup will occur after the module tests finish to limit the residue left
    behind if a test session should be prematurely aborted for any reason."""
    items = []

    def _append_cleanup(item):
        items.append(item)

    def cleanup_scheduled_items():
        _cleanup(syn, items)

    request.addfinalizer(cleanup_scheduled_items)

    return _append_cleanup
5,354,639
async def startup_event():
    """Startup event.

    This is called after the server is started and:
        - connects to the database
        - loads roles
    """
    retry_interval = 5

    while True:
        try:
            await Postgres.connect()
        except Exception as e:
            msg = " ".join([str(k) for k in e.args])
            logging.error(f"Unable to connect to the database ({msg})")
            logging.info(f"Retrying in {retry_interval} seconds")
            await asyncio.sleep(retry_interval)
        else:
            break

    await Roles.load()
    logging.goodnews("Server started")
5,354,640
def build_cooccurrences(sequences: List[List[int]], cache: defaultdict, window=3):
    """
    It updates a shared cache by iteratively calling 'bigram_count'
    :param sequences: The input sequences
    :param cache: The current cache
    :param window: The size of window to look around a central word
    """
    for seq in sequences:
        bigram_count(token_list=seq, cache=cache, window_size=window)
5,354,641
def unpickle_context(content, pattern=None):
    """
    Unpickle the context from the given content string or return None.
    """
    pickle = get_pickle()
    if pattern is None:
        pattern = pickled_context_re
    match = pattern.search(content)
    if match:
        return pickle.loads(base64.standard_b64decode(match.group(1)))
    return None
5,354,642
def ask_openid(request, openid_url, redirect_to, on_failure=None, sreg_request=None): """ basic function to ask openid and return response """ on_failure = on_failure or signin_failure trust_root = getattr( settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/' ) if xri.identifierScheme(openid_url) == 'XRI' and getattr( settings, 'OPENID_DISALLOW_INAMES', False ): msg = _("i-names are not supported") return on_failure(request, msg) consumer = Consumer(request.session, DjangoOpenIDStore()) try: auth_request = consumer.begin(openid_url) except DiscoveryFailure: msg = _("The password or OpenID was invalid") return on_failure(request, msg) if sreg_request: auth_request.addExtension(sreg_request) redirect_url = auth_request.redirectURL(trust_root, redirect_to) return HttpResponseRedirect(redirect_url)
5,354,643
def get_accuracy_ANIL(logits, targets): """Compute the accuracy (after adaptation) of MAML on the test/query points Parameters ---------- logits : `torch.FloatTensor` instance Outputs/logits of the model on the query points. This tensor has shape `(num_examples, num_classes)`. targets : `torch.LongTensor` instance A tensor containing the targets of the query points. This tensor has shape `(num_examples,)`. Returns ------- accuracy : `torch.FloatTensor` instance Mean accuracy on the query points """ _, predictions = torch.max(logits, dim=-1) return torch.mean(predictions.eq(targets).float())
5,354,644
def stream_logger():
    """ sets up the logger for the Simpyl object to log to the output
    """
    logger = logging.Logger('stream_handler')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger.addHandler(handler)
    return logger
5,354,645
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
    """
    Get the lookup key that identifies the coordinate frame specified.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        coordinate_frame: Identifies coordinate frame.

    Returns:
        coordinate_set(str): Coordinate Frame lookup key.
    """
    query = "SELECT id FROM coordinate_frame WHERE name = %s"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coordinate_frame,))
        coordinate_set = cursor.fetchall()
        if len(coordinate_set) != 1:
            raise Exception(
                "Can't find coordinate frame: {}".format(coordinate_frame))
        else:
            LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame,
                                                            coordinate_set[0][0]))

    return coordinate_set[0][0]
5,354,646
def entry_from_resource(resource, client, loggers): """Detect correct entry type from resource and instantiate. :type resource: dict :param resource: One entry resource from API response. :type client: :class:`~google.cloud.logging.client.Client` :param client: Client that owns the log entry. :type loggers: dict :param loggers: A mapping of logger fullnames -> loggers. If the logger that owns the entry is not in ``loggers``, the entry will have a newly-created logger. :rtype: :class:`~google.cloud.logging.entries._BaseEntry` :returns: The entry instance, constructed via the resource """ if 'textPayload' in resource: return TextEntry.from_api_repr(resource, client, loggers) if 'jsonPayload' in resource: return StructEntry.from_api_repr(resource, client, loggers) if 'protoPayload' in resource: return ProtobufEntry.from_api_repr(resource, client, loggers) return EmptyEntry.from_api_repr(resource, client, loggers)
5,354,647
def make_preprocesser(training_data): """ Constructs a preprocessing function ready to apply to new dataframes. Crucially, the interpolating that is done based on the training data set is remembered so it can be applied to test datasets (e.g the mean age that is used to fill in missing values for 'Age' will be fixed based on the mean age within the training data set). Summary by column: ['PassengerId', 'Survived', # this is our target, not a feature 'Pclass', # keep as is: ordinal value should work, even though it's inverted (higher number is lower class cabin) 'Name', # omit (could try some fancy stuff like inferring ethnicity, but skip for now) 'Sex', # code to 0 / 1 'Age', # replace missing with median 'SibSp', 'Parch', 'Ticket', # omit (doesn't seem like low hanging fruit, could look more closely for pattern later) 'Fare', # keep, as fare could be finer grained proxy for socio economic status, sense of entitlement / power in getting on boat 'Cabin', # one hot encode using first letter as cabin as the cabin sector 'Embarked'] # one hot encode Params: df: pandas.DataFrame containing the training data Returns: fn: a function to preprocess a dataframe (either before training or fitting a new dataset) """ def pick_features(df): return df[['PassengerId', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']] # save median Age so we can use it to fill in missing data consistently # on any dataset median_age_series = training_data[['Age', 'Fare']].median() def fix_missing(df): return df.fillna(median_age_series) def map_sex(df): df['Sex'] = df['Sex'].map({'male': 0, 'female': 1}) return df def one_hot_cabin(df): def cabin_sector(cabin): if isinstance(cabin, str): return cabin[0].lower() else: return cabin df[['cabin_sector']] = df[['Cabin']].applymap(cabin_sector) one_hot = pd.get_dummies(df['cabin_sector'], prefix="cabin_sector") interesting_cabin_sectors = ["cabin_sector_{}".format(l) for l in 'bcde'] for column, _ in one_hot.iteritems(): if column.startswith('cabin_sector_') and column not in interesting_cabin_sectors: one_hot = one_hot.drop(column, axis=1) df = df.join(one_hot) df = df.drop('Cabin', axis=1) df = df.drop('cabin_sector', axis=1) return df def one_hot_embarked(df): one_hot = pd.get_dummies(df['Embarked'], prefix="embarked") df = df.join(one_hot) df = df.drop('Embarked', axis=1) return df # We want standard scaling fit on the training data, so we get a scaler ready # for application now. It needs to be applied to data that already has the other # pre-processing applied. training_data_all_but_scaled = map_sex(fix_missing(pick_features(training_data))) stdsc = StandardScaler() stdsc.fit(training_data_all_but_scaled[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']]) def scale_df(df): df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']] = \ stdsc.transform(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']]) df[['Sex']] = df[['Sex']].applymap(lambda x: 1 if x == 1 else -1) for column, _ in df.iteritems(): if column.startswith('cabin_sector_') or column.startswith('embarked_'): df[[column]] = df[[column]].applymap(lambda x: 1 if x == 1 else -1) return df def preprocess(df, scale=True): """ Preprocesses a dataframe so it is ready for use with a model (either for training or prediction). Params: scale: whether to apply feature scaling. E.g with random forests feature scaling isn't necessary. """ all_but_scaled = one_hot_embarked(one_hot_cabin(map_sex(fix_missing(pick_features(df))))) if scale: return scale_df(all_but_scaled) else: return all_but_scaled return preprocess
5,354,648
def get_breakeven_prob(predicted, threshold=0):
    """
    This function calculates the probability of a stock being above a certain
    threshold, which can be defined as a value (final stock price) or return
    rate (percentage change).
    """
    predicted0 = predicted.iloc[0, 0]
    predicted = predicted.iloc[-1]
    predList = list(predicted)

    over = [(i * 100) / predicted0 for i in predList
            if ((i - predicted0) * 100) / predicted0 >= threshold]
    less = [(i * 100) / predicted0 for i in predList
            if ((i - predicted0) * 100) / predicted0 < threshold]

    return len(over) / (len(over) + len(less))
5,354,649
def test_comoving_vol_init_invalid_cosmology(comoving_vol_converter):
    """Test the init method when an invalid cosmology is given"""
    with pytest.raises(RuntimeError) as excinfo:
        ComovingDistanceConverter.__init__(
            comoving_vol_converter,
            cosmology='Planck'
        )
    assert 'Could not get specified cosmology' in str(excinfo.value)
5,354,650
def trim_whitespace(sub_map, df, source_col, op_col):
    """Trims whitespace on all values in the column"""
    df[op_col] = df[op_col].transform(
        lambda x: x.strip() if not pd.isnull(x) else x)
    return df
5,354,651
def beneficiary(): """ RESTful CRUD controller """ # Normally only used in Report # - make changes as component of Project s3db.configure("project_beneficiary", deletable = False, editable = False, insertable = False, ) list_btn = A(T("Beneficiary Report"), _href=URL(c="project", f="beneficiary", args="report", vars=get_vars), _class="action-btn") #def prep(r): # if r.method in ("create", "create.popup", "update", "update.popup"): # # Coming from Profile page? # location_id = r.get_vars.get("~.(location)", None) # if location_id: # field = r.table.location_id # field.default = location_id # field.readable = field.writable = False # if r.record: # field = r.table.location_id # field.comment = None # field.writable = False # return True #s3.prep = prep return s3_rest_controller(hide_filter=False)
5,354,652
def test__hr_grad():
    """ test automech.py hr_scan
    """
    tsks = '\tspc hr_grad runlvl=lvl_scf inplvl=lvl_scf tors_model=1dhrfa'
    drivers = '\tes'

    # Format the run.dat with the run-save dir paths
    with open(RUN_TEMP_PATH, 'r') as file_obj:
        run_str = file_obj.read()
    with open(RUN_DAT_PATH, 'w') as file_obj:
        file_obj.write(run_str.format(DB_PATH, tsks, drivers))

    subprocess.call(CMD_LINE.split())
5,354,653
def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() #cfg.merge_from_file(args.config_file) #cfg.merge_from_file(model_zoo.get_config_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml")) cfg.merge_from_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml") DATASET_NAME= "LGZ_v5_more_rotations" cfg.DATASETS.TRAIN = (f"{DATASET_NAME}_train",) cfg.DATASETS.VAL = (f"{DATASET_NAME}_val",) cfg.DATASETS.TEST = (f"{DATASET_NAME}_test",) cfg.merge_from_list(args.opts) cfg.freeze() default_setup(cfg, args) return cfg
5,354,654
def get_distinct_quotation_uid(*args, **kwargs):
    """
    Get the distinct quotation users (uid)
    :param args:
    :param kwargs:
    :return: List
    """
    field = 'uid'
    return map(lambda x: getattr(x, field),
               db_instance.get_distinct_field(Quotation, field, *args, **kwargs))
5,354,655
def present_from(ref: pathlib.Path, obs: pathlib.Path) -> pathlib.Path:
    """Build a somehow least surprising difference folder from ref and obs."""
    ref_code = ref.parts[-1]
    if obs.is_file():
        return pathlib.Path(*obs.parts[:-1], f'diff-of-{obs.parts[-1]}')
    present = pathlib.Path(*obs.parts[:-1], f'diff-of-{ref_code}_{obs.parts[-1]}')
    present.mkdir(parents=True, exist_ok=True)
    return present
5,354,656
def dataQ_feeding(filename_queue, feat_dim, seq_len): """ Reads and parse the examples from alignment dataset Args: filename_queue: A queue of strings with the filenames to read from. Returns: An object representing a single example, with the following fields: MFCC sequence: 200 * 39 dimensions """ class MFCCRECORD(object): pass result = MFCCRECORD() ### use the line reader ### reader = tf.TextLineReader() #values = [] #for i in range(NUM_UP_TO): # key, value = reader.read(filename_queue) # values.append(value) key, value = reader.read(filename_queue) ### try to read NUM_UP_TO lines in one time ### ### read the csv file into features ### # seq = [] record_defaults = [[1.] for i in range(feat_dim*seq_len)] # for value in values: # seq.append(tf.decode_csv(value, record_defaults=record_defaults)) tmp_result = tf.decode_csv(value, record_defaults=record_defaults) ### so we have (NUM_UP_TO, seq_len *feat_dim ) ### ### reshape it into (NUM_UP_TO, seq_len, feat_dim) ### ### result.mfcc: sequence ### mfcc = tf.cast(tf.reshape(tmp_result, shape=(seq_len , \ feat_dim)),tf.float32) ### result.rev_mfcc: reverse of sequence ### # result.rev_mfcc = tf.reverse(result.mfcc, [False, True]) return mfcc, mfcc
5,354,657
def dummy_backend(_, **kwargs):
    """
    Dummy backend always returning stats with 0
    """
    return _default_statement()
5,354,658
def comp_mass(self):
    """Compute the mass of the Frame

    Parameters
    ----------
    self : Frame
        A Frame object

    Returns
    -------
    Mfra: float
        Mass of the Frame [kg]
    """
    Vfra = self.comp_volume()

    # Mass computation
    return Vfra * self.mat_type.struct.rho
5,354,659
def write_DS9reg(x, y, filename=None, coord='IMAGE', ptype='x', size=20, c='green', tag='all', width=1, text=None): """Write a region file for ds9 for a list of coordinates. Taken from Neil Crighton's barak.io Parameters ---------- x, y : arrays of floats, shape (N,) The coordinates. These may be image or WCS. Please make sure to update the coord keyword accordingly. filename : str, optional A filename to write to. coord : str (`IMAGE` or `J2000`) The coordinate type: `IMAGE` (pixel coordinates) or `J2000` (celestial coordinates). ptype : str or np.array of shape (N,) DS9 point type (e.g. `circle`, `box`, `diamond`, `cross`, `x`, `arrow`, `boxcircle`) size : int or np.array of shape (N,) DS9 point size. c : str or np.array of shape (N,) point colour: `cyan` `blue` `magenta` `red` `green` `yellow` `white` `black`}. tag : str or np.array of shape (N,) DS9 tag. e.g. 'all' width : int or np.array of shape (N,) DS9 width text : str or np.array of shape (N,) Text """ header = ['global font="helvetica 10 normal" select=1 highlite=1 ' 'edit=0 move=1 delete=1 include=1 fixed=0 source\n'] header.append(coord + '\n') x = np.array(x) y = np.array(y) if isinstance(ptype, basestring): ptype = [ptype] * len(x) if isinstance(size, int): size = [size] * len(x) if isinstance(width, int): width = [width] * len(x) if isinstance(text, basestring): text = [text] * len(x) elif text is None: text = list(range(len(x))) if isinstance(tag, basestring): tag = [tag] * len(x) if isinstance(c, basestring): c = [c] * len(x) regions = [] # fmt = ('point(%12.8f,%12.8f) # \ # point=%s %s width=%s text={%s} color=%s tag={%s}\n') for i in xrange(len(x)): s = 'point({:.8f},{:.8f}) # point={} {} width={} text={{{}}} color={} tag={}\n'\ .format(x[i], y[i], ptype[i], size[i], width[i], text[i], c[i], tag[i]) regions.append(s) if filename is not None: fh = open(filename,'w') fh.writelines(header + regions) fh.close() return header, regions
5,354,660
def bug_xmltoolkit3(): """ Check that close doesn't accept optional argument >>> parser = sgmlop.XMLParser() >>> parser.feed("foo") 0 >>> parser.close("bar") Traceback (most recent call last): TypeError: close() takes exactly 0 arguments (1 given) >>> parser.close() 0 """
5,354,661
def checkInputDataValid(lstX: list = None, lstY: list = None, f: object = None) -> (int, tuple):
    """
    :param lstX:
    :param lstY:
    :param f:
    :return: int, (int, list, int, int)
    """
    ret = -1
    rettuple = (-1, [], -1, -1)
    if lstX is None or lstY is None:
        msg = "No input lists of arrays"
        msg2log(None, msg, f)
        return ret, rettuple
    if not lstX or not lstY:
        msg = "Empty input lists of arrays"
        msg2log(None, msg, f)
        return ret, rettuple
    k = len(lstX)
    k1 = len(lstY)
    if k1 != k:
        msg = "The input lists have a different number of items: {} vs {}".format(k, k1)
        msg2log(None, msg, f)
        return ret, rettuple
    lstP = []
    lstN = []
    lstNy = []
    for item in lstX:
        X: np.array = item
        (n, p) = X.shape
        lstP.append(p)
        lstN.append(n)
    for item in lstY:
        y: np.array = item
        (n,) = y.shape
        lstNy.append(n)
    p = lstP[0]
    for i in range(len(lstP)):
        if p != lstP[i]:
            msg = "The feature numbers are different: {} vs {}".format(p, lstP[i])
            msg2log(None, msg, f)
            return ret, rettuple
    if lstN != lstNy:
        msg = "Different sample sizes:\n{}\n{}".format(lstN, lstNy)
        msg2log(None, msg, f)
        return ret, rettuple
    rettuple = (k, lstN, p, sum(lstN))
    ret = 0
    return ret, rettuple
5,354,662
def parseManualTree(node): """Parses a tree of the manual Main_Page and returns it through a list containing tuples: [(title, href, [(title, href, [...]), ...]), ...]""" if node.nodeType != Node.ELEMENT_NODE: return [] result = [] lastadded = None for e in node.childNodes: if e.nodeType == Node.ELEMENT_NODE: if e.localName == "ol": assert lastadded != None for i in xrange(len(result)): if result[i][:2] == lastadded: result[i] = lastadded + (parseManualTree(e),) elif e.localName == "a": href, title = parseAnchor(e) lastadded = title, href result.append((title, href, None)) return result
5,354,663
def test_rf_frequency_view(exopy_qtbot, root_view, task_workbench):
    """Test SetRFFrequencyTask widget outside of a LoopTask.

    """
    task = SetRFFrequencyTask(name='Test')
    root_view.task.add_child_task(0, task)
    show_and_close_widget(exopy_qtbot, RFFrequencyView(task=task, root=root_view))
5,354,664
def main(): """ Main program entry point. """ config_manager.register(MainConfig) parser = argparse.ArgumentParser(description='Bandit and Bayesian Optimization Experimental Framework.') parser.add_argument("task", type=parse_task) parser.add_argument("experiment", nargs='?') parser.add_argument("--save", help='Path where to save to.') parser.add_argument("--config", required=False, default=None) parser.add_argument("--include", required=False, default=None, type=str, nargs='+') parser.add_argument("--overwrite", required=False, action='store_true') parser.add_argument("--plots", required=False, help='Immediately plot after run.', nargs='*') parser.add_argument("--aggregator", required=False, help='Immediately plot after run.', nargs='*') parser.add_argument("--remote", required=False, action='store_true', help='Run remotely using ceramo.') parser.add_argument("-m", "--message", required=False, help='Message.') args = parser.parse_args() initialize_framework() args.task(args)
5,354,665
def _create_hub_module(save_path): """Create a TensorFlow Hub module for testing. Args: save_path: The directory path in which to save the model. """ # Module function that doubles its input. def double_module_fn(): w = tf.Variable([2.0, 4.0]) x = tf.compat.v1.placeholder(dtype=tf.float32) hub.add_signature(inputs=x, outputs=x*w) graph = tf.Graph() with graph.as_default(): spec = hub.create_module_spec(double_module_fn) m = hub.Module(spec) # Export the module. with tf.compat.v1.Session(graph=graph) as sess: sess.run(tf.compat.v1.global_variables_initializer()) m.export(save_path, sess)
5,354,666
def validation_by_method(mapping_input: Union[List, Dict[str, List]],
                         graph: nx.Graph,
                         kernel: Matrix,
                         k: Optional[int] = 100
                         ) -> Tuple[Dict[str, list], Dict[str, list]]:
    """Repeated holdout validation by diffusion method.

    :param mapping_input: List or value dictionary of labels {'label': value}.
    :param graph: Network as a graph object.
    :param kernel: Network as a kernel.
    :param k: Iterations for the repeated_holdout validation.
    """
    auroc_metrics = defaultdict(list)
    auprc_metrics = defaultdict(list)

    for _ in tqdm(range(k)):
        input_diff, validation_diff = _get_random_cv_split_input_and_validation(
            mapping_input, kernel
        )

        scores_z = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=True)
        scores_raw = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=False)
        scores_page_rank = generate_pagerank_baseline(graph, kernel)

        method_validation_scores = {
            'raw': (validation_diff, scores_raw),
            'z': (validation_diff, scores_z),
            'random': (
                validation_diff,
                _generate_random_score_ranking(kernel)
            ),
            'page_rank': (
                validation_diff,
                scores_page_rank
            ),
        }

        for method, validation_set in method_validation_scores.items():
            try:
                auroc, auprc = _get_metrics(*validation_set)
            except ValueError:
                auroc, auprc = (0, 0)
                print(f'ROC AUC unable to calculate for {validation_set}')

            auroc_metrics[method].append(auroc)
            auprc_metrics[method].append(auprc)

    return auroc_metrics, auprc_metrics
5,354,667
async def test_async_setup_raises_entry_not_ready(opp: OpenPeerPower): """Test that it throws ConfigEntryNotReady when exception occurs during setup.""" config_entry = MockConfigEntry( domain=DOMAIN, data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"}, ) config_entry.add_to_opp(opp) with patch_bond_version(side_effect=ClientConnectionError()): await opp.config_entries.async_setup(config_entry.entry_id) assert config_entry.state is ConfigEntryState.SETUP_RETRY
5,354,668
def headnode_connect_network(headnode, hnic, network): """Connect a headnode's hnic to a network. Raises IllegalStateError if the headnode has already been started. Raises ProjectMismatchError if the project does not have access rights to the given network. Raises BadArgumentError if the network is a non-allocated network. This is currently unsupported due to an implementation limitation, but will be supported in a future release. See issue #333. """ db = model.Session() headnode = _must_find(db, model.Headnode, headnode) hnic = _must_find_n(db, headnode, model.Hnic, hnic) network = _must_find(db, model.Network, network) if not network.allocated: raise BadArgumentError("Headnodes may only be connected to networks " "allocated by the project.") if not headnode.dirty: raise IllegalStateError project = headnode.project if (network.access is not None) and (network.access is not project): raise ProjectMismatchError("Project does not have access to given network.") hnic.network = network db.commit()
5,354,669
def get_about_agent():
    """
    This method returns general information about the agent, like the name and the about text.

    Args:
        @param: token: Authentication token.
    """
    data = request.get_json()
    if "token" in data:
        channel = get_channel_id(data["token"])
        if channel is not None:
            agent = channel.agent
            return {"about": agent.about, "name": agent.name}
        else:
            return {"message": "token is not correct", "status": False}
    else:
        return {"message": "token is not correct", "status": False}
5,354,670
def add_namespace(tree, new_ns_name, new_ns_uri): """Add a namespace to a Schema. Args: tree (etree._ElementTree): The ElementTree to add a namespace to. new_ns_name (str): The name of the new namespace. Must be valid against https://www.w3.org/TR/REC-xml-names/#NT-NSAttName new_ns_uri (str): The URI for the new namespace. Must be non-empty and valid against https://www.ietf.org/rfc/rfc2396.txt Returns: etree.ElementTree: A copy of the provided `tree`, modified to include the specified namespace. Raises: TypeError: If an attempt is made to add a namespace to something other than a ElementTree. ValueError: If the namespace name or URI are invalid values. ValueError: If the namespace name already exists. Note: lxml does not allow modification of namespaces within a tree that already exists. As such, string manipulation is used. https://bugs.launchpad.net/lxml/+bug/555602 Todo: Also add new namespaces to Datasets. Add checks for the format of new_ns_name - for syntax, see: https://www.w3.org/TR/REC-xml-names/#NT-NSAttName Add checks for the format of new_ns_uri - for syntax, see: https://www.ietf.org/rfc/rfc2396.txt Tidy this up. """ if not isinstance(tree, etree._ElementTree): # pylint: disable=protected-access msg = "The `tree` parameter must be of type `etree._ElementTree` - it was of type {0}".format(type(tree)) iati.utilities.log_error(msg) raise TypeError(msg) if not isinstance(new_ns_name, str) or not new_ns_name: msg = "The `new_ns_name` parameter must be a non-empty string." iati.utilities.log_error(msg) raise ValueError(msg) if not isinstance(new_ns_uri, str) or not new_ns_uri: msg = "The `new_ns_uri` parameter must be a valid URI." iati.utilities.log_error(msg) raise ValueError(msg) initial_nsmap = tree.getroot().nsmap # prevent modification of existing namespaces if new_ns_name in initial_nsmap: if new_ns_uri == initial_nsmap[new_ns_name]: return tree else: msg = "There is already a namespace called {0}.".format(new_ns_name) iati.utilities.log_error(msg) raise ValueError(msg) # to add new namespace, use algorithm from http://stackoverflow.com/a/11350061 schema_str = etree.tostring(tree.getroot(), pretty_print=True).decode('unicode_escape') interim_tree = etree.ElementTree(element=None, file=StringIO(schema_str)) root = interim_tree.getroot() nsmap = root.nsmap nsmap[new_ns_name] = new_ns_uri new_root = etree.Element(root.tag, nsmap=nsmap) new_root[:] = root[:] new_tree = etree.ElementTree(new_root) return new_tree
5,354,671
def RZ(angle, invert):
    """Return numpy array with rotation gate around Z axis."""
    gate = np.zeros(4, dtype=complex).reshape(2, 2)
    if not invert:
        gate[0, 0] = np.cos(-angle/2) + np.sin(-angle/2) * 1j
        gate[1, 1] = np.cos(angle/2) + np.sin(angle/2) * 1j
    else:
        gate[0, 0] = np.cos(-angle/2) - np.sin(-angle/2) * 1j
        gate[1, 1] = np.cos(angle/2) - np.sin(angle/2) * 1j
    return gate
5,354,672
def _create_unicode(code: str) -> str:
    """
    Prepend the escaping unicode sequence to the color code
    :param code: the code, preferably an ASCII escape color code
    :return:
    """
    return u'\u001b[{}m'.format(code)
5,354,673
def compact_axis_angle_from_matrix(R): """Compute compact axis-angle from rotation matrix. This operation is called logarithmic map. Note that there are two possible solutions for the rotation axis when the angle is 180 degrees (pi). We usually assume active rotations. Parameters ---------- R : array-like, shape (3, 3) Rotation matrix strict_check : bool, optional (default: True) Raise a ValueError if the rotation matrix is not numerically close enough to a real rotation matrix. Otherwise we print a warning. Returns ------- a : array-like, shape (3,) Axis of rotation and rotation angle: angle * (x, y, z). The angle is constrained to [0, pi]. """ a = axis_angle_from_matrix(R) return compact_axis_angle(a)
5,354,674
def _generate_IPRange(Range):
    """
    IP range to CIDR and IPNetwork type

    Args:
        Range: IP range

    Returns:
        an array with CIDRs
    """
    if len(Range.rsplit('.')) == 7 and '-' in Range and '/' not in Range:
        if len(Range.rsplit('-')) == 2:
            start_ip, stop_ip = Range.rsplit('-')
            if isIP(start_ip) and isIP(stop_ip):
                return iprange_to_cidrs(start_ip, stop_ip)
            else:
                return []
        else:
            return []
    elif len(Range.rsplit('.')) == 4 and '-' not in Range and '/' in Range:
        return IPNetwork(Range)
    else:
        return []
5,354,675
def is_dict_homogeneous(data): """Returns True for homogeneous, False for heterogeneous. An empty dict is homogeneous. ndarray behaves like collection for this purpose. """ if len(data) == 0: return True k0, v0 = next(iter(data.items())) ktype0 = type(k0) vtype0 = type(v0) if ktype0 in collection_types or ktype0 == np.ndarray or vtype0 in collection_types or vtype0 == np.ndarray: return False for k, v in data.items(): ktype = type(k) vtype = type(v) if (ktype != ktype0 or ktype in collection_types or ktype == np.ndarray) or \ (vtype != vtype0 or vtype in collection_types or vtype == np.ndarray): return False return True
5,354,676
def translate(item: Union[Callable[P, T], Request]) -> Union[Generator[Any, Any, None], Callable[P, T]]: """Override current language with one from language header or 'lang' parameter. Can be used as a context manager or a decorator. If a function is decorated, one of the parameters for the function must be a `rest_framework.Request` object. """ if not isinstance(item, Request): @wraps(item) def decorator(*args: P.args, **kwargs: P.kwargs) -> Any: request = None for arg in chain(args, kwargs.values()): if isinstance(arg, Request): request = arg break if request is None: raise ValueError("No Request-object in function parameters.") with override(get_language(request)): return item(*args, **kwargs) # type: ignore return decorator @contextmanager def context_manager(request: Request) -> Generator[Any, Any, None]: with override(get_language(request)): yield return context_manager(item)
5,354,677
def get_all(isamAppliance, check_mode=False, force=False, ignore_error=False): """ Retrieving the current runtime template files directory contents """ return isamAppliance.invoke_get("Retrieving the current runtime template files directory contents", "/mga/template_files?recursive=yes", ignore_error=ignore_error)
5,354,678
def build_path(dirpath, outputfile):
    """
    Recursively index `dirpath` and store a record for every file in the
    SQLite database at `outputfile`. Returns 1 on error.
    """
    # sanity checks on the input path
    if not path.exists(dirpath):
        print("Path does not exist!")
        return 1
    if not path.isdir(dirpath):
        print("Path is not a folder")
        return 1

    # for now SQLite only
    try:
        output = create_engine("sqlite:///{}".format(outputfile))
    except Exception:
        print("Cannot create output file")
        return 1
    SQLABase.metadata.create_all(output)
    session = sessionmaker(bind=output)()

    def record_wrapper(filename):
        record = record_from_file(filename)
        session.add(record)
        session.commit()
    chdir(dirpath)
    recursiveListing(".", record_wrapper)
5,354,679
def print_symbols(f, name, node, level=0, *, tree_parser_doc, tree_parser_xpath, ignore_dirs_for_coverage): """ Prints C++ code for relevant documentation. """ indent = ' ' * level def iprint(s): f.write((indent + s).rstrip() + "\n") name_var = name if not node.first_symbol: assert level == 0, name_var full_name = name else: name_chain = node.first_symbol.name_chain assert name == name_chain[-1] full_name = "::".join(name_chain) # Override variable. if node.first_symbol.cursor.kind == CursorKind.CONSTRUCTOR: name_var = "ctor" name_var = sanitize_name(name_var) # We may get empty symbols if `libclang` produces warnings. assert len(name_var) > 0, node.first_symbol.sorting_key() iprint('// Symbol: {}'.format(full_name)) modifier = "" if level == 0: modifier = "constexpr " iprint('{}struct /* {} */ {{'.format(modifier, name_var)) root = tree_parser_xpath[-1] kind = node.first_symbol.cursor.kind if node.first_symbol else None tree_parser_doc.append(name_var) new_ele = None # Print documentation items. symbol_iter = sorted(node.doc_symbols, key=Symbol.sorting_key) doc_vars = choose_doc_var_names(symbol_iter) # New element in the XML tree. new_ele = None for symbol, doc_var in zip(symbol_iter, doc_vars): if doc_var is None: continue assert name_chain == symbol.name_chain comment = re.sub( r'@pydrake_mkdoc[a-z_]*\{.*\}', '', symbol.comment) delim = "\n" if "\n" not in comment and len(comment) < 40: delim = " " iprint(' // Source: {}:{}'.format(symbol.include, symbol.line)) iprint(' const char* {} ={}R"""({})""";'.format( doc_var, delim, comment.strip())) tree_doc_var = ".".join(tree_parser_doc + [doc_var]) ignore_xpath = False if ignore_dirs_for_coverage: ignore_xpath = symbol.include.startswith(ignore_dirs_for_coverage) new_ele = ET.SubElement(root, "Node", { "kind": str(kind), "name": name_var, "full_name": full_name, "ignore": str(int(ignore_xpath)), "doc_var": tree_doc_var, "file_name": symbol.include, }) # If the node has no doc_var's if new_ele is None: new_ele = ET.SubElement(root, "Node", { "kind": str(kind), "name": name_var, "full_name": full_name, "ignore": "", "doc_var": "", "file_name": "", }) tree_parser_xpath.append(new_ele) # Recurse into child elements. keys = sorted(node.children_map.keys()) for key in keys: child = node.children_map[key] tree_parser_args = { "tree_parser_doc": tree_parser_doc, "tree_parser_xpath": tree_parser_xpath, "ignore_dirs_for_coverage": ignore_dirs_for_coverage } print_symbols(f, key, child, level=level + 1, **tree_parser_args) iprint('}} {};'.format(name_var)) tree_parser_doc.pop() tree_parser_xpath.pop()
5,354,680
def prepare_seed(seed): """Set Random Seed""" print("Random Seed: ", seed) mindspore.set_seed(seed)
5,354,681
def partitioned_cov(x, y, c=None):
    """Covariance of groups.

    Partition the rows of `x` according to class labels in `y` and take
    the covariance of each group.

    Parameters
    ----------
    x : array_like, shape (`n`, `dim`)
        The data to group, where `n` is the number of data points and
        `dim` is the dimensionality of each data point.
    y : array_like, shape (`n`,)
        The class label for each data point.
    c : int
        The number of components in `y`.

    Returns
    -------
    cov : array_like, shape (`c`, `dim`, `dim`)
        The covariance of each group.

    Examples
    --------
    >>> partitioned_cov(x, y)

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave
          <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT
          <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_

    """
    c = nunique(y) if c is None else c
    dim = x.shape[1]
    cov = np.zeros((c, dim, dim))

    for i in range(c):
        # rows are observations, columns are variables
        cov[i] = np.cov(x[y == i], rowvar=False)
    return cov
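# Usage sketch for partitioned_cov (with the completed implementation above);
# only numpy is needed:
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 3))        # 100 points in 3 dimensions
y = rng.integers(0, 2, size=100)     # two class labels, 0 and 1

cov = partitioned_cov(x, y, c=2)
assert cov.shape == (2, 3, 3)
# Each slice matches the covariance of the corresponding group.
assert np.allclose(cov[0], np.cov(x[y == 0], rowvar=False))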
5,354,682
def _fix_importname(mname):
    """
    Normalize a module name so it can be used as an import-safe identifier:
    strip dots, dashes, underscores and path separators.

    :param mname: module name or path to normalize
    """
    mname = os.path.normpath(mname)
    mname = mname.replace(".", "")
    mname = mname.replace("-", "")
    mname = mname.replace("_", "")
    mname = mname.replace(os.path.sep, "")
    mname = mname.replace(os.path.pathsep, "")
    return mname
5,354,683
def main(args, out, err):
    """
    This wraps GPURepair's real main function so
    that we can handle exceptions and trigger our own exit
    commands.

    This is the entry point that should be used if you want to use
    this file as a module rather than as a script.
    """
    cleanUpHandler = BatchCaller(args.verbose, out)
    gr_instance = GPURepairInstance(args, out, err, cleanUpHandler)

    def handleTiming(exitCode):
        if gr_instance.time:
            print(gr_instance.getTiming(exitCode), file = out)

    def doCleanUp(timing, exitCode):
        if timing:
            # We must call this before cleaning up globals
            # because it depends on them
            cleanUpHandler.register(handleTiming, exitCode)

        # We should call this last.
        cleanUpHandler.call()

    try:
        returnCode = gr_instance.invoke()
    except Exception:
        # Something went very wrong
        doCleanUp(timing = False, exitCode = 0)  # It doesn't matter what the exitCode is
        raise

    doCleanUp(timing = True, exitCode = returnCode)  # Do this outside try block so we don't call twice!

    return returnCode
5,354,684
def getSymbolData(symbol, sDate=(2000,1,1), adjust=False, verbose=True, dumpDest=None):
    """
    get data from Yahoo Finance and return pandas dataframe

    Parameters
    -----------
    symbol : str
        Yahoo Finance symbol
    sDate : tuple , default (2000,1,1)
        start date (y,m,d)
    adjust : bool , default False
        use adjusted close values to correct OHLC. adj_close will be omitted
    verbose : bool , default True
        print output
    dumpDest : str, default None
        dump raw data for debugging

    Returns
    ---------
    DataFrame

    """

    period1 = int(dt.datetime(*sDate).timestamp())  # convert to seconds since epoch
    period2 = int(dt.datetime.now().timestamp())

    params = (symbol, period1, period2, _token['crumb'])
    url = "https://query1.finance.yahoo.com/v7/finance/download/{0}?period1={1}&period2={2}&interval=1d&events=history&crumb={3}".format(*params)

    data = requests.get(url, cookies={'B':_token['cookie']})
    data.raise_for_status()  # raise error in case of bad request

    if dumpDest is not None:
        fName = symbol+'_dump.csv'
        with open(os.path.join(dumpDest, fName),'w') as fid:
            fid.write(data.text)

    buf = io.StringIO(data.text)  # create a buffer
    df = pd.read_csv(buf,index_col=0,parse_dates=True, na_values=['null'])  # convert to pandas DataFrame

    # rename columns
    newNames = [c.lower().replace(' ','_') for c in df.columns]
    renames = dict(zip(df.columns,newNames))
    df = df.rename(columns=renames)

    # remove duplicates
    df = df[~df.index.duplicated(keep='first')]

    if verbose:
        print(('Got %i days of data' % len(df)))

    if adjust:
        df = _adjust(df,removeOrig=True)

    return df
5,354,685
def generateLouvainCluster(edgeList): """ Louvain Clustering using igraph """ Gtmp = nx.Graph() Gtmp.add_weighted_edges_from(edgeList) W = nx.adjacency_matrix(Gtmp) W = W.todense() graph = Graph.Weighted_Adjacency( W.tolist(), mode=ADJ_UNDIRECTED, attr="weight", loops=False) # ignore the squiggly underline, not errors louvain_partition = graph.community_multilevel( weights=graph.es['weight'], return_levels=False) size = len(louvain_partition) hdict = {} count = 0 for i in range(size): tlist = louvain_partition[i] for j in range(len(tlist)): hdict[tlist[j]] = i count += 1 listResult = [] for i in range(count): listResult.append(hdict[i]) return listResult, size
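# Usage sketch for generateLouvainCluster: two triangles joined by one weak
# edge, so Louvain will typically report two communities. Requires networkx
# and python-igraph, as in the original function.
edge_list = [
    (0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0),
    (3, 4, 1.0), (4, 5, 1.0), (3, 5, 1.0),
    (2, 3, 0.1),
]
labels, n_clusters = generateLouvainCluster(edge_list)
print(n_clusters, labels)   # e.g. 2, [0, 0, 0, 1, 1, 1]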
5,354,686
def exprvars(name, *dims): """Return a multi-dimensional array of expression variables. The *name* argument is passed directly to the :func:`pyeda.boolalg.expr.exprvar` function, and may be either a ``str`` or tuple of ``str``. The variadic *dims* input is a sequence of dimension specs. A dimension spec is a two-tuple: (start index, stop index). If a dimension is given as a single ``int``, it will be converted to ``(0, stop)``. The dimension starts at index ``start``, and increments by one up to, but not including, ``stop``. This follows the Python slice convention. For example, to create a 4x4 array of expression variables:: >>> vs = exprvars('a', 4, 4) >>> vs farray([[a[0,0], a[0,1], a[0,2], a[0,3]], [a[1,0], a[1,1], a[1,2], a[1,3]], [a[2,0], a[2,1], a[2,2], a[2,3]], [a[3,0], a[3,1], a[3,2], a[3,3]]]) """ return _vars(Expression, name, *dims)
5,354,687
def cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase',
                   area_bounds=(0,1e7), ecc_bounds=(0, 1)):
    """
    This function segments a given image via thresholding and returns
    a labeled segmentation mask.

    Parameters
    ----------
    im : 2d-array
        Image to be segmented. This may be of either float or integer
        data type.
    thresh : int, float, or 'otsu'
        Value used during thresholding operation. This can either be a value
        ('int' or 'float') or 'otsu'. If 'otsu', the threshold value will be
        determined automatically using Otsu's thresholding method.
    radius : float
        Radius for gaussian blur for background subtraction. Default value
        is 20.
    image_mode : 'phase' or 'fluorescence'
        Mode of microscopy used to capture the image. If 'phase', objects
        with intensity values *lower* than the provided threshold will be
        selected. If 'fluorescence', values *greater* than the provided
        threshold will be selected. Default value is 'phase'.
    area_bounds : tuple of ints.
        Range of areas of acceptable objects. This should be provided in
        units of square pixels.
    ecc_bounds : tuple of floats
        Range of eccentricity values of acceptable objects. These values
        should range between 0.0 and 1.0.

    Returns
    -------
    im_labeled : 2d-array, int
        Labeled segmentation mask.
    """
    # Apply a median filter to remove hot pixels.
    med_selem = skimage.morphology.square(3)
    im_filt = skimage.filters.median(im, selem=med_selem)

    # Perform gaussian subtraction.
    im_sub = bg_subtract(im_filt, radius)

    # Determine the thresholding method. Use '==' rather than 'is' for
    # string comparison.
    if thresh == 'otsu':
        thresh = skimage.filters.threshold_otsu(im_sub)

    # Determine the image mode and apply threshold.
    if image_mode == 'phase':
        im_thresh = im_sub < thresh
    elif image_mode == 'fluorescence':
        im_thresh = im_sub > thresh
    else:
        raise ValueError("Image mode not recognized. Must be 'phase' "
                         "or 'fluorescence'.")

    # Label the objects.
    im_label = skimage.measure.label(im_thresh)

    # Apply the area and eccentricity bounds.
    im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)

    # Remove objects touching the border.
    im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)

    # Relabel the image.
    im_border = im_border > 0
    im_label = skimage.measure.label(im_border)

    return im_label
5,354,688
def generate_report(start_date, end_date): """Generate the text report""" pgconn = get_dbconn('isuag', user='nobody') days = (end_date - start_date).days + 1 totalobs = days * 24 * 17 df = read_sql(""" SELECT station, count(*) from sm_hourly WHERE valid >= %s and valid < %s GROUP by station ORDER by station """, pgconn, params=(start_date, end_date + datetime.timedelta(days=1)), index_col='station') performance = min([100, df['count'].sum() / float(totalobs) * 100.]) return """ Iowa Environmental Mesonet Data Delivery Report =============================================== Dataset: ISU Soil Moisture Network Performance Period: %s thru %s Reported Performance: %.1f%% Reporting Platforms: %.0f Additional Details ================== Total Required Obs: %.0f (24 hourly obs x 17 platforms x %.0f days) Observations Delivered: %.0f Report Generated: %s .END """ % (start_date.strftime("%d %b %Y"), end_date.strftime("%d %b %Y"), performance, len(df.index), totalobs, days, df['count'].sum(), datetime.datetime.now().strftime("%d %B %Y %H:%M %p"))
5,354,689
def whole(eventfile,par_list,tbin_size,mode,ps_type,oversampling,xlims,vlines):
    """
    Plot the entire power spectrum without any cuts to the data.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl: PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    mode - whether we want to show or save the plot.
    ps_type - obtain power spectrum through the periodogram method ('period') or
    the manual FFT way ('manual') or both ('both')
    oversampling - whether to perform oversampling. Array will consist of
    [True/False, oversampling factor]
    xlims - a list or array: first entry = True/False as to whether to impose an xlim;
    second and third entry correspond to the desired x-limits of the plot
    vlines - a list or array: first entry = True/False as to whether to draw a vertical
    line in the plot; second entry is the equation for the vertical line
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if mode != 'show' and mode != 'save':
        raise ValueError("Mode should either be 'show' or 'save'!")
    if ps_type != 'period' and ps_type != 'manual' and ps_type != 'both':
        raise ValueError("ps_type should either be 'period', 'manual' or 'both'!")
    if type(oversampling) != list and type(oversampling) != np.ndarray:
        raise TypeError("oversampling should either be a list or an array!")
    if type(xlims) != list and type(xlims) != np.ndarray:
        raise TypeError("xlims should either be a list or an array!")
    if type(vlines) != list and type(vlines) != np.ndarray:
        raise TypeError("vlines should either be a list or an array!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    times = data_dict['TIME']
    counts = np.ones(len(times))

    shifted_t = times-times[0]
    t_bins = np.linspace(0,np.ceil(shifted_t[-1]),int(np.ceil(shifted_t[-1])*1/tbin_size+1))
    summed_data, bin_edges, binnumber = stats.binned_statistic(shifted_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if ps_type == 'period':
        plt.figure()
        pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,True,oversampling)
        plt.title('Power spectrum for ' + obj_name + ', ObsID: ' + str(obsid) + '\n Periodogram method' + '\n Includes whole time interval and energy range',fontsize=12)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_pdgm.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return pdgm_f, pdgm_ps

    if ps_type == 'manual':
        plt.figure()
        manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,True,oversampling)
        plt.title('Power spectrum for ' + obj_name + ', ObsID ' + str(obsid) + '\n Manual FFT method' + '\n Includes whole time interval and energy range',fontsize=12)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_manual.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return manual_f, manual_ps

    if ps_type == 'both':
        pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,False,oversampling)
        manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,False,oversampling)

        fig, (ax1,ax2) = plt.subplots(2,1)
        fig.suptitle('Power spectra for ' + obj_name + ', ObsID ' + str(obsid) + '\n both periodogram and manual FFT method' + '\n Includes whole time interval and energy range', fontsize=12)

        ax1.semilogy(pdgm_f,pdgm_ps,'b-')#/np.mean(pdgm_ps),'b-') #periodogram; arrays already truncated!
        ax1.set_xlabel('Hz',fontsize=12)
        ax1.set_ylabel('Normalized power spectrum',fontsize=10)

        ax2.semilogy(manual_f,manual_ps,'r-')#/np.mean(manual_ps),'r-') #manual FFT; arrays already truncated!
        ax2.set_xlabel('Hz',fontsize=12)
        ax2.set_ylabel('Normalized power spectrum',fontsize=10)

        if xlims[0] == True:
            ax1.set_xlim([xlims[1],xlims[2]])
            ax2.set_xlim([xlims[1],xlims[2]])
        if vlines[0] == True:
            ax1.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
            ax2.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
        ax2.axhline(y=2,color='k',alpha=0.3,lw=0.3)

        plt.subplots_adjust(hspace=0.2)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_both.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return pdgm_f, pdgm_ps, manual_f, manual_ps
5,354,690
def handle_postback(): """Handles a postback.""" # we need to set an Access-Control-Allow-Origin for use with the test AJAX postback sender # in normal operations this is NOT needed response.set_header('Access-Control-Allow-Origin', '*') args = request.json loan_id = args['request_token'] merchant_loan_id = args.get('merchant_transaction_id') action = args['updates'].get('action') if action == 'refund': # process a refund amount = args['updates']['amount'] return handle_refund(loan_id, amount) loan_status = args['updates']['status'] return handle_status_update(loan_id, loan_status)
5,354,691
def metadata_mogrifier(folder): """Utility function transforming metadata stored in txt file into a format usable by :py:class:`Metadata <livius.video.processing.jobs.meta.Metadata>` usable one. This is just an example reading 2 files in a JSON format and creating the appropriate metadata input for the :py:class:`Metadata <livius.video.processing.jobs.meta.Metadata>` class. It is possible to run directly this function by providing the location of the folder to parse, in the following way:: python -m livius.video.processing.jobs.meta some_folder """ import json list_videos = os.listdir(folder) for current_video in list_videos: dout = {} full_path = os.path.abspath(os.path.join(folder, current_video)) video_filename = current_video metadata_1 = os.path.join(full_path, video_filename + '.txt') # this one should exist: main metadata assert(os.path.exists(metadata_1)) if os.path.exists(metadata_1): with open(metadata_1) as f: d = json.load(f)['TalkDetail'] dout['title'] = d[1] dout['speaker'] = d[0] dout['date'] = d[3] segment_file = os.path.join(full_path, 'processing_%s_Time_Total_Seconds.json' % video_filename) if os.path.exists(segment_file): with open(segment_file) as f: d = json.load(f)['CuttingTimes'] if(d[0].lower() == 'yes'): dout['video_begin'] = d[1] dout['video_end'] = d[2] out_file = os.path.join(full_path, video_filename + '_metadata_input.json') with open(out_file, 'w') as f: json.dump(dout, f, indent=4)
5,354,692
def process_responses(response_queue, msg_in): """ Pulls responses off of the queue. """ log_name = '{0} :: {1}'.format(__name__, process_responses.__name__) logging.debug(log_name + ' - STARTING...') while 1: stream = '' # Block on the response queue try: res = response_queue.get(True) request_meta = rebuild_unpacked_request(res) except Exception: logging.error(log_name + ' - Could not get request meta') continue data = response_queue.get(True) while data: stream += data try: data = response_queue.get(True, timeout=1) except Empty: break try: data = eval(stream) except Exception as e: # Report a fraction of the failed response data directly in the # logger if len(unicode(stream)) > 2000: excerpt = stream[:1000] + ' ... ' + stream[-1000:] else: excerpt = stream logging.error(log_name + ' - Request failed. {0}\n\n' \ 'data excerpt: {1}'.format(e.message, excerpt)) # Format a response that will report on the failed request stream = "OrderedDict([('status', 'Request failed.'), " \ "('exception', '" + escape(unicode(e.message)) + "')," \ "('request', '" + escape(unicode(request_meta)) + "'), " \ "('data', '" + escape(unicode(stream)) + "')])" key_sig = build_key_signature(request_meta, hash_result=True) # Set request in list to "not alive" req_cb_flag_job_complete(key_sig, REQ_NCB_LOCK) logging.debug(log_name + ' - Setting data for {0}'.format( str(request_meta))) set_data(stream, request_meta) logging.debug(log_name + ' - SHUTTING DOWN...')
5,354,693
def selfcal_apcal(imc, trial='1'): """ Self Calibration. Apply the calibration from the phase only solution """ flagmanager(imc.vislf, mode='restore', versionname='startup') applycal(vis=imc.vislf, spwmap=np.zeros(54), interp='linearPDperobs', gaintable=[imc.path_base+'.pcal'+trial], calwt=False, flagbackup=False)
5,354,694
def get_bb_bev_from_obs(dict_obs, pixor_size=128):
    """Input dict_obs with (B,H,W,C), return (B,H,W,3)"""
    vh_clas = tf.squeeze(dict_obs['vh_clas'], axis=-1)  # (B,H,W) after squeezing the channel axis
    # vh_clas = tf.gather(vh_clas, 0, axis=-1)  # (B,H,W)

    vh_regr = dict_obs['vh_regr']  # (B,H,W,6)
    decoded_reg = decode_reg(vh_regr, pixor_size)  # (B,H,W,8)

    lidar = dict_obs['lidar']

    B = vh_regr.shape[0]
    images = []
    for i in range(B):
        corners, _ = pixor_postprocess(vh_clas[i], decoded_reg[i])  # (N,4,2)
        image = get_bev(lidar, corners, pixor_size)  # (H,W,3)
        images.append(image)
    images = tf.convert_to_tensor(images, dtype=np.uint8)  # (B,H,W,3)
    return images
5,354,695
def get_hard_edges(obj): """ :param str obj: :returns: all hard edges from the given mesh in a flat list :rtype: list of str """ return [obj + '.e[' + str(i) + ']' for i, edgeInfo in enumerate(cmds.polyInfo(obj + '.e[*]', ev=True)) if edgeInfo.endswith('Hard\n')]
5,354,696
def make_system(l=70): """ Making and finalizing a kwant.builder object describing the system graph of a closed, one-dimensional wire with l number of sites. """ sys = kwant.Builder() lat = kwant.lattice.chain() sys[(lat(x) for x in range(l))] = onsite sys[lat.neighbors()] = hopping return sys.finalized()
5,354,697
def jsontabledump(f: TextIO, c: Tuple[str, Dict[str, Tuple[str, str]]], name: str) -> None: """ Dump table schema to the given file. :param f: File object to dump the table to. :param c: Table schema. :param name: Table name. """ f.write("{}\n---------\n".format(name)) f.write("\n" + c[0] + "\n") f.write("```\n") f.write('{}{}\n'.format(name, '{')) for key in c[1].keys(): f.write(' {:27}{} -- {}\n'.format('"' + key + '":', c[1][key][0], c[1][key][1])) f.write("{}\n```\n".format('}'))
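# Usage sketch for jsontabledump. The schema tuple follows the annotated type
# (table comment, {column: (type, description)}); the table and column names
# below are made up.
import io

schema = (
    "Users of the system.",
    {
        "id": ("INTEGER", "primary key"),
        "name": ("TEXT", "display name"),
    },
)

buf = io.StringIO()
jsontabledump(buf, schema, "users")
print(buf.getvalue())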
5,354,698
def _gen_test_methods_for_rule( rule: Type[CstLintRule], fixture_dir: Path, rules_package: str ) -> TestCasePrecursor: """Aggregates all of the cases inside a single CstLintRule's VALID and INVALID attributes and maps them to altered names with a `test_` prefix so that 'unittest' can discover them later on and an index postfix so that individual tests can be selected from the command line. :param CstLintRule rule: :param Path fixture_dir: :param str rules_package: :returns: :rtype: TestCasePrecursor """ valid_tcs = {} invalid_tcs = {} requires_fixtures = False fixture_paths: Dict[str, Path] = {} fixture_subdir: Path = get_fixture_path(fixture_dir, rule.__module__, rules_package) if issubclass(rule, CstLintRule): if rule.requires_metadata_caches(): requires_fixtures = True if hasattr(rule, "VALID"): for idx, test_case in enumerate(getattr(rule, "VALID")): name = f"test_VALID_{idx}" valid_tcs[name] = test_case if requires_fixtures: fixture_paths[name] = fixture_subdir / f"{rule.__name__}_VALID_{idx}.json" if hasattr(rule, "INVALID"): for idx, test_case in enumerate(getattr(rule, "INVALID")): name = f"test_INVALID_{idx}" invalid_tcs[name] = test_case if requires_fixtures: fixture_paths[name] = fixture_subdir / f"{rule.__name__}_INVALID_{idx}.json" return TestCasePrecursor( rule=rule, test_methods={**valid_tcs, **invalid_tcs}, fixture_paths=fixture_paths, )
5,354,699