content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def findx(mu, lnum):
    """Obtains the Hill sphere and x-coordinate for a mu-value and lnum."""
    hill = (mu/3)**(1.0/3.0)
    if lnum == 1:
        # lnum is used to request one of the collinear Lagrange points.
        guess = 1 - mu - hill * (1 - (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
    elif lnum == 2:
        guess = 1 - mu + hill * (1 + (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
    elif lnum == 3:
        # Not the formal guess the Mission Handbook might prescribe, but it should
        # suffice, as the L3 Lagrange point is the only collinear point with x < 0.
        guess = -1
    else:
        return "Invalid"
    return optimize.fsolve(xroot, guess, mu, xtol=0.0)[0], hill
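# Usage sketch for findx. The snippet assumes `optimize` (scipy) and a root
# function `xroot`, both defined elsewhere in the original project; the
# versions below are only illustrative assumptions (the standard CR3BP
# collinear-equilibrium equation in the rotating frame).
from scipy import optimize

def xroot(x, mu):
    # Net rotating-frame acceleration along the x-axis; zero at L1, L2, L3.
    return (x
            - (1 - mu) * (x + mu) / abs(x + mu) ** 3
            - mu * (x - 1 + mu) / abs(x - 1 + mu) ** 3)

mu_earth_moon = 0.01215  # approximate Earth-Moon mass ratio
for lnum in (1, 2, 3):
    x, hill = findx(mu_earth_moon, lnum)
    print(lnum, x, hill)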
5,348,000
def plot_op_states(
    df: pd.DataFrame,
    agent0: str,
    agent1: str,
    state: str,
    level: int = 0,
    agent: int = 0,
    show: bool = True,
):
    """
    df (ResultsDf): an outcome from the compete() function
    agent0 (str): an agent name in the agent0 column in the df
    agent1 (str): an agent name in the agent1 column in the df
    agent (0|1): which of agent 0 and 1 you wish to plot.
        The indicated agent must be a theory of mind (ToM) agent.
    state (str): a state of the simulated opponent you wish to plot.
    level (int): level of the simulated opponent you wish to plot.
    show (bool, optional): Should plt.show be run at the end. Defaults to True.
    """
    plot_history(
        df,
        agent0,
        agent1,
        state="p_op",
        agent=agent,
        fun=lambda x: x["internal_states"]["opponent_states"][level]["own_states"][
            state
        ],
        show=show,
    )
5,348,001
def _f1_div_(self, other):
    """Operator for ``1D-function / other``"""
    return _f1_op_(self, other, Ostap.MoreRooFit.Division, "Divide_")
5,348,002
def test_interrupted_late_wait(): """Test we can interrupt the wait during the timeout period. """ called = 0 def cond(): nonlocal called called += 1 if called == 3: return True job = InstrJob(cond, 0) assert not job.wait_for_completion(lambda: True, refresh_time=0.1) assert called == 2
5,348,003
def set_prior_6(para):
    """
    set prior before the first data came in
    doc details to be added
    """
    n_shape = para['n_shape']
    log_prob = [[] for i_shape in range(n_shape)]
    delta_mean = [[] for i_shape in range(n_shape)]
    delta_var = [[] for i_shape in range(n_shape)]
    time_since_last_cp = [[] for i_shape in range(n_shape)]
    return log_prob, delta_mean, delta_var, time_since_last_cp
5,348,004
def inf_set_af2(*args):
    """inf_set_af2(_v) -> bool"""
    return _ida_ida.inf_set_af2(*args)
5,348,005
def showOverlapTable(modes_x, modes_y, **kwargs): """Show overlap table using :func:`~matplotlib.pyplot.pcolor`. *modes_x* and *modes_y* are sets of normal modes, and correspond to x and y axes of the plot. Note that mode indices are incremented by **1**. List of modes is assumed to contain a set of contiguous modes from the same model. Default arguments for :func:`~matplotlib.pyplot.pcolor`: * ``cmap='jet'`` * ``norm=matplotlib.colors.Normalize(0, 1)``""" import matplotlib.pyplot as plt import matplotlib if isinstance(modes_x, np.ndarray): num_modes_x = modes_x.shape[1] else: num_modes_x = modes_x.numModes() if isinstance(modes_y, np.ndarray): num_modes_y = modes_y.shape[1] else: num_modes_y = modes_y.numModes() overlap = abs(calcOverlap(modes_y, modes_x)) if overlap.ndim == 0: overlap = np.array([[overlap]]) elif overlap.ndim == 1: overlap = overlap.reshape((num_modes_y, num_modes_x)) cmap = kwargs.pop('cmap', 'jet') norm = kwargs.pop('norm', matplotlib.colors.Normalize(0, 1)) if SETTINGS['auto_show']: plt.figure() x_range = np.arange(1, num_modes_x+1) if isinstance(modes_x, ModeSet): x_ticklabels = modes_x._indices+1 else: x_ticklabels = x_range x_ticklabels = kwargs.pop('xticklabels', x_ticklabels) y_range = np.arange(1, num_modes_y+1) if isinstance(modes_y, ModeSet): y_ticklabels = modes_y._indices+1 else: y_ticklabels = y_range y_ticklabels = kwargs.pop('yticklabels', y_ticklabels) if not isinstance(modes_x, np.ndarray): xlabel = str(modes_x) else: xlabel = '' xlabel = kwargs.pop('xlabel', xlabel) if not isinstance(modes_y, np.ndarray): ylabel = str(modes_y) else: ylabel = '' ylabel = kwargs.pop('ylabel', ylabel) allticks = kwargs.pop('allticks', True) show = showMatrix(overlap, cmap=cmap, norm=norm, xticklabels=x_ticklabels, yticklabels=y_ticklabels, allticks=allticks, **kwargs) plt.xlabel(xlabel) plt.ylabel(ylabel) if SETTINGS['auto_show']: showFigure() return show
5,348,006
def initdb(ctx):
    """Initialize database, dropping all tables"""
    global STORAGE
    app = ctx.obj
    if STORAGE:
        ctx.fail("Database already initialized.")
    db.create_all()
    if not app.config.get('USE_SQLITE'):
        from alembic import command
        command.stamp(ALEMBIC_CONFIG, 'head')
    STORAGE = Storage.new_storage()
5,348,007
def json_find_matches_dataframe(df, filter_path, reverse_selectivity=False): """Iteratively filters a pandas.DataFrame df using the same sort of filter_path used by json_extract. Because of the tabular nature of pandas DataFrames, filters are treated as being either 'down' or 'check'; a filter either refines both the rows and columns returned (essentially a 'down' action) or refines only the rows returned (essentially a 'check' action).""" import pandas as pd for layer in filter_path: if isinstance(layer, str): if layer == "!!": reverse_selectivity = not reverse_selectivity continue rows = pd.Series([True] * df.shape[0]) for filt in layer: new_rows, new_cols = filt.filter_dataframe(df) rows &= new_rows if filt.action != "check": cols = new_cols else: cols = df.columns df = df.loc[rows, cols] return df
5,348,008
def contrast(arr, amount=0.2, split=0.5, normalize=True): """ General contrast booster or diffuser of normalized array-like data. Parameters ---------- arr : ndarray Input array (of floats on range [0, 1] if ``normalize=False``). If values exist outside this range, with ``normalize=True`` the image will be normalized for calculation. amount : float or length-2 iterable of floats Controls the exponential contrast mechanism for values above and below ``split`` in ``I``. If positive, the curve provides added contrast; if negative, the curve provides reduced contrast. If provided as a lenth-2 iterable of floats, they control the regions (below, above) ``split`` separately. split : float Positive scalar, on range [0, 1], determining the midpoint of the exponential contrast. Default of 0.5 is reasonable for well-exposed images. normalize : bool, default True Controls normalization to the range [0, 1]. Returns ------- focused : ndarray Contrast adjusted, normalized, floating-point image on range [0, 1]. Notes ----- The result of this algorithm is like applying a Curves adjustment in the GIMP or Photoshop. Algorithm for curves adjustment at a given pixel, x, is given by:: | split * (x/split)^below, 0 <= x <= split y(x) = | | 1 - (1-split) * ((1-x) / (1-split))^above, split < x <= 1.0 See Also -------- skfuzzy.fuzzymath.sigmoid """ # Ensure scalars are floats, to avoid truncating division in Python 2.x split = float(split) im = arr.astype(float) amount_ = np.asarray(amount, dtype=np.float64).ravel() if len(amount_) == 1: # One argument -> Equal amount applied on either side of `split` above = below = amount_[0] else: # Two arguments -> Control contrast separately in light/dark regions below = amount_[0] above = amount_[1] # Normalize if required if im.max() > 1. and normalize is True: ma = float(im.max()) im /= float(im.max()) else: ma = 1. focused = np.zeros_like(im, dtype=np.float64) # Simplified array-wise algorithm using fancy indexing rather than looping focused[im <= split] = split * (im[im <= split] / split) ** below focused[im > split] = (1 - (1. - split) * ((1 - im[im > split]) / (1. - split)) ** above) # Reapply multiplicative factor return focused * ma
5,348,009
def get_group(items, total_groups, group_id):
    """Get the items from the passed in group based on group size."""
    if not 0 < group_id <= total_groups:
        raise ValueError("Invalid test-group argument")

    start, size = get_group_size_and_start(len(items), total_groups, group_id)
    selected = items[start : start + size]
    deselected = items[:start] + items[start + size :]
    assert len(selected) + len(deselected) == len(items)
    return selected, deselected
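# get_group relies on a helper get_group_size_and_start() that is not part of
# this snippet; the version below is only a sketch of one reasonable
# implementation (an even split where the first `remainder` groups get one
# extra item), so the real helper in the project may differ.
def get_group_size_and_start(total_items, total_groups, group_id):
    base, extra = divmod(total_items, total_groups)
    size = base + (1 if group_id <= extra else 0)
    start = (group_id - 1) * base + min(group_id - 1, extra)
    return start, size

items = list(range(10))
for gid in (1, 2, 3):
    selected, deselected = get_group(items, 3, gid)
    print(gid, selected)  # 1 -> [0, 1, 2, 3], 2 -> [4, 5, 6], 3 -> [7, 8, 9]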
5,348,010
def read_nq_entry(entry, is_training): """ Converts a NQ entry into a list of NqExamples. :param entry: dict :param is_training: bool :return: list[NqExample] """ def is_whitespace(c): return c in " \t\r\n" or ord(c) == 0x202F examples = [] contexts_id = entry["id"] contexts = entry["contexts"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in contexts: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) questions = [] for i, question in enumerate(entry["questions"]): qas_id = "{}".format(contexts_id) question_text = question["input_text"] start_position = None end_position = None answer = None if is_training: answer_dict = entry["answers"][i] answer = make_nq_answer(contexts, answer_dict) # For now, only handle extractive, yes, and no. if answer is None or answer.offset is None: continue start_position = char_to_word_offset[answer.offset] end_position = char_to_word_offset[answer.offset + len(answer.text) - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( tokenization.whitespace_tokenize(answer.text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue questions.append(question_text) example = NqExample( example_id=int(contexts_id), qas_id=qas_id, questions=questions[:], doc_tokens=doc_tokens, doc_tokens_map=entry.get("contexts_map", None), answer=answer, start_position=start_position, end_position=end_position) examples.append(example) return examples
5,348,011
def main():
    """Main"""
    run_module()
5,348,012
def calc_atoms(psi, vol_elem=1.0): """Calculate the total number of atoms. Parameters ---------- psi : :obj:`list` of 2D NumPy :obj:`array` or PyTorch :obj:`Tensor` The input spinor wavefunction. vol_elem : :obj:`float` 2D volume element of the space. Returns ------- atom_num : :obj:`float` The total atom number in both spin components. """ pops = calc_pops(psi, vol_elem=vol_elem) atom_num = sum(pops) return atom_num
5,348,013
def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:
    """
    Return the length of only the payload, without options
    :param pkt: dpkt.ethernet.Ethernet packet containing a TCP header
    :return: int, or None if the packet is neither Ethernet nor IP
    """
    if isinstance(pkt, dpkt.ethernet.Ethernet):
        ip = pkt.data
    elif isinstance(pkt, dpkt.ip.IP):
        ip = pkt
    else:
        return None
    return ip.len - (ip.hl * 4 + ip.data.off * 4)
5,348,014
def overviewUsage(err=''):
    """Default overview information highlighting active scripts."""
    m = '%s\n' % err
    m += '  The following scripts allow you to manage Team Branches (TmB) on SalesForce.\n'
    m += '  Use one of the scripts below to meet your needs.\n'
    m += '  \n'
    m += '  1. Link Task Branches to a Team Branch \n'
    m += '     teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> \n'
    m += '  \n'
    m += '  2. List Task Branches linked to a Team Branch \n'
    m += '     teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -d \n'
    m += '  \n'
    m += '  3. Link Task Branches to a Team Branch with a priority \n'
    m += '     teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -p <low|medium|high|urgent|critical> \n'
    m += '  \n'
    return m
5,348,015
def min_distance(z_i, z_j, sc_size):
    """Calculates the minimum distance between the particle at ``z_i`` and
    all of the images of the particle at ``z_j``, including itself. The
    minimum distance is always less than half of the size of the simulation
    supercell ``sc_size``.

    :param z_i:
    :param z_j:
    :param sc_size:
    :return:
    """
    sc_half = 0.5 * sc_size
    z_ij = z_i - z_j
    if fabs(z_ij) > sc_half:
        # Take the image.
        return -sc_half + (z_ij + sc_half) % sc_size
    return z_ij
5,348,016
def prf(gold: str, pred: str, dic) -> tuple:
    """
    Compute P, R, F1
    :param gold: gold-standard segmentation file, e.g. "商品 和 服务"
    :param pred: predicted segmentation file, e.g. "商品 和服 务"
    :param dic: dictionary
    :return: (P, R, F1, OOV_R, IV_R)
    """
    A_size, B_size, A_cap_B_size, OOV, IV, OOV_R, IV_R = 0, 0, 0, 0, 0, 0, 0
    with open(gold, encoding='utf8') as gd, open(pred, encoding='utf8') as pd:
        for g, p in zip(gd, pd):
            A, B = set(to_region(g)), set(to_region(p))
            A_size += len(A)
            B_size += len(B)
            A_cap_B_size += len(A & B)
            text = re.sub("\\s+", "", g)
            for (start, end) in A:
                word = text[start: end]
                if dic.containsKey(word):
                    IV += 1
                else:
                    OOV += 1
            for (start, end) in A & B:
                word = text[start: end]
                if dic.containsKey(word):
                    IV_R += 1
                else:
                    OOV_R += 1
    p, r = A_cap_B_size / B_size * 100, A_cap_B_size / A_size * 100
    return p, r, 2 * p * r / (p + r), OOV_R / OOV * 100, IV_R / IV * 100
5,348,017
def lorem(): """Returns some sample latin text to use for prototyping.""" return """ Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """
5,348,018
def read_gene2species(*filenames):
    """
    Reads a gene2species file

    Returns a function that will map gene names to species names.
    """
    maps = []
    for filename in filenames:
        maps.extend(util.read_delim(util.skip_comments(
            util.open_stream(filename))))
    return make_gene2species(maps)
5,348,019
def ingest(
    token: str,
    endpoint: str,
    method: str = "GET",
    time_zone: str = "Asia/Tokyo",
    params: Dict[Any, Any] = {},
    data: Dict[Any, Any] = {},
) -> Optional[Dict[Any, Any]]:
    """Fetch information from the endpoint."""
    url = path.join(GRAPH_ENDPOINT, endpoint)
    headers = {
        "Authorization": f"Bearer {token}",
        "Prefer": f'outlook.timezone="{time_zone}"',
    }
    response = None
    if method == "GET":
        response = requests.get(url, headers=headers, params=params)
    elif method == "POST":
        response = requests.post(url, headers=headers, params=params, json=data)
    else:
        logger.error(f"{method=} is not a supported method")
        raise RuntimeError()
    if response.ok:
        return response.json()
    else:
        status = response.status_code
        if status == NOT_FOUND:
            logger.warning("No matching information was found")
            return {}
        else:
            logger.error(f"The request is invalid: {status=}")
            response.raise_for_status()
5,348,020
def make_list_table(headers, data, title='', columns=None):
    """Build a list-table directive.

    :param headers: List of header values.
    :param data: Iterable of row data, yielding lists or tuples with rows.
    :param title: Optional text to show as the table title.
    :param columns: Optional widths for the columns.
    """
    results = []
    add = results.append
    add('.. list-table:: %s' % title)
    add('   :header-rows: 1')
    if columns:
        add('   :widths: %s' % (','.join(str(c) for c in columns)))
    add('')
    add('   - * %s' % headers[0])
    for h in headers[1:]:
        add('     * %s' % h)
    for row in data:
        add('   - * %s' % row[0])
        for r in row[1:]:
            add('     * %s' % r)
    add('')
    return '\n'.join(results)
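# A small usage sketch for make_list_table; the header and row values are
# made up for illustration.
print(make_list_table(
    ['Name', 'Unit'],
    [('cpu_util', '%'), ('memory.usage', 'MB')],
    title='Example meters',
    columns=[30, 10],
))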
5,348,021
def toss_unbaised():
    """
    toss 2 times:
    assign 0-1 = 0
    assign 1-0 = 1
    discard 0-0 and 1-1
    """
    while True:
        first, second = toss_biased(), toss_biased()
        if first == 0 and second == 1:
            return 0
        if first == 1 and second == 0:
            return 1
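# toss_unbaised assumes a biased source toss_biased() defined elsewhere; the
# stand-in below is only for illustration. Because P(0 then 1) equals
# P(1 then 0) for any fixed bias, the accepted pairs yield a fair bit.
import random

def toss_biased():
    return 1 if random.random() < 0.7 else 0  # hypothetical 70/30 bias

samples = [toss_unbaised() for _ in range(10000)]
print(sum(samples) / len(samples))  # expected to be close to 0.5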
5,348,022
def split_val_condition(input_string):
    """
    Split and return a {'value': v, 'condition': c} dict for the value and
    the condition. Condition is None if no condition was found.

    @param input_string A string of the form XXX @ YYYY
    """
    try:
        (value, condition) = [x.strip() for x in input_string.split('@')]
        return {'value': value, 'condition': condition}
    except ValueError:
        # no condition was found
        return {'value': input_string.strip(), 'condition': None}
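# Usage sketch for split_val_condition with made-up inputs.
print(split_val_condition("12.5 @ T > 300"))  # {'value': '12.5', 'condition': 'T > 300'}
print(split_val_condition("12.5"))            # {'value': '12.5', 'condition': None}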
5,348,023
def autolabel(rects, ax, labels):
    """Attach a text label above each bar displaying its height."""
    for i, rect in enumerate(rects):
        height = rect.get_height()
        # pdb.set_trace()
        ax.text(rect.get_x() + rect.get_width() / 2., 1.01 * height,
                labels[i], ha='center', va='bottom')
5,348,024
def while_octagon(): """printing shape of'octagon' using while loop""" i=0 while i<7: j=0 while j<7: if j in(0,6) and i in(2,3,4) or i in(0,6) and j in(2,3,4) or j in(1,5) and i in(1,5) : print("*",end=" ") else: print(" ",end=" ") j+=1 print() i+=1
5,348,025
def shap_scatterplot( sklearn_model: BaseEstimator, X_explain: pd.DataFrame, feature_labels: dict, feature: str = "bac_guess", moderator: Sequence[str] = "episode", output_folder: str = "/mnt/data/figures/shap" ) -> None: """Partial Dependence Plot for SHAP Args: sklearn_model (BaseEstimator): e.g., lightgbm X_explain (pd.DataFrame): feature set feature_labels (dict): maps feature_names onto feature_labels for plotting feature (str): The main feature to scatterplot output_folder (str): prefix for storing plots & parquets """ # Exclude missing data, which distorts visualization mask = X_explain[feature] > -999 X_explain = X_explain.loc[mask] # Compute SHAP values shap_values = shap.TreeExplainer(sklearn_model).shap_values(X_explain) if isinstance(shap_values, list): # Some output a list for each class shap_values = shap_values[1] columns = X_explain.columns.tolist() if feature not in columns: raise ValueError(f"{feature} is not a column in the given feature df.") formatter_params = {'xtick.labelsize': 8, 'ytick.labelsize': 8} plt.rcParams.update(formatter_params) for mod in moderator: ax = shap.dependence_plot(feature, shap_values, X_explain, interaction_index=mod, dot_size=2) if (feature == "bac_guess") or (feature == "bac_cumulative_avg"): plt.axvspan(.06, .10, alpha=.10, color='grey') plt.axvspan(.04, .12, alpha=.10, color='grey') plt.gcf().set_size_inches(6, 3) flabel = feature_labels[feature]["label"] plt.xlabel(flabel, fontsize=10) plt.ylabel(f"SHAP Value for {flabel}", fontsize=10) # cbarlabel = feature_labels[mod]["label"] # cbar = plt.colorbar() # cbar.ax.tick_params(labelsize=7) #plt.colorbar().set_label(label=cbarlabel, fontsize=10) # # Hack to change fontsize on the legend/colorbar # cax = plt.gcf().axes[-1] # cax.tick_params(labelsize=8) # # Hack to change fontsize of the legend label # plt.gcf().figure.axes[-1].yaxis.label.set_size(10)# size of legend label plt.tight_layout() plt.savefig(f"{output_folder}/shap_scatterplot_{feature}_by_{mod}.pdf", bbox_inches="tight") plt.close()
5,348,026
def shimenreservoir_operation_rule_lower_limit(): """ Real Name: ShiMenReservoir Operation Rule Lower Limit Original Eqn: WITH LOOKUP ( Date, ([(1,190)-(366,250)],(1,240),(32,240),(152,220),(182,220),(244,225),(335,240),(365,\ 240) )) Units: m Limits: (None, None) Type: component """ return functions.lookup(date(), [1, 32, 152, 182, 244, 335, 365], [240, 240, 220, 220, 225, 240, 240])
5,348,027
def pre_process(dd, df, dataset_len, batch_size):
    """Partition one dataframe to multiple small dataframes based on a given batch size."""
    df = dd.str2ascii(df, dataset_len)
    prev_chunk_offset = 0
    partitioned_dfs = []
    while prev_chunk_offset < dataset_len:
        curr_chunk_offset = prev_chunk_offset + batch_size
        chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
        partitioned_dfs.append(chunk)
        prev_chunk_offset = curr_chunk_offset
    return partitioned_dfs
5,348,028
def after_hypothesis_control(context: Hypothesis, state: Dict[str, Any], **kwargs): """ Finishes the span created when the steady-state hypothesis began """ tracer = opentracing.global_tracer() scope = tracer.scope_manager.active span = scope.span try: if not span: return deviated = not state.get("steady_state_met") span.set_tag("deviated", deviated) if deviated and "probes" in state: deviated_probe = state["probes"][-1] span.set_tag("error", True) _log_kv( { "probe": deviated_probe["activity"]["name"], "expected": deviated_probe["activity"]["tolerance"], "computed": deviated_probe["output"], }, tracer, span, ) finally: scope.close()
5,348,029
def fromisoformat(s):
    """
    Hacky way to recover a datetime from an isoformat() string

    Python 3.7 implements datetime.fromisoformat() which is the proper way.
    There are many other 3rd party modules out there, but this should be good
    enough for testing.
    """
    return datetime(*map(int, re.findall(r'\d+', s)))
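# Usage sketch for fromisoformat; assumes the `re` and `datetime` imports the
# snippet itself relies on.
import re
from datetime import datetime

print(fromisoformat('2021-03-05T12:30:45'))  # datetime(2021, 3, 5, 12, 30, 45)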
5,348,030
def non_repeat(a, decimals=12):
    """Return matrix A with duplicate rows removed."""
    a = np.ascontiguousarray(a)
    a = np.around(a, decimals=int(decimals))
    _, index = np.unique(a.view([('', a.dtype)] * a.shape[1]), return_index=True)
    index = sorted(index)
    return a[index]
5,348,031
def softmax_with_cross_entropy(predictions, target_index):
    """
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      predictions, np array, shape is either (N) or (batch_size, N) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions -
        gradient of predictions by loss value
    """
    is_batch = predictions.ndim == 2
    probs = softmax(predictions)
    loss = cross_entropy_loss(probs, target_index)
    dprediction = probs
    if is_batch:
        batch_size = target_index.size
        i = np.arange(batch_size)
        dprediction[i, target_index] -= 1
        dprediction /= batch_size
    else:
        dprediction[target_index] -= 1
    return loss, dprediction
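# softmax_with_cross_entropy depends on softmax() and cross_entropy_loss()
# helpers that are not shown; these are illustrative versions consistent with
# the shapes described in the docstring, not the project's own code.
import numpy as np

def softmax(predictions):
    # Subtract the per-row max for numerical stability before exponentiating.
    shifted = predictions - np.max(predictions, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

def cross_entropy_loss(probs, target_index):
    if probs.ndim == 2:
        rows = np.arange(probs.shape[0])
        return -np.mean(np.log(probs[rows, target_index]))
    return -np.log(probs[target_index]).sum()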
5,348,032
def scraper_main_olx(url): """ Reads pages with offers from OLX and provides URLS to said offers. """ def __create_url_olx(offs_ids, prefix="https://www.olx.pl"): """ Method creates an olx offer link from parts read from a main page. """ return [ "/".join([ prefix, "oferta", "CID3-ID" + o_id + ".html" ]) for o_id in offs_ids ] # Loading the page page = get_page(url) # Reading the offers' ids offers_ids = [ re.search("[^_]*$", off.attrib["class"]).group()[2:] for off in page.element("table[id=offers_table] table[summary=Ogłoszenie]") ] return { "url": url, "offers_urls": __create_url_olx(offers_ids) }
5,348,033
async def test_user_ignore( hass: HomeAssistant, vizio_connect: pytest.fixture, vizio_bypass_setup: pytest.fixture, ) -> None: """Test user config flow doesn't throw an error when there's an existing ignored source.""" entry = MockConfigEntry( domain=DOMAIN, data=MOCK_SPEAKER_CONFIG, options={CONF_VOLUME_STEP: VOLUME_STEP}, source=SOURCE_IGNORE, ) entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=MOCK_SPEAKER_CONFIG ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
5,348,034
def process_sha1(dataset_infos): """Computes the SHA-1 hash of the datasets. Removes the datasets for which the SHA-1 hash is already in the database. N.B.: a dataset for which the SHA-1 hash is not in the database represents a new dataset version. :param datasets_infos: A list of DatasetInfos containing to path to the dataset needing a SHA-1 hash verification, and the previous SHA-1 hashes. :return: A list of DatasetInfos for which the SHA-1 hashes are not in the database. """ if not isinstance(dataset_infos, DatasetInfos): raise TypeError("Datasets infos must be a valid DatasetInfos list.") sha1_hash = sha1() path_to_dataset = dataset_infos.zip_path previous_sha1_hashes = dataset_infos.previous_sha1_hashes try: with open(path_to_dataset, "rb") as f: while data := f.read(DATA_CHUNK_BYTE_SIZE): sha1_hash.update(data) except OSError: pass sha1_hash = sha1_hash.hexdigest() if sha1_hash not in previous_sha1_hashes: dataset_infos.sha1_hash = sha1_hash else: print( f"SHA-1 hash {sha1_hash} already exists for {path_to_dataset}, dataset discarded\n", file=sys.stderr, ) return dataset_infos
5,348,035
def accuracy( output: torch.Tensor, target: torch.tensor, topk: Tuple[int] = ( 1, )) -> List[float]: """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
5,348,036
def _make_unique(key, val):
    """
    Make a tuple of key, value that is guaranteed hashable and should be
    unique per value

    :param key: Key of tuple
    :param val: Value of tuple
    :return: Unique key tuple
    """
    if type(val).__hash__ is None:
        val = str(val)
    return key, val
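# Usage sketch: hashable values pass through, unhashable ones are stringified.
print(_make_unique('count', 3))          # ('count', 3)
print(_make_unique('tags', ['a', 'b']))  # ('tags', "['a', 'b']")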
5,348,037
def replace_caps(x): """Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before.""" res = [] for t in x: if t == '': continue if t[0].isupper(): if len(t) == 1 and t[0] == 'I': res.append(TK_MAJ) if len(t) > 1 and (t[1:].islower() or (t[1] == "’" or t[1] == "'")): res.append(TK_MAJ) res.append(t.lower()) return res
5,348,038
def logout():
    """
    Deletes the .pypirc file with your login info.
    Requires you to login again before uploading another package.
    """
    # delete the .pypirc file for better security
    home = os.path.expanduser("~")
    path = os.path.join(home, ".pypirc")
    os.remove(path)
    print("logged out (.pypirc file removed)")
5,348,039
def computes_ts_coverage(k, outputs, two_symbols):
    """
    Computes the input coverage by Two Symbol schematas.

    Args:
        k (int): the number of inputs.
        outputs (list): the list of transition outputs.
        two_symbols (list): The final list of Two Symbol permutable schematas.
            This is returned by `find_two_symbols`.

    Returns:
        coverage (dict): a dictionary of coverage where keys are input states
            and values are lists of the Two Symbols covering that input.
    """
    ts_coverage = {}
    for statenum in range(2 ** k):
        binstate = statenum_to_binstate(statenum, base=k)
        ts_coverage[binstate] = covering_twosymbols = []
        output = int(outputs[statenum])
        if output == 2:
            output = [0, 1]
        else:
            output = [int(outputs[statenum])]
        for t in output:
            for implicant, permut_indxs, same_symbols_indxs in two_symbols[t]:
                if __ts_covers(implicant, permut_indxs, binstate):
                    covering_twosymbols.append((implicant, permut_indxs, same_symbols_indxs))
    return ts_coverage
5,348,040
def parse_NETU( header: atop_helpers.Header, record: atop_helpers.Record, sstat: atop_helpers.SStat, tstats: list[atop_helpers.TStat], ) -> dict: """Retrieves statistics for Atop 'NET' parseable representing network usage on upper interfaces.""" values = { 'timestamp': record.curtime, 'interval': record.interval, 'name': 'upper', 'tcp_pkt_received': sstat.net.tcp.InSegs, 'tcp_pkt_transmitted': sstat.net.tcp.OutSegs, 'udp_pkt_received': sstat.net.udpv4.InDatagrams + sstat.net.udpv6.Udp6InDatagrams, 'udp_pkt_transmitted': sstat.net.udpv4.OutDatagrams + sstat.net.udpv6.Udp6OutDatagrams, 'ip_pkt_received': sstat.net.ipv4.InReceives + sstat.net.ipv6.Ip6InReceives, 'ip_pkt_transmitted': sstat.net.ipv4.OutRequests + sstat.net.ipv6.Ip6OutRequests, 'ip_pkt_delivered': sstat.net.ipv4.InDelivers + sstat.net.ipv6.Ip6InDelivers, 'ip_pkt_forwarded': sstat.net.ipv4.ForwDatagrams + sstat.net.ipv6.Ip6OutForwDatagrams, } yield values
5,348,041
def create_measurements(nh, nv, offset, measurement_type): """Creates necessary measurement details for a given type on a given lattice. Given the lattice size, whether odd or even pairs are being measured, and the measurement type, this function returns a namedtuple with the pairs of qubits to be measured, the circuit preparation function and the measurement_type to be passed to the analysis function. The measurement_type can be: "onsite", "horiz", "vert", "vert0", "vert1" Args: nh -- number of horizontal sites nv -- number of vertical sites offset -- offset taking care of odd vs even pairing measurement_type -- onsite, horizontal or vertical measurement Returns: Measurements namedtuple with measurement (pairs, preparation circuit, analysis type) """ n = nh * nv if measurement_type == "onsite": pairs = [(i, i+n) for i in range(n)] prep = None if measurement_type == "horiz": pairs = [(i+j, i+j+1) for i in range(0, 2*n, nh) for j in range(offset,nh-1,2)] prep = prepH if measurement_type == "vert": pairst = [(i*nh+j, (i+1)*nh+j) for i in range(offset, nv-1, 2) for j in range(nh)] pairst += [(i*nh+j+n, (i+1)*nh+j+n) for i in range(offset, nv-1, 2) for j in range(0, nh)] pairs = [ (map_site_to_JW(nh, nv, site1), map_site_to_JW(nh, nv, site2)) for (site1, site2) in pairst] prep = prepV if measurement_type == "vert0": pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1,n-1,2)] prep = prepV if measurement_type == "vert1": pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1,n-1,2)] prep = prepV2wrap(nh, nv) print(f"Prepped {measurement_type}, pairs={pairs}") return Measurements(pairs=pairs, prep=prep, analysis=measurement_type)
5,348,042
def headline( in_string, surround = False, width = 72, nr_spaces = 2, spacesym = ' ', char = '=', border = None, uppercase = True, ): """return in_string capitalized, spaced and sandwiched: ============================== T E S T =============================== Parameters are the following: * char (one-letter string, default='='): changes the character the title is put between. * surround (boolean, default=False): adds additional lines above and under in_string: ==================================================== ==================== T E S T ===================== ==================================================== * width (int, default=72): defines the width of each line. * nr_spaces (int, default=2): defines number of nr_spaces between in_string and the char as indicated in ..====__T I T L E__====.. . * spacesym (one-letter string, default=' '): instead of using a whitespace to seperate the 'title' letters, one can use every other character, e.g. '_'. * border (either string or list/tuple of two strings; defaults to char): If this is a single character string, it will be used at the left and right end of the headline. If this is multiple character string, it will be used at the left and mirrored at the right. This way you can easily introduce additional space if you prefer and use, for example c style like inline comments with border="/*". If this is not enough for you, the left and right borders can be given seperately, like in border=("<!--", "-->") * uppercase (boolean, default=True): if True, headline will capitalize the letters given by in_string. if False, in_string will be used as it is given. """ if isinstance(border, tuple) or isinstance(border, list): left_border = border[0] right_border = border[1] else: if border is None: border = char left_border = border right_border = border[::-1] nr_sym_spaces = len(left_border + right_border) headline_text = spacesym.join( l.upper() if uppercase else l for l in in_string ) headline_text_sandwiched = '{:{}^{}}'.format( headline_text, spacesym, 2 * (len(in_string) + nr_spaces) - 1 ) headline_without_sym = '{:{}^{}}'.format( headline_text_sandwiched, char, width - nr_sym_spaces ) headline_full = '{1}{0}{2}'.format( headline_without_sym, left_border, right_border ) if surround: line = '{1}{0}{2}'.format( (width - nr_sym_spaces) * char, left_border, right_border ) output = line + '\n' + headline_full + '\n' + line else: output = headline_full return output
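# Usage sketch for headline; the first two calls reproduce the docstring
# examples, the third shows the border/char options.
print(headline('test'))
print(headline('test', surround=True))
print(headline('test', border='/*', char='-'))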
5,348,043
def do_performance_metric_get_os_and_kernel(cs, args):
    """Gets os and kernel information."""
    if not args.server_id:
        raise exceptions.CommandError("you need to specify a server_id")
    performance_metric = \
        cs.performance_metrics.get_os_and_kernel(args.server_id)
    if isinstance(performance_metric, dict):
        utils.print_dict(performance_metric)
    else:
        utils.print_dict(performance_metric._info)
5,348,044
def rainbow_cmd(bot, trigger): """Make text colored. Options are "rainbow", "usa", "commie", and "spooky".""" text = clean(trigger.group(2) or '') scheme = trigger.group(1).lower() if not text: try: msg = SCHEME_ERRORS[scheme] except KeyError: msg = "How did you do that?!" bot.reply(msg) return module.NOLIMIT try: colors = COLOR_SCHEMES[scheme] except KeyError: # not possible to reach this at time of writing, but who knows? # mistakes happen when updating stuff that needs to be changed in parallel bot.reply("I don't know what color sequence to use for '{}'!".format(scheme)) return module.NOLIMIT color_cycle = itertools.cycle(colors) bot.say( ''.join( char if unicodedata.category(char) == 'Zs' else formatting.color(char, next(color_cycle)) for char in text ) )
5,348,045
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True, errors='ignore', separator='&', cls=None): """Parse a querystring and return it as :class:`MultiDict`. Per default only values are decoded into unicode strings. If `decode_keys` is set to `True` the same will happen for keys. Per default a missing value for a key will default to an empty key. If you don't want that behavior you can set `include_empty` to `False`. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. .. versionchanged:: 0.5 In previous versions ";" and "&" could be used for url decoding. This changed in 0.5 where only "&" is supported. If you want to use ";" instead a different `separator` can be provided. The `cls` parameter was added. :param s: a string with the query string to decode. :param charset: the charset of the query string. :param decode_keys: set to `True` if you want the keys to be decoded as well. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. """ if cls is None: cls = MultiDict result = [] for pair in str(s).split(separator): if not pair: continue if '=' in pair: key, value = pair.split('=', 1) else: key = pair value = '' key = _unquote_plus(key) if decode_keys: key = _decode_unicode(key, charset, errors) result.append((key, url_unquote_plus(value, charset, errors))) return cls(result)
5,348,046
def special_loader(as_type: type) -> Type[FullLoader]:
    """Construct new loader class supporting current class structure"""

    class TypedLoader(FullLoader):  # pylint: disable=too-many-ancestors
        """Custom loader with typed resolver"""
        ...

    # we need to add resolver only to the root typed item
    _add_path_resolvers(as_type, TypedLoader)
    return TypedLoader
5,348,047
def test_invalid_subsample_ratio_warning():
    """Assert that TPOT initialization raises a ValueError when the subsample
    ratio is not in the range (0.0, 1.0]."""
    # Invalid ratio
    tpot_obj = TPOTClassifier(subsample=0.0)
    assert_raises(ValueError, tpot_obj._fit_init)
    # Valid ratio
    TPOTClassifier(subsample=0.1)
5,348,048
def listVotes(bot, trigger): """Listar las propuestas hechas.""" plugins_info = bot.db.get_plugin_value(PLUGIN_INFO, "list", []) if (plugins_info != []): plugins_info = json.loads(plugins_info) id = 0 for voteName in plugins_info: id += 1 vote_info = json.loads(bot.db.get_plugin_value(PLUGIN_NAME, voteName)) id_str = "%d" % (id) if (vote_info["type"] == "single"): positiveVotes_str = "%d" % (vote_info["positiveVotes"]) negativeVotes_str = "%d" % (vote_info["negativeVotes"]) bot.say(VOTE_FORMAT_SINGLE % { "id" : id_str, "voteName" : voteName, "proposer" : vote_info["proposer"], "created" : vote_info["created"], "positiveVotes" : positiveVotes_str, "negativeVotes" : negativeVotes_str, "note" : vote_info["note"] }) else: options = [] for key_id, option_dict in vote_info["multi_value"].items(): positiveVotes_str = "%d" % (option_dict["positiveVotes"]) negativeVotes_str = "%d" % (option_dict["negativeVotes"]) options.append(MULTI_TEMPLATE % { "key_id" : key_id, "name" : option_dict["name"], "positiveVotes" : positiveVotes_str, "negativeVotes" : negativeVotes_str }) options_str = ", ".join(options) bot.say(VOTE_FORMAT_MULTI % { "id" : id_str, "voteName" : voteName, "proposer" : vote_info["proposer"], "created" : vote_info["created"], "note" : vote_info["note"], "options" : options_str }) if (id == 0): bot.reply("Lo siento, pero no hay votaciones.")
5,348,049
def try_(func, *args, **kwargs):
    """Try to call a function and return `_default` if it fails

    Note: be careful that in order to have a fallback, you can supply the
    keyword argument `_default`. If you supply anything other than a keyword
    arg, it will result in it being passed to the wrapped function and could
    cause unexpected behavior including always failing with default value
    of None.
    """
    _default_val = kwargs.pop("_default", None)
    try:
        return func(*args, **kwargs)
    except Exception:  # pylint: disable=broad-except
        return _default_val
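# Usage sketch for try_: the fallback must be passed as the _default keyword.
print(try_(int, "42"))                        # 42
print(try_(int, "not a number"))              # None
print(try_(int, "not a number", _default=0))  # 0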
5,348,050
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name): """ Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions` set to True (which is no longer the default value). Then 1) enrolls the user in that course, 2) creates a cohort that the user is placed in, and 3) adds the user to the given role. Returns: a tuple of the created course and the created cohort """ cohort_course = CourseFactory.create( cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True} ) CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id) cohort = CohortFactory.create(course_id=cohort_course.id, users=[user]) _assign_role_to_user(user=user, course_id=cohort_course.id, role=role_name) return [cohort_course, cohort]
5,348,051
def relative_vorticity( u, v, wrap=None, one_sided_at_boundary=False, radius=6371229.0, cyclic=None ): """Calculate the relative vorticity using centred finite differences. The relative vorticity of wind defined on a Cartesian domain (such as a plane projection) is defined as ζcartesian = δv/δx − δu/δy where x and y are points on along the 'X' and 'Y' Cartesian dimensions respectively; and u and v denote the 'X' and 'Y' components of the horizontal winds. If the wind field field is defined on a spherical latitude-longitude domain then a correction factor is included: ζspherical = δv/δx − δu/δy + (u/a)tan(ϕ) where u and v denote the longitudinal and latitudinal components of the horizontal wind field; a is the radius of the Earth; and ϕ is the latitude at each point. The relative vorticity is calculated using centred finite differences (see the *one_sided_at_boundary* parameter). The grid may be global or limited area. If missing values are present then missing values will be returned at points where the centred finite difference could not be calculated. The boundary conditions may be cyclic in longitude. The non-cyclic boundaries may either be filled with missing values or calculated with off-centre finite differences. Reference: H.B. Bluestein, Synoptic-Dynamic Meteorology in Midlatitudes, 1992, Oxford Univ. Press p113-114 :Parameters: u: `Field` A field containing the x-wind. Must be on the same grid as the y-wind. v: `Field` A field containing the y-wind. Must be on the same grid as the x-wind. radius: optional The radius of the sphere when the winds are on a spherical polar coordinate domain. May be any numeric scalar object that can be converted to a `Data` object (which includes numpy array and `Data` objects). By default *radius* has a value of 6371229.0 metres, representing the Earth's radius. If units are not specified then units of metres are assumed. *Parameter example:* Five equivalent ways to set a radius of 6371200 metres: ``radius=6371200``, ``radius=numpy.array(6371200)``, ``radius=cf.Data(6371200)``, ``radius=cf.Data(6371200, 'm')``, ``radius=cf.Data(6371.2, 'km')``. wrap: `bool`, optional Whether the longitude is cyclic or not. By default this is autodetected. one_sided_at_boundary: `bool`, optional If True then if the field is not cyclic off-centre finite differences are calculated at the boundaries, otherwise missing values are used at the boundaries. :Returns: `Field` The relative vorticity calculated with centred finite differences. 
""" if cyclic: _DEPRECATION_ERROR_FUNCTION_KWARGS( "relative_vorticity", {"cyclic": cyclic}, "Use the 'wrap' keyword instead", ) # pragma: no cover # Get the standard names of u and v u_std_name = u.get_property("standard_name", None) v_std_name = v.get_property("standard_name", None) # Copy u and v u = u.copy() v = v.copy() # Get the X and Y coordinates (u_x_key, u_y_key), (u_x, u_y) = get_cartesian_coords(u, "u", ("X", "Y")) (v_x_key, v_y_key), (v_x, v_y) = get_cartesian_coords(v, "v", ("X", "Y")) if not u_x.equals(v_x) or not u_y.equals(v_y): raise ValueError("u and v must be on the same grid.") # Check for lat/long is_latlong = (u_x.Units.islongitude and u_y.Units.islatitude) or ( u_x.units == "degrees" and u_y.units == "degrees" ) # Check for cyclicity if wrap is None: if is_latlong: wrap = u.iscyclic(u_x_key) else: wrap = False # Find the relative vorticity if is_latlong: # Save the units of the X and Y coordinates x_units = u_x.Units y_units = u_y.Units # Change the units of the lat/longs to radians radians = Units("radians") u_x.Units = radians u_y.Units = radians v_x.Units = radians v_y.Units = radians # Find cos and tan of latitude cos_lat = u_y.cos() tan_lat = u_y.tan() # Reshape for broadcasting u_shape = [1] * u.ndim u_y_index = u.get_data_axes().index(u_y_key) u_shape[u_y_index] = u_y.size v_shape = [1] * v.ndim v_y_index = v.get_data_axes().index(v_y_key) v_shape[v_y_index] = v_y.size # Calculate the correction term corr = u.copy() corr *= tan_lat.array.reshape(u_shape) # Calculate the derivatives v.derivative( v_x_key, wrap=wrap, one_sided_at_boundary=one_sided_at_boundary, inplace=True, ) v.data /= cos_lat.array.reshape(v_shape) u.derivative( u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True ) radius = Data.asdata(radius).squeeze() radius.dtype = float if radius.size != 1: raise ValueError(f"Multiple radii: radius={radius!r}") if not radius.Units: radius.override_units(Units("metres"), inplace=True) elif not radius.Units.equivalent(Units("metres")): raise ValueError(f"Invalid units for radius: {radius.Units!r}") # Calculate the relative vorticity. Do v-(u-corr) rather than # v-u+corr to be nice with coordinate reference corner cases. rv = v - (u - corr) rv.data /= radius # Convert the units of latitude and longitude to canonical units rv.dimension_coordinate("X").Units = x_units rv.dimension_coordinate("Y").Units = y_units else: v.derivative( v_x_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True ) u.derivative( u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True ) rv = v - u # Convert the units of relative vorticity to canonical units rv.Units = Units("s-1") # Set the standard name if appropriate and delete the long_name if (u_std_name == "eastward_wind" and v_std_name == "northward_wind") or ( u_std_name == "x_wind" and v_std_name == "y_wind" ): rv.standard_name = "atmosphere_relative_vorticity" else: rv.del_property("standard_name", None) rv.del_property("long_name", None) return rv
5,348,052
def coalesce( edge_index: torch.Tensor, edge_attr: _typing.Union[ torch.Tensor, _typing.Iterable[torch.Tensor], None ] = None, num_nodes: _typing.Optional[int] = ..., is_sorted: bool = False, sort_by_row: bool = True ) -> _typing.Union[ torch.Tensor, _typing.Tuple[torch.Tensor, torch.Tensor], _typing.Tuple[torch.Tensor, _typing.Iterable[torch.Tensor]] ]: """ Row-wise sorts :obj:`edge_index` and removes its duplicated entries. Duplicate entries in :obj:`edge_attr` are directly removed, instead of merged. Args: edge_index (LongTensor): The edge indices. edge_attr (Tensor or List[Tensor], optional): Edge weights or multi- dimensional edge features. If given as a list, will re-shuffle and remove duplicates for all its entries. (default: :obj:`None`) num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) is_sorted (bool, optional): If set to :obj:`True`, will expect :obj:`edge_index` to be already sorted row-wise. sort_by_row (bool, optional): If set to :obj:`False`, will sort :obj:`edge_index` column-wise. :rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else (:class:`LongTensor`, :obj:`Tensor` or :obj:`Iterable[Tensor]]`) """ if not isinstance(num_nodes, int): num_nodes = None try: import torch_geometric return torch_geometric.utils.coalesce( edge_index, edge_attr, num_nodes, is_sorted=is_sorted, sort_by_row=sort_by_row ) except ModuleNotFoundError: return __coalesce( edge_index, edge_attr, num_nodes, is_sorted=is_sorted, sort_by_row=sort_by_row )
5,348,053
def get_label_names(l_json):
    """
    Get names of all the labels in given json

    :param l_json: list of labels jsons
    :type l_json: list
    :returns: list of labels names
    :rtype: list
    """
    llist = []
    for j in l_json:
        llist.append(j['name'])
    return llist
5,348,054
def init_asl_derivatives_wf( bids_root, metadata, output_dir, spaces, scorescrub=False, basil=False, name='asl_derivatives_wf', ): """ Set up a battery of datasinks to store derivatives in the right location. Parameters ---------- bids_root : :obj:`str` Original BIDS dataset path. metadata : :obj:`dict` Metadata dictionary associated to the ASL run. output_dir : :obj:`str` Where derivatives should be written out to. spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences` A container for storing, organizing, and parsing spatial normalizations. Composed of :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references. Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a dictionary with template specifications (e.g., a specification of ``{'resolution': 2}`` would lead to resampling on a 2mm resolution of the space). name : :obj:`str` This workflow's identifier (default: ``func_derivatives_wf``). """ from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow from ...niworkflows.interfaces.utility import KeySelect from ...smriprep.workflows.outputs import _bids_relative nonstd_spaces = set(spaces.get_nonstandard()) workflow = Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=[ 'asl_mask_std', 'asl_mask_t1', 'asl_std', 'asl_std_ref', 'asl_t1', 'asl_t1_ref', 'asl_native', 'asl_native_ref', 'asl_mask_native','confounds', 'confounds_metadata', 'source_file', 'template', 'spatial_reference', 'cbf', 'meancbf', 'score', 'avgscore', 'scrub', 'basil', 'pv', 'cbf_t1', 'meancbf_t1', 'att_t1', 'score_t1', 'avgscore_t1', 'scrub_t1', 'basil_t1', 'pv_t1', 'cbf_std', 'meancbf_std', 'score_std', 'avgscore_std', 'scrub_std', 'basil_std', 'pv_std','att','att_std','qc_file', 'cbf_hvoxf', 'score_hvoxf', 'scrub_hvoxf', 'basil_hvoxf', 'pvc_hvoxf', 'cbf_sc207', 'score_sc207', 'scrub_sc207', 'basil_sc207', 'pvc_sc207', 'cbf_sc217', 'score_sc217', 'scrub_sc217', 'basil_sc217', 'pvc_sc217', 'cbf_sc407', 'score_sc407', 'scrub_sc407', 'basil_sc407', 'pvc_sc407', 'cbf_sc417', 'score_sc417', 'scrub_sc417', 'basil_sc417', 'pvc_sc417' ]), name='inputnode') raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources') raw_sources.inputs.bids_root = bids_root ds_confounds = pe.Node(DerivativesDataSink( base_directory=output_dir, desc='confounds', suffix='regressors'), name="ds_confounds", run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, raw_sources, [('source_file', 'in_files')]), (inputnode, ds_confounds, [('source_file', 'source_file'), ('confounds', 'in_file'), ('confounds_metadata', 'meta_dict')]), ]) qcfile = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='quality_control', suffix='cbf', compress=False), name='qcfile', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, qcfile, [('source_file', 'source_file'), ('qc_file', 'in_file')]), ]) cbf_hvoxf = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_cbf', compress=False), name='cbf_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbf_sc207 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_cbf', 
compress=False), name='cbf_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbf_sc217 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17', suffix='mean_cbf', compress=False), name='cbf_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbf_sc407 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_cbf', compress=False), name='cbf_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbf_sc417 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17', suffix='mean_cbf', compress=False), name='cbf_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, cbf_hvoxf, [('source_file', 'source_file'), ('cbf_hvoxf', 'in_file')]), (inputnode, cbf_sc207, [('source_file', 'source_file'), ('cbf_sc207', 'in_file')]), (inputnode, cbf_sc217, [('source_file', 'source_file'), ('cbf_sc217', 'in_file')]), (inputnode, cbf_sc407, [('source_file', 'source_file'), ('cbf_sc407', 'in_file')]), (inputnode, cbf_sc417, [('source_file', 'source_file'), ('cbf_sc417', 'in_file')]), ]) if scorescrub: score_hvoxf = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_score', compress=False), name='score_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrub_hvoxf = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_scrub', compress=False), name='scrub_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) score_sc207 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_score', compress=False), name='score_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrub_sc207 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_scrub', compress=False), name='scrub_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) score_sc217 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17', suffix='mean_score', compress=False), name='score_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrub_sc217 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17', suffix='mean_scrub', compress=False), name='scrub_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) score_sc407 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_score', compress=False), name='score_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrub_sc407 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_scrub', compress=False), name='scrub_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) score_sc417 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17', suffix='mean_score', compress=False), name='score_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrub_sc417 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17', suffix='mean_scrub', compress=False), name='scrub_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, score_hvoxf, [('source_file', 'source_file'), ('score_hvoxf', 'in_file')]), (inputnode, scrub_hvoxf, [('source_file', 'source_file'), ('scrub_hvoxf', 'in_file')]), (inputnode, score_sc217, [('source_file', 'source_file'), ('score_sc217', 'in_file')]), (inputnode, 
score_sc207, [('source_file', 'source_file'), ('score_sc207', 'in_file')]), (inputnode, scrub_sc207, [('source_file', 'source_file'), ('scrub_sc207', 'in_file')]), (inputnode, scrub_sc217, [('source_file', 'source_file'), ('scrub_sc217', 'in_file')]), (inputnode, score_sc417, [('source_file', 'source_file'), ('score_sc417', 'in_file')]), (inputnode, scrub_sc417, [('source_file', 'source_file'), ('scrub_sc417', 'in_file')]), (inputnode, score_sc407, [('source_file', 'source_file'), ('score_sc407', 'in_file')]), (inputnode, scrub_sc407, [('source_file', 'source_file'), ('scrub_sc407', 'in_file')]), ]) if basil: basil_hvoxf = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_basil', compress=False), name='basil_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvc_hvoxf = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_pvc', compress=False), name='pvc_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) basil_sc207 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_basil', compress=False), name='basil_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvc_sc207 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_pvc', compress=False), name='pvc_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) basil_sc217 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17', suffix='mean_basil', compress=False), name='basil_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvc_sc217 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17', suffix='mean_pvc', compress=False), name='pvc_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) basil_sc407 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_basil', compress=False), name='basil_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvc_sc407 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_pvc', compress=False), name='pvc_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) basil_sc417 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17', suffix='mean_basil', compress=False), name='basil_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvc_sc417 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17', suffix='mean_pvc', compress=False), name='pvc_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, basil_hvoxf, [('source_file', 'source_file'), ('basil_hvoxf', 'in_file')]), (inputnode, pvc_hvoxf, [('source_file', 'source_file'), ('pvc_hvoxf', 'in_file')]), (inputnode, basil_sc207, [('source_file', 'source_file'), ('basil_sc207', 'in_file')]), (inputnode, pvc_sc207, [('source_file', 'source_file'), ('pvc_sc207', 'in_file')]), (inputnode, basil_sc217, [('source_file', 'source_file'), ('basil_sc217', 'in_file')]), (inputnode, pvc_sc217, [('source_file', 'source_file'), ('pvc_sc217', 'in_file')]), (inputnode, basil_sc407, [('source_file', 'source_file'), ('basil_sc407', 'in_file')]), (inputnode, pvc_sc407, [('source_file', 'source_file'), ('pvc_sc217', 'in_file')]), (inputnode, basil_sc417, [('source_file', 'source_file'), ('basil_sc417', 'in_file')]), (inputnode, pvc_sc417, [('source_file', 'source_file'), ('pvc_sc417', 
'in_file')]), ]) if nonstd_spaces.intersection(('func', 'run', 'asl','sbref')): ds_asl_native = pe.Node( DerivativesDataSink( base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName')), name='ds_asl_native', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_native_ref = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True, dismiss_entities=("echo",)), name='ds_asl_native_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_mask_native = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask', compress=True, dismiss_entities=("echo",)), name='ds_asl_mask_native', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbfnative = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True), name='cbfnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meancbfnative = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True), name='meancbfnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, ds_asl_native, [('source_file', 'source_file'), ('asl_native', 'in_file')]), (inputnode, ds_asl_native_ref, [('source_file', 'source_file'), ('asl_native_ref', 'in_file')]), (inputnode, ds_asl_mask_native, [('source_file', 'source_file'), ('asl_mask_native', 'in_file')]), (inputnode, cbfnative, [('source_file', 'source_file'), ('cbf', 'in_file')]), (inputnode, meancbfnative, [('source_file', 'source_file'), ('meancbf', 'in_file')]), ]) if scorescrub: scorenative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf', compress=True), name='scorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meanscorenative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf', compress=True), name='meanscorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrubnative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf', compress=True), name='scrubnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, scorenative, [('source_file', 'source_file'), ('score', 'in_file')]), (inputnode, meanscorenative, [('source_file', 'source_file'), ('avgscore', 'in_file')]), (inputnode, scrubnative, [('source_file', 'source_file'), ('scrub', 'in_file')]), ]) if basil: basilnative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf', compress=True), name='basilnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvnative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf', compress=True), name='pvcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) attnative = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf', compress=True), name='attcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, basilnative, [('source_file', 'source_file'), ('basil', 'in_file')]), (inputnode, pvnative, [('source_file', 'source_file'), ('pv', 'in_file')]), (inputnode, attnative, [('source_file', 'source_file'), ('att', 'in_file')]), (raw_sources, ds_asl_mask_native, [('out', 'RawSources')]), ]) # Resample to T1w space if nonstd_spaces.intersection(('T1w', 'anat')): ds_asl_t1 = pe.Node( DerivativesDataSink( 
base_directory=output_dir, space='T1w', desc='preproc', compress=True, SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'), dismiss_entities=("echo",)), name='ds_asl_t1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_t1_ref = pe.Node( DerivativesDataSink(base_directory=output_dir, space='T1w', suffix='aslref', compress=True, dismiss_entities=("echo",)), name='ds_asl_t1_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_mask_t1 = pe.Node( DerivativesDataSink(base_directory=output_dir, space='T1w', desc='brain', suffix='mask', compress=True, dismiss_entities=("echo",)), name='ds_asl_mask_t1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbfnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='cbf', space='T1w', compress=True), name='cbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meancbfnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', space='T1w', compress=True), name='meancbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, ds_asl_t1, [('source_file', 'source_file'), ('asl_t1', 'in_file')]), (inputnode, ds_asl_t1_ref, [('source_file', 'source_file'), ('asl_t1_ref', 'in_file')]), (inputnode, ds_asl_mask_t1, [('source_file', 'source_file'), ('asl_mask_t1', 'in_file')]), (inputnode, cbfnativet1, [('source_file', 'source_file'), ('cbf_t1', 'in_file')]), (inputnode, meancbfnativet1, [('source_file', 'source_file'), ('meancbf_t1', 'in_file')]), ]) if scorescrub: scorenativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf', space='T1w', compress=True), name='scorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meanscorenativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', desc='score', space='T1w', compress=True), name='meanscorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrubnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf', space='T1w', compress=True), name='scrubnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, scorenativet1, [('source_file', 'source_file'), ('score_t1', 'in_file')]), (inputnode, meanscorenativet1, [('source_file', 'source_file'), ('avgscore_t1', 'in_file')]), (inputnode, scrubnativet1, [('source_file', 'source_file'), ('scrub_t1', 'in_file')]), ]) if basil: basilnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf', space='T1w', compress=True), name='basilnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf', space='T1w', compress=True), name='pvcnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) attnativet1 = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf', space='T1w', compress=True), name='attnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, basilnativet1, [('source_file', 'source_file'), ('basil_t1', 'in_file')]), (inputnode, pvnativet1, [('source_file', 'source_file'), ('pv_t1', 'in_file')]), (inputnode, attnativet1, [('source_file', 'source_file'), ('att_t1', 'in_file')]), ]) workflow.connect([ (raw_sources, ds_asl_mask_t1, [('out', 'RawSources')]), ]) if getattr(spaces, 
'_cached') is None: return workflow # Store resamplings in standard spaces when listed in --output-spaces if spaces.cached.references: from ...niworkflows.interfaces.space import SpaceDataSource spacesource = pe.Node(SpaceDataSource(), name='spacesource', run_without_submitting=True) spacesource.iterables = ('in_tuple', [ (s.fullname, s.spec) for s in spaces.cached.get_standard(dim=(3,)) ]) out_names = ['template', 'asl_std', 'asl_std_ref', 'asl_mask_std', 'cbf_std', 'meancbf_std'] if scorescrub: out_names = out_names + ['score_std', 'avgscore_std', 'scrub_std'] if basil: out_names = out_names + ['basil_std', 'pv_std','att_std'] select_std = pe.Node(KeySelect( fields=out_names), name='select_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_std = pe.Node( DerivativesDataSink( base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'), dismiss_entities=("echo",)), name='ds_asl_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_std_ref = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True, dismiss_entities=("echo",)), name='ds_asl_std_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) ds_asl_mask_std = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask', compress=True, dismiss_entities=("echo",)), name='ds_asl_mask_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) cbfstd = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True), name='cbfstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meancbfstd = pe.Node( DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True), name='meancbfstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, ds_asl_std, [('source_file', 'source_file')]), (inputnode, ds_asl_std_ref, [('source_file', 'source_file')]), (inputnode, ds_asl_mask_std, [('source_file', 'source_file')]), (inputnode, cbfstd, [('source_file', 'source_file')]), (inputnode, meancbfstd, [('source_file', 'source_file')]), (inputnode, select_std, [('asl_std', 'asl_std'), ('asl_std_ref', 'asl_std_ref'), ('asl_mask_std', 'asl_mask_std'), ('cbf_std', 'cbf_std'), ('meancbf_std', 'meancbf_std'), ('template', 'template'), ('spatial_reference', 'keys')]), (spacesource, select_std, [('uid', 'key')]), (select_std, ds_asl_std, [('asl_std', 'in_file')]), (spacesource, ds_asl_std, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, ds_asl_std_ref, [('asl_std_ref', 'in_file')]), (spacesource, ds_asl_std_ref, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, ds_asl_mask_std, [('asl_mask_std', 'in_file')]), (spacesource, ds_asl_mask_std, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, cbfstd, [('cbf_std', 'in_file')]), (spacesource, cbfstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, meancbfstd, [('meancbf_std', 'in_file')]), (spacesource, meancbfstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (raw_sources, ds_asl_mask_std, [('out', 'RawSources')]), ]) if scorescrub: scorestd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf', 
compress=True), name='scorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) meanscorestd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf', compress=True), name='meanscorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) scrubstd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf', compress=True), name='scrubstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, scorestd, [('source_file', 'source_file')]), (inputnode, meanscorestd, [('source_file', 'source_file')]), (inputnode, scrubstd, [('source_file', 'source_file')]), (inputnode, select_std, [ ('score_std', 'score_std'), ('avgscore_std', 'avgscore_std'), ('scrub_std', 'scrub_std')]), (select_std, scorestd, [('score_std', 'in_file')]), (spacesource, scorestd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, meanscorestd, [('avgscore_std', 'in_file')]), (spacesource, meanscorestd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, scrubstd, [('scrub_std', 'in_file')]), (spacesource, scrubstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), ]) if basil: basilstd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf', compress=True), name='basilstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) pvstd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf', compress=True), name='pvcstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) attstd = pe.Node( DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf', compress=True), name='attstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB) workflow.connect([ (inputnode, basilstd, [('source_file', 'source_file')]), (inputnode, pvstd, [('source_file', 'source_file')]), (inputnode, attstd, [('source_file', 'source_file')]), (inputnode, select_std, [ ('basil_std', 'basil_std'), ('pv_std', 'pv_std'), ('att_std', 'att_std')]), (select_std, basilstd, [('basil_std', 'in_file')]), (spacesource, basilstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, pvstd, [('pv_std', 'in_file')]), (spacesource, pvstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), (select_std, attstd, [('att_std', 'in_file')]), (spacesource, attstd, [('space', 'space'), ('cohort', 'cohort'), ('resolution', 'resolution'), ('density', 'density')]), ]) return workflow
5,348,055
def makehash(w=dict):
    """Autovivification, like a hash in Perl.

    http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python

    Usage: build the hash with h = makehash(), then assign directly,
    e.g. h[1][2] = 3. Useful ONLY for a 2-level hash.
    """
    # return defaultdict(makehash)
    return defaultdict(w)
5,348,056
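A minimal usage sketch for makehash above, assuming the same `from collections import defaultdict` import the snippet relies on:

h = makehash()
h["alpha"]["beta"] = 3          # first access autovivifies the inner dict
print(h["alpha"]["beta"])       # 3
# With the default w=dict only two levels autovivify; h[1][2][3] = ... would raise KeyError.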
def sample_parameters(kmodel, tmodel, individual, param_sampler, scaling_parameters, only_stable=True, ): """ Run sampling on first order model """ solution_raw = individual.data.data # Load fluxes and concentrations fluxes = load_fluxes(solution_raw, tmodel, kmodel, density=scaling_parameters.DENSITY, ratio_gdw_gww=scaling_parameters.GDW_GWW_RATIO, concentration_scaling=scaling_parameters.CONCENTRATION_SCALING, time_scaling=scaling_parameters.TIME_SCALING) concentrations = load_concentrations(solution_raw, tmodel, kmodel, concentration_scaling=scaling_parameters.CONCENTRATION_SCALING) # Fetch equilibrium constants load_equilibrium_constants(solution_raw, tmodel, kmodel, concentration_scaling=scaling_parameters.CONCENTRATION_SCALING, in_place=True) parameter_population_lam_mu,\ lamda_max, lamda_min = param_sampler.sample(kmodel, fluxes, concentrations, only_stable = only_stable, min_max_eigenvalues=True) return parameter_population_lam_mu, lamda_max, lamda_min
5,348,057
def load_yaml(fpath):
    """Load settings from a YAML file and return them as a dictionary."""
    with open(fpath, 'r') as f:
        # safe_load avoids the arbitrary-object construction of a bare yaml.load()
        settings = yaml.safe_load(f)
    return settings
5,348,058
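A usage sketch with a hypothetical settings file (path and keys are made up):

# settings.yml might contain, e.g.:
#   batch_size: 32
#   learning_rate: 0.001
settings = load_yaml("settings.yml")
print(settings["batch_size"])   # 32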
def recommendation(agent, other_agent, resource_id, scale, logger, discovery, recency_limit): """ Get recommendations on other agent of third agents and average them to one recommendation value. :param agent: The agent which calculates the popularity. :type agent: str :param other_agent: The other agent for which the popularity value is calculated. :type other_agent: str :param resource_id: The URI of the evaluated resource. :type resource_id: str :param scale: The Scale object to be used by the agent. :type scale: Scale :param logger: The logger object to be used by the agent. :type logger: BasicLogger :param discovery: Addresses of all agents within the scenario. :type discovery: dict :param recency_limit: A datetime object which is used for "forgetting" old history entries :type recency_limit: datetime :return: The Recommendation trust value. :rtype: float or int """ agents_to_ask = [] for third_agent in discovery: if third_agent != agent and third_agent != other_agent: combined = get_combined_direct_experience_for_agent( agent, third_agent, logger, recency_limit, scale) if combined != None and combined >= scale.minimum_to_trust_others(): agents_to_ask.append(third_agent) recommendations = ask_for_recommendations( agent, resource_id, agents_to_ask, scale, logger, discovery, recency_limit) return statistics.median(recommendations) if len(recommendations) > 0 else None
5,348,059
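A worked example of just the final aggregation step in recommendation above, with made-up values returned by three third-party agents:

import statistics

recommendations = [0.4, 0.9, 0.7]
result = statistics.median(recommendations) if recommendations else None
print(result)   # 0.7; an empty list would yield None, as in the function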
def do_predictions_mhctools(work_item_dicts, constant_data=None): """ Each tuple of work items consists of: (work_item_num, peptides, alleles) """ # This may run on the cluster in a way that misses all top level imports, # so we have to re-import everything here. import time import numpy import pandas import numpy.testing import mhctools if constant_data is None: constant_data = GLOBAL_DATA cols = constant_data['cols'] predictor_name = constant_data['args'].predictor results = [] for (i, d) in enumerate(work_item_dicts): work_item_num = d['work_item_num'] peptides = d['peptides'] alleles = d['alleles'] print("Processing work item", i + 1, "of", len(work_item_dicts)) result = {} results.append((work_item_num, result)) if predictor_name == "netmhcpan4-ba": predictor = mhctools.NetMHCpan4( alleles=alleles, program_name="netMHCpan-4.0", mode="binding_affinity") elif predictor_name == "netmhcpan4-el": predictor = mhctools.NetMHCpan4( alleles=alleles, program_name="netMHCpan-4.0", mode="elution_score") elif predictor_name == "mixmhcpred": # Empirically determine supported alleles. mixmhcpred_usable_alleles = [] unusable_alleles = [] for allele in alleles: predictor = mhctools.MixMHCpred(alleles=[allele]) # We use inf not nan to indicate unsupported alleles since # we use nan to indicate incomplete results that still need # to execute. empty_results = pandas.Series(index=peptides, dtype=numpy.float16) empty_results[:] = float('-inf') try: predictor.predict_peptides_dataframe(["PEPTIDESS"]) mixmhcpred_usable_alleles.append(allele) except ValueError: unusable_alleles.append(allele) for col in cols: result["%s %s" % (allele, col)] = empty_results.values print("MixMHCpred usable alleles: ", *mixmhcpred_usable_alleles) print("MixMHCpred unusable alleles: ", *unusable_alleles) predictor = mhctools.MixMHCpred(alleles=mixmhcpred_usable_alleles) assert mixmhcpred_usable_alleles, mixmhcpred_usable_alleles else: raise ValueError("Unsupported", predictor_name) start = time.time() df = predictor.predict_peptides_dataframe(peptides) print("Predicted for %d peptides x %d alleles in %0.2f sec." % ( len(peptides), len(alleles), (time.time() - start))) for (allele, sub_df) in df.groupby("allele"): for col in cols: result["%s %s" % (allele, col)] = ( sub_df[col].values.astype( constant_data['args'].result_dtype)) return results
5,348,060
def inceptionresnetv2(**kwargs): """ InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_epsilon=1e-3, **kwargs)
5,348,061
def convertStringToSysEncoding(strng): """ Convert a string to the current platform file system encoding. Returns the new encoded string. :Args: strng: string String to convert. """ if type(strng) not in [bytes_t, unicode_t]: strng = strng.decode("utf-8") strng = strng.encode(sys.getfilesystemencoding()) return strng
5,348,062
def _n_pow_i(a, b, n):
    """Return (a + b*i)**n as the pair (real, imag)."""
    x = a
    y = b
    for i in range(1, n):
        x1 = (x*a) - (y*b)
        y1 = (y*a) + (x*b)
        x = x1
        y = y1
    return x, y
5,348,063
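A quick sanity check of _n_pow_i against ordinary complex arithmetic:

# (1 + 1j) ** 4 == (-4 + 0j), so the (real, imag) pair should be (-4, 0).
print(_n_pow_i(1, 1, 4))    # (-4, 0)
print((1 + 1j) ** 4)        # (-4+0j)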
async def get_group(msg):
    """Switch the stage to changing the group."""
    await msg.edit_text(
        text="*Выберите группу:*",  # "*Choose a group:*"
        reply_markup=kb.group_keyboard(msg.chat.id)
    )
5,348,064
def IsNameBased(link): """Finds whether the link is name based or not :param str link: :return: True if link is name-based; otherwise, False. :rtype: boolean """ if not link: return False # trimming the leading "/" if link.startswith("/") and len(link) > 1: link = link[1:] # Splitting the link(separated by "/") into parts parts = link.split("/") # First part should be "dbs" if not (parts and parts[0].lower() == "dbs"): return False # The second part is the database id(ResourceID or Name) and cannot be empty if len(parts) < 2 or not parts[1]: return False # Either ResourceID or database name databaseID = parts[1] # Length of databaseID(in case of ResourceID) is always 8 if len(databaseID) != 8: return True return not IsValidBase64String(str(databaseID))
5,348,065
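Illustrative calls (the links are made up; only a length-8 second segment is handed to the base64 check):

print(IsNameBased("/dbs/mydb/colls/mycoll"))   # True: "mydb" is not 8 characters
print(IsNameBased("/colls/mycoll"))            # False: link is not rooted at "dbs"
# An 8-character segment falls through to IsValidBase64String, so the result
# depends on that helper.
print(IsNameBased("/dbs/AbCdEfG="))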
def scrape_number_of_skins(names, save=True): """ Scrapes number of champion skins from League of Legends Wiki and saves them to a csv file, but returns nothing Parameters ---------- names : pandas series Contains the champion names as strings in alphabetical order save : boolean Save number of champion skins to csv file? Returns ------- None """ # Assign scrape path variables style = 'display:inline-block; margin:5px; width:342px' # Set up selenium web driver driver = webdriver.Chrome('./src/utils/chromedriver') # Get number of skins num_skins = [] for name in names: name = name.replace(' ', '_') skins_url = f'https://leagueoflegends.fandom.com/wiki/{name}/Skins' driver.get(skins_url) time.sleep(2) soup = BeautifulSoup(driver.page_source, 'html.parser') num_skins.append(len(soup.find_all('div', {'style': style}))) num_skins = pd.Series(num_skins) # Close selenium web driver driver.close() if save: num_skins.to_csv('./data/num_skins.csv', index=False) # Bye! <3 return
5,348,066
def register_submit(class_name, fire) -> None: """ Register on a form a handler :param class_name: class name of the form :param fire: function that will be fire on form submit :return: None """ def submit_handler(event) -> None: """ Handle form submit and fire handler :param event: Default html form object :return: None """ event.preventDefault() fire() if window.jQuery('.' + class_name).length == 1: return window.jQuery('.' + class_name).on('submit', submit_handler)
5,348,067
def compile(obj: Any) -> Definition: """Extract a definition from a JSON-like object representation.""" return ConcreteValue(obj)
5,348,068
def policy_network(vocab_embed_variable, document_placeholder, label_placeholder): """Build the policy core network. Args: vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings without PAD and UNK document_placeholder: [None,(FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length + FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.max_sent_length] label_placeholder: Gold label [None, FLAGS.max_doc_length, FLAGS.target_label_size], only used during cross entropy training of JP's model. Returns: Outputs of sentence extractor and logits without softmax """ with tf.variable_scope('PolicyNetwork') as scope: ### Full Word embedding Lookup Variable # PADDING embedding non-trainable pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False) # UNK embedding trainable unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True) # Get fullvocab_embed_variable fullvocab_embed_variable = tf.concat(0, [pad_embed_variable, unk_embed_variable, vocab_embed_variable]) # print(fullvocab_embed_variable) ### Lookup layer with tf.variable_scope('Lookup') as scope: document_placeholder_flat = tf.reshape(document_placeholder, [-1]) document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup") document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length + FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.max_sent_length, FLAGS.wordembed_size]) # print(document_word_embedding) ### Convolution Layer with tf.variable_scope('ConvLayer') as scope: document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size]) document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding) # [None, sentembed_size] document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length + FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size]) # print(document_sent_embedding) ### Reshape Tensor to List [-1, (max_doc_length+max_title_length+max_image_length), sentembed_size] -> List of [-1, sentembed_size] with variable_scope.variable_scope("ReshapeDoc_TensorToList"): document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length + FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size) # print(document_sent_embedding) # document_sents_enc document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length] if FLAGS.doc_encoder_reverse: document_sents_enc = document_sents_enc[::-1] # document_sents_ext document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length] # document_sents_titimg document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:] ### Document Encoder with tf.variable_scope('DocEnc') as scope: encoder_outputs, encoder_state = simple_rnn(document_sents_enc) ### Sentence Label Extractor with tf.variable_scope('SentExt') as scope: if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0): # Multiple decoder print("Multiple decoder is not implement yet.") exit(0) # # Decoder to attend captions # attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, 
document_sents_titimg, initial_state=encoder_state) # # Attend previous decoder # logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder) elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0): # Attend only titimages during decoding extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg) elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0): # JP model: attend encoder extractor_outputs, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder) else: # Attend nothing extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state) # print(extractor_output) # print(logits) return extractor_output, logits
5,348,069
def get_feature(file_path: str):
    """Read and parse the given Gherkin feature file."""
    print('Reading feature file ', file_path)
    with open(file_path, "r") as file_obj:
        stream = file_obj.read()
    parser = Parser()
    return parser.parse(TokenScanner(stream))
5,348,070
def hough_lines_draw(img, outfile, peaks, rhos, thetas):
    """ Returns the image with hough lines drawn.

    Args
    - img: Image on which lines will be drawn
    - outfile: The output file. The file will be saved.
    - peaks: peaks returned by hough_peaks
    - rhos: array of rhos used in Hough Space
    - thetas: array of thetas used in Hough Space

    Returns
    - img: after drawing lines on it.
    """
    for peak in peaks:
        rho = rhos[peak[0]]
        theta = thetas[peak[1]] * np.pi / 180.0
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imwrite(outfile, img)
    return img
5,348,071
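A hypothetical call, assuming an accumulator built over these rho/theta axes and peak indices produced by a separate peak finder (all values are made up):

img = cv2.imread("input.png")
rhos = np.linspace(-500, 500, 1001)      # rho axis of the Hough accumulator
thetas = np.arange(-90, 90)              # theta axis in degrees
peaks = [(600, 45), (400, 135)]          # (rho_index, theta_index) pairs
hough_lines_draw(img, "lines.png", peaks, rhos, thetas)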
def cg_file_h(tmpdir):
    """Get render config."""
    return {
        'cg_file': str(tmpdir.join('muti_layer_test.hip'))
    }
5,348,072
def test_enabled():
    """
    Test to verify that the service is enabled
    """
    ret = {"changes": "saltstack", "comment": "", "name": "salt", "result": True}
    mock = MagicMock(return_value={"changes": "saltstack"})
    with patch.object(service, "_enable", mock):
        assert service.enabled("salt") == ret
        assert service.__context__ == {"service.state": "enabled"}
5,348,073
def tags(ui, repo): """list repository tags This lists both regular and local tags. When the -v/--verbose switch is used, a third column "local" is printed for local tags. """ hexfunc = ui.debugflag and hex or short tagtype = "" for t, n in reversed(repo.tagslist()): if ui.quiet: ui.write("%s\n" % t) continue try: hn = hexfunc(n) r = "%5d:%s" % (repo.changelog.rev(n), hn) except error.LookupError: r = " ?:%s" % hn else: spaces = " " * (30 - encoding.colwidth(t)) if ui.verbose: if repo.tagtype(t) == 'local': tagtype = " local" else: tagtype = "" ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
5,348,074
def core_profiles_currents( ods, time_index=None, rho_tor_norm=None, j_actuator='default', j_bootstrap='default', j_ohmic='default', j_non_inductive='default', j_total='default', warn=True, ): """ This function sets currents in ods['core_profiles']['profiles_1d'][time_index] If provided currents are inconsistent with each other or ods, ods is not updated and an error is thrown. Updates integrated currents in ods['core_profiles']['global_quantities'] (N.B.: `equilibrium` IDS is required for evaluating j_tor and integrated currents) :param ods: ODS to update in-place :param time_index: ODS time index to updated if None, all times are updated :param rho_tor_norm: normalized rho grid upon which each j is given For each j: - ndarray: set in ods if consistent - 'default': use value in ods if present, else set to None - None: try to calculate from currents; delete from ods if you can't :param j_actuator: Non-inductive, non-bootstrap current <J.B>/B0 N.B.: used for calculating other currents and consistency, but not set in ods :param j_bootstrap: Bootstrap component of <J.B>/B0 :param j_ohmic: Ohmic component of <J.B>/B0 :param j_non_inductive: Non-inductive component of <J.B>/B0 Consistency requires j_non_inductive = j_actuator + j_bootstrap, either as explicitly provided or as computed from other components. :param j_total: Total <J.B>/B0 Consistency requires j_total = j_ohmic + j_non_inductive either as explicitly provided or as computed from other components. """ # run an all time slices if time_index is None if time_index is None: for itime in ods['core_profiles.profiles_1d']: core_profiles_currents( ods, time_index=itime, rho_tor_norm=rho_tor_norm, j_actuator=j_actuator, j_bootstrap=j_bootstrap, j_ohmic=j_ohmic, j_non_inductive=j_non_inductive, j_total=j_total, warn=warn, ) return from scipy.integrate import cumtrapz prof1d = ods['core_profiles.profiles_1d'][time_index] if rho_tor_norm is None: rho_tor_norm = prof1d['grid.rho_tor_norm'] # SETUP DEFAULTS data = {} with omas_environment(ods, coordsio={'core_profiles.profiles_1d.%d.grid.rho_tor_norm' % time_index: rho_tor_norm}): for j in ['j_actuator', 'j_bootstrap', 'j_non_inductive', 'j_ohmic', 'j_total']: if isinstance(eval(j), str) and eval(j) == 'default': if j in prof1d: data[j] = copy.deepcopy(prof1d[j]) elif (j == 'j_actuator') and 'core_sources' in ods: data[j] = get_j_actuator_from_core_sources(ods) elif (j == 'j_actuator') and (('j_bootstrap' in prof1d) and ('j_non_inductive' in prof1d)): data['j_actuator'] = prof1d['j_non_inductive'] - prof1d['j_bootstrap'] else: data[j] = None else: data[j] = eval(j) j_actuator = data['j_actuator'] j_bootstrap = data['j_bootstrap'] j_ohmic = data['j_ohmic'] j_non_inductive = data['j_non_inductive'] j_total = data['j_total'] # ================= # UPDATE FORWARD # ================= # j_non_inductive if (j_actuator is not None) and (j_bootstrap is not None): if j_non_inductive is None: j_non_inductive = j_actuator + j_bootstrap # j_total if (j_ohmic is not None) and (j_non_inductive is not None): if j_total is None: j_total = j_ohmic + j_non_inductive # get some quantities we'll use below if 'equilibrium.time_slice.%d' % time_index in ods: eq = ods['equilibrium']['time_slice'][time_index] if 'core_profiles.vacuum_toroidal_field.b0' in ods: B0 = ods['core_profiles']['vacuum_toroidal_field']['b0'][time_index] elif 'equilibrium.vacuum_toroidal_field.b0' in ods: R0 = ods['equilibrium']['vacuum_toroidal_field']['r0'] B0 = ods['equilibrium']['vacuum_toroidal_field']['b0'][time_index] 
ods['core_profiles']['vacuum_toroidal_field']['r0'] = R0 ods.set_time_array('core_profiles.vacuum_toroidal_field.b0', time_index, B0) fsa_invR = omas_interp1d(rho_tor_norm, eq['profiles_1d']['rho_tor_norm'], eq['profiles_1d']['gm9']) else: # can't do any computations with the equilibrium if warn: printe("Warning: ods['equilibrium'] does not exist: Can't convert between j_total and j_tor or calculate integrated currents") eq = None # j_tor if (j_total is not None) and (eq is not None): JparB_tot = j_total * B0 JtoR_tot = transform_current(rho_tor_norm, JparB=JparB_tot, equilibrium=eq, includes_bootstrap=True) j_tor = JtoR_tot / fsa_invR else: j_tor = None # ================= # UPDATE BACKWARD # ================= if j_total is not None: # j_non_inductive if (j_non_inductive is None) and (j_ohmic is not None): j_non_inductive = j_total - j_ohmic # j_ohmic elif (j_ohmic is None) and (j_non_inductive is not None): j_ohmic = j_total - j_non_inductive if j_non_inductive is not None: # j_actuator if (j_actuator is None) and (j_bootstrap is not None): j_actuator = j_non_inductive - j_bootstrap # j_bootstrap if (j_bootstrap is None) and (j_actuator is not None): j_bootstrap = j_non_inductive - j_actuator # =============== # CONSISTENCY? # =============== if (j_actuator is not None) and (j_bootstrap is None): err = "Cannot set j_actuator without j_bootstrap provided or calculable" raise RuntimeError(err) # j_non_inductive err = 'j_non_inductive inconsistent with j_actuator and j_bootstrap' if (j_non_inductive is not None) and ((j_actuator is not None) or (j_bootstrap is not None)): assert numpy.allclose(j_non_inductive, j_actuator + j_bootstrap), err # j_total err = 'j_total inconsistent with j_ohmic and j_non_inductive' if (j_total is not None) and ((j_ohmic is not None) or (j_non_inductive is not None)): assert numpy.allclose(j_total, j_ohmic + j_non_inductive), err # j_tor err = 'j_tor inconsistent with j_total' if (j_total is not None) and (j_tor is not None): if eq is not None: JparB_tot = j_total * B0 JtoR_tot = transform_current(rho_tor_norm, JparB=JparB_tot, equilibrium=eq, includes_bootstrap=True) assert numpy.allclose(j_tor, JtoR_tot / fsa_invR), err else: if warn: printe("Warning: ods['equilibrium'] does not exist") printe(" can't determine if " + err) # ============= # UPDATE ODS # ============= with omas_environment(ods, coordsio={'core_profiles.profiles_1d.%d.grid.rho_tor_norm' % time_index: rho_tor_norm}): for j in ['j_bootstrap', 'j_non_inductive', 'j_ohmic', 'j_total', 'j_tor']: if eval(j) is not None: prof1d[j] = eval(j) elif j in prof1d: del prof1d[j] # ====================== # INTEGRATED CURRENTS # ====================== if eq is None: # can't integrate currents without the equilibrium return # Calculate integrated currents rho_eq = eq['profiles_1d']['rho_tor_norm'] vp = eq['profiles_1d']['dvolume_dpsi'] psi = eq['profiles_1d']['psi'] fsa_invR = eq['profiles_1d']['gm9'] with omas_environment(ods, coordsio={'core_profiles.profiles_1d.%d.grid.rho_tor_norm' % time_index: rho_eq}): currents = [('j_bootstrap', 'current_bootstrap', True), ('j_non_inductive', 'current_non_inductive', True), ('j_tor', 'ip', False)] for Jname, Iname, transform in currents: if Jname in prof1d: J = prof1d[Jname] if transform: # transform <J.B>/B0 to <Jt/R> J = transform_current(rho_eq, JparB=J * B0, equilibrium=eq, includes_bootstrap=True) else: # already <Jt/R>/<1/R> J *= fsa_invR ods.set_time_array('core_profiles.global_quantities.%s' % Iname, time_index, cumtrapz(vp * J, psi)[-1] / (2.0 * numpy.pi)) elif 
'core_profiles.global_quantities.%s' % Iname in ods: # set current to zero if this time_index exists already if time_index < len(ods['core_profiles.global_quantities.%s' % Iname]): ods['core_profiles.global_quantities.%s' % Iname][time_index] = 0.0 return
5,348,075
def group_by_until( key_mapper: Mapper[_T, _TKey], element_mapper: Optional[Mapper[_T, _TValue]], duration_mapper: Callable[[GroupedObservable[_TKey, _TValue]], Observable[Any]], subject_mapper: Optional[Callable[[], Subject[_TValue]]] = None, ) -> Callable[[Observable[_T]], Observable[GroupedObservable[_TKey, _TValue]]]: """Groups the elements of an observable sequence according to a specified key mapper function. A duration mapper function is used to control the lifetime of groups. When a group expires, it receives an OnCompleted notification. When a new element with the same key value as a reclaimed group occurs, the group will be reborn with a new lifetime request. .. marble:: :alt: group_by_until --1--2--a--3--b--c-| [ group_by_until() ] -+-----+-----------| +a-----b--c-| +1--2-----3-------| Examples: >>> group_by_until(lambda x: x.id, None, lambda : reactivex.never()) >>> group_by_until( lambda x: x.id, lambda x: x.name, lambda grp: reactivex.never() ) >>> group_by_until( lambda x: x.id, lambda x: x.name, lambda grp: reactivex.never(), lambda: ReplaySubject() ) Args: key_mapper: A function to extract the key for each element. element_mapper: A function to map each source element to an element in an observable group. duration_mapper: A function to signal the expiration of a group. subject_mapper: A function that returns a subject used to initiate a grouped observable. Default mapper returns a Subject object. Returns: An operator function that takes an observable source and returns a sequence of observable groups, each of which corresponds to a unique key value, containing all elements that share that same key value. If a group's lifetime expires, a new group with the same key value can be created once an element with such a key value is encountered. """ from ._groupbyuntil import group_by_until_ return group_by_until_(key_mapper, element_mapper, duration_mapper, subject_mapper)
5,348,076
def GetFilesystemSize(options, image_type, layout_filename, num): """Returns the filesystem size of a given partition for a given layout type. If no filesystem size is specified, returns the partition size. Args: options: Flags passed to the script image_type: Type of image eg base/test/dev/factory_install layout_filename: Path to partition configuration file num: Number of the partition you want to read from Returns: Size of selected partition filesystem in bytes """ partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) partition = GetPartitionByNumber(partitions, num) if 'fs_bytes' in partition: return partition['fs_bytes'] else: return partition['bytes']
5,348,077
def match_pairs(obj_match, params): """ Matches objects into pairs given a disparity matrix and removes bad matches. Bad matches have a disparity greater than the maximum threshold. """ # Create a list of sets, where the i-th set will store the objects # from image1 that have merged with objects in image2 # Maybe faster to use a 2D array? obj_merge = np.zeros(obj_match.shape, dtype=bool) # Determine optimal pairs pairs = optimize.linear_sum_assignment(obj_match) for id1 in pairs[0]: if obj_match[id1, pairs[1][id1]] > params['MAX_DISPARITY']: # Set to -1 if object has died (or merged) pairs[1][id1] = -1 # Find the closest object in image2 to object with id1 id2 = np.argmin(obj_match[id1]) # If this object was in the search radius of object id1, # add object id1 to obj_merge[id2]. if obj_match[id1, id2] < LARGE_NUM: obj_merge[id1, id2] = True pairs = pairs[1] + 1 # ids in current_objects are 1-indexed return pairs, obj_merge
5,348,078
def test_handle_response_for_invalid_content(mock_get, response_dir): """If invalid content is returned, store warning log entry""" # arrange url = 'http://digital.bibliothek.uni-halle.de/hd/oai/?verb=GetRecord&metadataPrefix=mets&mode=xml&identifier=foo' mock_get.return_value.status_code = 200 mock_get.return_value.content = b'foo bar' headers = {'Content-Type': 'text/plain'} mock_get.return_value.headers = headers resolver = Resolver() initLogging() # capture log log = getLogger('ocrd_models.utils.handle_oai_response') capt = FIFOIO(256) sh = StreamHandler(capt) sh.setFormatter(Formatter(LOG_FORMAT)) log.addHandler(sh) # act resolver.download_to_directory(response_dir, url) # assert behavior mock_get.assert_called_once_with(url) log_output = capt.getvalue() assert 'WARNING ocrd_models.utils.handle_oai_response' in log_output
5,348,079
def spike_train_convolution(spike_times, interval, dt, sigma):
    """
    Needed for Schreiber reliability measure
    """
    N = int(np.floor((interval[1]-interval[0])/dt)+1)
    x = np.linspace(interval[0], interval[1], N)
    s = np.zeros(N)
    for spike in spike_times:
        s = s + gaussian(x, spike, sigma)
    return s
5,348,080
def part_4() -> None: """ Spam Classification: - Preprocessing emails. - Making a vocabulary list. - Extracting features from emails. - Training SVM for spam classification. Returns: None """ vocabulary = readers.read_vocabulary(DATA_PATH_5) tokens = readers.read_tokens(DATA_PATH_4, vocabulary) feature_vector_len = len(vocabulary) features = algorithms.extract_features(tokens, feature_vector_len) non_zero_count = np.count_nonzero(features) print(f"Length of feature vector is {feature_vector_len}") print(f"Number of non-zero entries is {non_zero_count}") x, y = readers.read_data(DATA_PATH_6) svm_function = algorithms.train_svm( x, y, C=0.1, coef0=0.0, decision_function_shape="ovr", degree=3, gamma="auto", kernel="linear", ) predictions = svm_function.predict(x) print(f"Training accuracy: {np.mean(predictions == y.flatten()) * 100}") x_test, y_test = readers.read_test_data(DATA_PATH_7) predictions = svm_function.predict(x_test) print(f"Test accuracy: {np.mean(predictions == y_test.flatten()) * 100}") weights = svm_function.coef_[0] data_frame = pd.DataFrame({"vocabulary": vocabulary, "weights": weights}) print(data_frame.sort_values(by="weights", ascending=False).head())
5,348,081
def p_shift_expression(p): """shift_expression : additive_expression | shift_expression LEFT_OP additive_expression | shift_expression RIGHT_OP additive_expression""" if len(p) == 2: p[0] = p[1] else: fname, entry, args = resolve_function_name_uniform_types(p[2], [p[1], p[3]]) p[0] = { "value": fname, "type": entry["return type"], "arguments": args, "kind": "FUNCTION CALL", } nvar = get_tmp_var(p[0]["type"]) codes = [] for _a in args: if len(_a["code"]) == 0: continue codes += _a["code"] _a["code"] = [] # p[0]["code"] = codes + [[p[0]["kind"], p[0]["type"], p[0]["value"], p[0]["arguments"], nvar]] p[0]["code"] = codes + [ [ nvar, ":=", p[0]["arguments"][0]["value"], p[2], p[0]["arguments"][1]["value"], ] ] p[0]["value"] = nvar del p[0]["arguments"] # p[0] = ("shift_expression",) + tuple(p[-len(p) + 1 :])
5,348,082
def touch(file): """ update a file's access/modifications times Attempts to update the access/modifications times on a file. If the file does not exist, it will be created. This utility call operates in the same fashion as the ``touch`` system command. An example when using in the context of script helpers is as follows: .. code-block:: python if releng_touch('my-file'): print('file was created') else: print('file was not created') Args: file: the file Returns: ``True`` if the file was created/updated; ``False`` if the file could not be created/updated """ try: parent_dir = os.path.dirname(file) if parent_dir and not os.path.isdir(parent_dir): ensure_dir_exists(parent_dir) with open(file, 'ab'): os.utime(file, None) return True except OSError: return False
5,348,083
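A small usage sketch of touch with a made-up path:

if touch('build/output/.stamp'):
    print('file was created or updated')
else:
    print('file was not created')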
def interpolate_peak(spectrum: list, peak: int) -> float:
    """
    Uses quadratic interpolation of spectral peaks to get a better
    estimate of the peak.

    Args:
    - spectrum: the frequency bin to analyze.
    - peak: the location of the estimated peak in the spectrum list.

    Based off:
    https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
    """
    prev_neighbour = spectrum[peak-1]
    next_neighbour = spectrum[peak+1]
    peak_value = spectrum[peak]
    # Parabolic interpolation from the reference:
    # offset = 0.5 * (alpha - gamma) / (alpha - 2*beta + gamma)
    #        = 0.5 * (gamma - alpha) / (2*beta - alpha - gamma)
    offset = 0.5 * (next_neighbour - prev_neighbour) / (
        2 * peak_value - prev_neighbour - next_neighbour)
    return abs(peak + offset)
5,348,084
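A quick numeric check with made-up bin values:

# Symmetric neighbours -> zero offset, estimate stays at index 2.
print(interpolate_peak([0.0, 3.0, 4.0, 3.0, 0.0], 2))   # 2.0
# A larger right-hand neighbour pulls the estimate just above index 2.
print(interpolate_peak([0.0, 2.0, 4.0, 3.0, 0.0], 2))   # ~2.167 with the 0.5 factor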
def _check_trunk_switchport( dut, check, expd_status: SwitchportTrunkExpectation, msrd_status: dict ) -> tr.CheckResultsCollection: """ This function validates a trunk switchport against the expected values. These checks include matching on the native-vlan and trunk-allowed-vlans. """ results = list() device = dut.device e_nvl_id = expd_status.native_vlan.vlan_id if expd_status.native_vlan else None m_nvl_id = msrd_status["trunkingNativeVlanId"] if e_nvl_id and (e_nvl_id != m_nvl_id): results.append( tr.CheckFailFieldMismatch( device=device, check=check, field="native_vlan", expected=e_nvl_id, measurement=m_nvl_id, ) ) # EOS stores this as a CSV string, with ranges, for example: # 14,16,25-26,29 e_tr_allowed_vids = sorted( [vlan.vlan_id for vlan in expd_status.trunk_allowed_vlans] ) # conver the list of vlan-ids to a range string for string comparison # purposes. e_tr_alwd_vstr = range_string(e_tr_allowed_vids) m_tr_alwd_vstr = msrd_status["trunkAllowedVlans"] # if there no expected allowed vlans on this trunk, then set the expected # value to "NONE" since that is what EOS reports in this case. if not e_tr_alwd_vstr: e_tr_alwd_vstr = "NONE" if e_tr_alwd_vstr != m_tr_alwd_vstr: results.append( tr.CheckFailFieldMismatch( device=device, check=check, field="trunk_allowed_vlans", expected=e_tr_alwd_vstr, measurement=m_tr_alwd_vstr, ) ) return results
5,348,085
def is_valid_compressed(file):
    """Check that a ZIP archive is valid (not corrupt)."""
    try:
        archive = ZipFile(file, 'r')
        try:
            corrupt = archive.testzip()
        except zlib_error:
            corrupt = True
        archive.close()
    except BadZipfile:
        corrupt = True
    return not corrupt
5,348,086
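A usage sketch, assuming the imports the snippet above relies on (the archive name is made up):

from zipfile import ZipFile, BadZipfile
from zlib import error as zlib_error

print(is_valid_compressed("upload.zip"))   # True for an intact archive, False if corrupt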
def Krsol_SP_pt(SP,pt): """ Krsol_SP_pt solubility of Kr in seawater ========================================================================== USAGE: Krsol = sol.Krsol_SP_pt(SP,pt) DESCRIPTION: Calculates the krypton, Kr, concentration expected at equilibrium with air at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar) including saturated water vapor. This function uses the solubility coefficients derived from the data of Weiss (1971). Note that this algorithm has not been approved by IOC and is not work from SCOR/IAPSO Working Group 127. It is included in the GSW Oceanographic Toolbox as it seems to be oceanographic best practice. INPUT: SP = Practical Salinity (PSS-78) [ unitless ] pt = potential temperature (ITS-90) referenced [ deg C ] to one standard atmosphere (0 dbar). SP & pt need to have the same dimensions. OUTPUT: Krsol = solubility of krypton in micro-moles per kg [ umol/kg ] AUTHOR: Roberta Hamme, Paul Barker and Trevor McDougall [ [email protected] ] REFERENCES: IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of seawater - 2010: Calculation and use of thermodynamic properties. Intergovernmental Oceanographic Commission, Manuals and Guides No. 56, UNESCO (English), 196 pp. Available from http://www.TEOS-10.org Weiss, R.F. and T.K. Kyser, 1978: Solubility of Krypton in Water and Seawater. J. Chem. Thermodynamics, 23, 69-72. The software is available from http://www.TEOS-10.org ========================================================================== """ x = SP # Note that salinity argument is Practical Salinity, this is # beacuse the major ionic components of seawater related to Cl # are what affect the solubility of non-electrolytes in seawater. pt68 = pt * 1.00024 # pt68 is the potential temperature in degress C on # the 1968 International Practical Temperature Scale IPTS-68. y = pt68 + K0 y_100 = y * 1e-2 # Table 2 (Weiss and Kyser, 1978) a = (-112.6840, 153.5817, 74.4690, -10.0189) b = (-0.011213, -0.001844, 0.0011201) Krsol_mL = np.exp(a[0] + a[1] * 100/y + a[2] * np.log(y_100) + a[3] * \ y_100 + x * (b[0] + y_100 * (b[1] + b[2] * y_100))) # mL/kg to umol/kg for Kr (1/22.3511e-3) #Molar volume at STP (Dymond and Smith, 1980). Krsol = Krsol_mL * 4.474052731185490e1 return Krsol
5,348,087
def get_node_element(tree_element, tag, key=None): """ FIXME: This is an ugly function that should be refactored. It wqs written to create the same function for getting either an attribute or a subelement for an element. :param tree_element: Element object from the ElementTree package :param tag: subelement of the tree_element :param key: key for value to be returned from the 'branch' subelements :return: either text from the element or value from the attribute """ if key: branch_elements = tree_element.findall('branch') for each_element in branch_elements: if key in each_element.attrib.keys(): return each_element.attrib[key] raise BranchElementError(tree_element.find('name').text, key) if tree_element.find(tag) is not None: text = tree_element.find(tag).text return text else: raise NoAttributeTypeName(tree_element.find('name').text, tag)
5,348,088
def UseNetwork(weights_f, load_weights=False): """Use DenseModel. Args: weights_f: weight file location. load_weights: load weights when it is True. """ model = QDenseModel(weights_f, load_weights) batch_size = BATCH_SIZE (x_train_, y_train_), (x_test_, y_test_) = mnist.load_data() x_train_ = x_train_.reshape(60000, RESHAPED) x_test_ = x_test_.reshape(10000, RESHAPED) x_train_ = x_train_.astype("float32") x_test_ = x_test_.astype("float32") x_train_ /= 255 x_test_ /= 255 print(x_train_.shape[0], "train samples") print(x_test_.shape[0], "test samples") y_train_ = to_categorical(y_train_, NB_CLASSES) y_test_ = to_categorical(y_test_, NB_CLASSES) if not load_weights: model.fit( x_train_, y_train_, batch_size=batch_size, epochs=NB_EPOCH, verbose=VERBOSE, validation_split=VALIDATION_SPLIT) if weights_f: model.save_weights(weights_f) score = model.evaluate(x_test_, y_test_, verbose=VERBOSE) print_qstats(model) print("Test score:", score[0]) print("Test accuracy:", score[1])
5,348,089
def find_title(item):
    """Title of the video"""
    title = item['snippet']['title']
    return title
5,348,090
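The expected input shape, using a made-up YouTube API style item:

item = {'snippet': {'title': 'My first video'}}
print(find_title(item))   # 'My first video'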
def calc_fingerprint(text):
    """Return a hex string that fingerprints `text` (expected as bytes)."""
    return hashlib.sha1(text).hexdigest()
5,348,091
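Usage sketch; under Python 3 the argument must be bytes, so encode text first:

import hashlib

digest = calc_fingerprint(b"hello world")
print(len(digest))   # 40 hex characters from SHA-1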
def yolo_collate_fn( batch: List[Any], ) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]: """ Collate function to be used for creating a DataLoader with values for Yolo model input. :param batch: a batch of data points and annotations transformed by bounding_box_and_labels_to_yolo_fmt :return: the batch stacked as tensors for all values except for the original annotations """ images = [] targets = [] annotations = [] for idx, (image, (target, annotation)) in enumerate(batch): images.append(image.unsqueeze(0)) img_label = torch.ones(target.size(0), 1) * idx targets.append(torch.cat((img_label, target), 1)) annotations.append(annotation) images = torch.cat(images, 0) targets = torch.cat(targets, 0) return images, (targets, annotations)
5,348,092
def delete_category(category_id): """Delete a category.""" category = session.query(Category).filter_by(id=category_id).first() if 'username' not in login_session: flash("Please log in to continue.") return redirect(url_for('login')) if not exists_category(category_id): flash("We are unable to process your request right now.") return redirect(url_for('home')) # If the logged in user does not have authorisation to # edit the category, redirect to homepage. if login_session['user_id'] != category.user_id: flash("We are unable to process your request right now.") return redirect(url_for('home')) if request.method == 'POST': session.delete(category) session.commit() flash("Category successfully deleted!") return redirect(url_for('home')) else: return render_template("delete_category.html", category=category)
5,348,093
def ec2_add_priv_launch_key(argument_table, operation_model, session, **kwargs):
    """
    This handler gets called after the argument table for the
    operation has been created.  Its job is to add the
    ``priv-launch-key`` parameter.
    """
    argument_table['priv-launch-key'] = LaunchKeyArgument(
        session, operation_model, 'priv-launch-key')
5,348,094
def get_all_users_of(fx_module: GraphModule, index: int) -> List[int]: """Given the graph(fx_module) and an index, return a list of all node indexes that use this node""" graph = fx_module.graph current_node = graph.nodes[index] user_indexes: List[int] = [] """if the node A is in node B's args, then B is the user of A go through all the nodes, if the input node in any node's args, then that node is the input node's user """ for i, n in enumerate(graph.nodes): if find_use(n.args, current_node) or find_use(n.kwargs, current_node): user_indexes.append(i) return user_indexes
5,348,095
def insert_from( table_name, into_table_name, column_names=None, join_columns=None, create_if_not_exists=False, engine=None ): """ Inserts records from one table into another :param table_name: the name of the table from which to insert records :param into_table_name: the name of the table into which the records will go :param column_names: an optional reduced list of column names to specify for insertion :param join_columns: one or more column names that constitute unique records, not to be inserted :param create_if_not_exists: if True, create into_table_name if it doesn't exist, otherwise exit with warning :param engine: an optional sqlalchemy.engine to use in the UPDATE query """ both_tables = get_tables(engine=engine) from_table = both_tables.get(table_name) into_table = both_tables.get(into_table_name) validate_table_name(from_table, table_name) if not table_exists(into_table): if not create_if_not_exists: raise ValueError(f"No table named {into_table_name} to insert into") return select_from(table_name, into_table_name, column_names, engine=engine) # Validate parameters for excluding unique records if isinstance(join_columns, str): join_columns = [c.strip() for c in join_columns.split(",")] if join_columns: validate_columns_in( from_table, join_columns, empty_table=table_name, message=f"Join columns missing in source table {table_name}" ) validate_columns_in( into_table, join_columns, empty_table=into_table_name, message=f"Join columns missing in target table {into_table_name}" ) # Prepare column names to be inserted log_message = f"insert_from: populating {into_table_name} from {table_name}" from_cols = from_table.columns into_cols = into_table.columns if isinstance(column_names, str): column_names = column_names.split(",") if column_names is None or "*" in column_names: log_message += f", with all columns in {table_name}" insert_cols = from_cols else: log_message += f", with specified columns in {table_name}" insert_cols = [c for c in from_cols if c.name in column_names] if not insert_cols: logger.warning("insert_from: no columns to insert") return elif column_names and len(column_names) > len(insert_cols): target_cols = set(c.name for c in insert_cols) ignore_cols = ", ".join(set(column_names).difference(target_cols)) logger.warning(f"insert_from: ignoring columns: {ignore_cols}") # Prepare query with specified columns and filtering if not join_columns: insert_vals = Select(insert_cols).select_from(from_table) else: log_message += f", excluding those matching: {join_columns}" # Exclude records matching specified columns via outer join insert_from = from_table.outerjoin( into_table, and_(*[from_cols[col] == into_cols[col] for col in join_columns]) ) insert_vals = ( Select(insert_cols) .select_from(insert_from) .where(and_(*[into_cols[col].is_(None) for col in join_columns])) ) logger.info(log_message) insert_from = Insert(into_table).from_select(names=[c.name for c in insert_cols], select=insert_vals) with from_table.bind.connect() as conn: conn.execute(insert_from.execution_options(autocommit=True))
5,348,096
def format(color, style=''):
    """Return a QTextCharFormat with the given attributes.
    """
    _color = QColor()
    _color.setNamedColor(color)

    _format = QTextCharFormat()
    _format.setForeground(_color)
    if 'bold' in style:
        _format.setFontWeight(QFont.Bold)
    if 'italic' in style:
        _format.setFontItalic(True)

    return _format
5,348,097
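A common way such a helper appears in PyQt syntax-highlighter examples, sketched here with illustrative style names (assumes the QColor/QTextCharFormat/QFont imports used by format above):

STYLES = {
    'keyword': format('blue', 'bold'),
    'string': format('magenta'),
    'comment': format('darkGreen', 'italic'),
}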
def array_to_mincvolume(filename, array, like, volumeType=None, dtype=None, labels=None, write=True, close=False): """ Create a mincVolume from a data array. Create a mincVolume from a data array, using coordinate system information from another volume. Parameters ---------- filname : str A path to the new MINC volume. array : array_like Input array to convert to mincVolume. like : mincVolume or str Either an existing mincVolume object, or a path to one on disk. volumeType : str, optional MINC type. The default is None. If no value is given (default), then volumeType will be set as ushort if the dtype is a subtype of np.integer, otherwise volumeType will be set as double. dtype : np.dtype, optional Datatype for the mincVolume data array. The default is None. If no value is given (default), the dtype of array is used. labels : bool, optional Does the output mincVolume represent integer labels? The default is None. If no value is given (default), then labels will be set as True if the dtype is a subtype of np.integer, otherwise labels will be set as False. write : bool, optional Should the mincVolume be written to disk? Default is True. close : bool, optional Should the mincVolume be closed? Default is False. Returns ------- outvol : mincVolume An object of mincVolume type. """ if dtype is None: dtype = array.dtype if labels is None: if np.issubdtype(array.dtype, np.integer): labels = True else: labels = False if volumeType is None: if np.issubdtype(array.dtype, np.integer): volumeType='ushort' else: volumeType='double' if like.__class__ == mincVolume: outvol = volumeFromData(outputFilename=filename, data=array, dimnames=like.getDimensionNames(), starts=like.getStarts(), steps=like.getSeparations(), volumeType=volumeType, dtype=dtype, labels=labels, x_dir_cosines=[i for i in like._x_direction_cosines], y_dir_cosines=[i for i in like._y_direction_cosines], z_dir_cosines=[i for i in like._z_direction_cosines], ) # Set dimnames and starts outvol.starts = like.getStarts() outvol.dimnames = like.getDimensionNames() else: outvol = volumeLikeFile(likeFilename=like, outputFilename=filename, dtype=dtype, volumeType=volumeType, labels=labels) outvol.data = array # Finish if write: outvol.writeFile() if close: outvol.closeVolume() return(outvol)
5,348,098
def getRandomPipe():
    """returns a randomly generated pipe"""
    # y of gap between upper and lower pipe
    gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
    gapY += int(BASEY * 0.2)
    pipeHeight = IMAGES['pipe'][0].get_height()
    pipeX = SCREENWIDTH + 10

    return [
        {'x': pipeX, 'y': gapY - pipeHeight},   # upper pipe
        {'x': pipeX, 'y': gapY + PIPEGAPSIZE},  # lower pipe
    ]
5,348,099