Columns: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M).
def parse_regex_flags(raw_flags: str = 'gim'):
    """
    Parse user-input flags and convert them to re flags.

    Args:
        raw_flags: string of chars representing re flags

    Returns:
        (re flags, whether to return multiple matches)
    """
    raw_flags = raw_flags.lstrip('-')  # compatibility with original MatchRegex script.
    multiple_matches = 'g' in raw_flags
    raw_flags = raw_flags.replace('g', '')
    flags = re.RegexFlag(0)
    for c in raw_flags:
        if c in LETTER_TO_REGEX_FLAGS:
            flags |= LETTER_TO_REGEX_FLAGS[c]
        else:
            raise ValueError(f'Invalid regex flag "{c}".\n'
                             f'Supported flags are {", ".join(LETTER_TO_REGEX_FLAGS.keys())}')
    return flags, multiple_matches
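A minimal usage sketch (hypothetical; it assumes LETTER_TO_REGEX_FLAGS maps 'i', 'm', and 's' to re.IGNORECASE, re.MULTILINE, and re.DOTALL, which is not shown in the snippet above):

import re

LETTER_TO_REGEX_FLAGS = {'i': re.IGNORECASE, 'm': re.MULTILINE, 's': re.DOTALL}  # assumed mapping

flags, multiple_matches = parse_regex_flags('-gi')
# multiple_matches is True because of the 'g' (global) flag; flags == re.IGNORECASE
assert re.search('abc', 'xABCx', flags=flags) is not None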
5,346,300
def create_parser():
    """Create argparse object for this CLI"""
    parser = argparse.ArgumentParser(
        description="Remove doubled extensions from files")
    parser.add_argument("filename", metavar="file", help="File to process")
    return parser
5,346,301
def has_answer(answers, retrieved_text, match='string', tokenized: bool = False):
    """Check if retrieved_text contains an answer string.

    If `match` is string, token matching is done between the text and answer.
    If `match` is regex, we search the whole text with the regex.
    """
    if not isinstance(answers, list):
        answers = [answers]
    if match == 'string':
        if tokenized:
            text = md.detokenize(retrieved_text)
            t_text = retrieved_text
        else:
            text = retrieved_text
            t_text = spacy_tokenize(retrieved_text, uncase=True)
        for single_answer in answers:
            single_answer = spacy_tokenize(single_answer, uncase=True)
            for i in range(0, len(t_text) - len(single_answer) + 1):
                if single_answer == t_text[i: i + len(single_answer)]:
                    return True
        # If raw covered.
        for single_answer in answers:
            if single_answer in text:
                return True
    elif match == 'regex':
        if tokenized:
            text = md.detokenize(retrieved_text)
        else:
            text = retrieved_text
        # Answer is a regex
        single_answer = normalize(answers[0])
        if regex_match(text, single_answer):
            return True
    return False
5,346,302
def get_utm_zone(srs):
    """
    Extracts the UTM zone from an osr.SpatialReference object (srs).

    Returns a (datum, utm_zone, hemisphere) tuple; returns None if the UTM
    zone is not found.
    """
    if not isinstance(srs, osr.SpatialReference):
        raise TypeError('srs is not a osr.SpatialReference instance')

    if srs.IsProjected() != 1:
        return None

    projcs = srs.GetAttrValue('projcs')
    assert 'UTM' in projcs

    datum = None
    if 'NAD83' in projcs:
        datum = 'NAD83'
    elif 'WGS84' in projcs:
        datum = 'WGS84'
    elif 'NAD27' in projcs:
        datum = 'NAD27'

    # should be something like NAD83 / UTM zone 11N...
    if '/' in projcs:
        utm_token = projcs.split('/')[1]
    else:
        utm_token = projcs

    if 'UTM' not in utm_token:
        return None

    # noinspection PyBroadException
    try:
        utm_zone = int(''.join([k for k in utm_token if k in '0123456789']))
    except Exception:
        return None

    if utm_zone < 0 or utm_zone > 60:
        return None

    hemisphere = projcs[-1]
    return datum, utm_zone, hemisphere
5,346,303
def test_create_with_manager():
    """Test getting a created test user with manager"""
    user, user_key, manager, manager_key = helper.user.create_with_manager()
    assert isinstance(user, protobuf.user_state_pb2.User)
    assert isinstance(manager, protobuf.user_state_pb2.User)
    assert isinstance(user.next_id, str)
    assert isinstance(manager.next_id, str)
    assert isinstance(user.name, str)
    assert isinstance(manager.name, str)
    assert isinstance(user_key, Key)
    assert isinstance(manager_key, Key)
    assert user.manager_id == manager.next_id
    assert user.next_id != manager.next_id
5,346,304
def launcher(cmd):
    """Start a new terminal instance with the given command."""
    global thread_id
    print()  # Just for clean output
    logging.info("Thread %s: Starting :- %s", thread_id, cmd)
    os.system(cmd)
    logging.info("Thread %s: Finished :- %s", thread_id, cmd)
    thread_id -= 1
5,346,305
def get_documents_meta_url(project_id: int, limit: int = 10, host: str = KONFUZIO_HOST) -> str:
    """
    Generate URL to load meta information about the Documents in the Project.

    :param project_id: ID of the Project
    :param limit: Maximum number of Documents per page
    :param host: Konfuzio host
    :return: URL to get all the Documents details.
    """
    return f"{host}/api/projects/{project_id}/docs/?limit={limit}"
5,346,306
def CalcUB(idx1, idx2, replace=False, reflist=None):
    """Calculate a UB matrix from the cell and two reflections idx1 and idx2
    in reflist. Replace the current UB matrix when replace is True."""
    sample, inst = getSampleInst()
    if not sample:
        return
    rfl = sample.getRefList(reflist)
    if rfl is None:
        session.log.error('Reflection list %s not found', reflist)
        return
    r1 = rfl.get_reflection(idx1)
    r2 = rfl.get_reflection(idx2)
    if not r1 or not r2:
        session.log.error('Reflections not found')
        return
    newub = inst.calc_ub(sample.getCell(), r1, r2)
    session.log.info('New UB:\n %s', str(newub))
    if replace:
        sample.ubmatrix = list(newub.flatten())
5,346,307
def parse_params_from_string(paramStr: str) -> dict:
    """Create a dictionary representation of parameters in PBC format."""
    params = dict()
    lines = paramStr.split('\n')
    for line in lines:
        if line:
            name, value = parse_param_line(line)
            add_param(params, name, value)
    return params
5,346,308
def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False): """ Horizontally concatenate gctoos. Args: gctoos (list of gctoo objects) remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos error_report_file (string): path to write file containing error report indicating problems that occurred during hstack, mainly for inconsistencies in common metadata fields_to_remove (list of strings): fields to be removed from the common metadata because they don't agree across files reset_ids (bool): set to True if sample ids are not unique Return: concated (gctoo object) """ # Separate each gctoo into its component dfs row_meta_dfs = [] col_meta_dfs = [] data_dfs = [] srcs = [] for g in gctoos: row_meta_dfs.append(g.row_metadata_df) col_meta_dfs.append(g.col_metadata_df) data_dfs.append(g.data_df) srcs.append(g.src) logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs])) # Concatenate row metadata all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file) # Concatenate col metadata all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields) # Concatenate the data_dfs all_data_df = assemble_data(data_dfs, "horiz") # Make sure df shapes are correct assert all_data_df.shape[0] == all_row_metadata_df.shape[0], "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0]) assert all_data_df.shape[1] == all_col_metadata_df.shape[0], "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0]) # If requested, reset sample ids to be unique integers and move old sample # ids into column metadata if reset_ids: do_reset_ids(all_col_metadata_df, all_data_df, "horiz") logger.info("Build GCToo of all...") concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df, col_metadata_df=all_col_metadata_df, data_df=all_data_df) return concated
5,346,309
def number_fixed_unused_variables(block):
    """
    Method to return the number of fixed Var components which do not appear
    within any activated Constraint in a model.

    Args:
        block : model to be studied

    Returns:
        Number of fixed Var components which do not appear within any
        activated Constraints in block
    """
    return len(fixed_unused_variables_set(block))
5,346,310
def tunnelX11(node, display=None):
    """Create an X11 tunnel from node:6000 to the root host
       display: display on root host (optional)
       returns: node $DISPLAY, Popen object for tunnel"""
    if display is None and 'DISPLAY' in environ:
        display = environ['DISPLAY']
    if display is None:
        error("Error: Cannot connect to display\n")
        return None, None
    host, screen = display.split(':')
    # Unix sockets should work
    if not host or host == 'unix':
        # GDM3 doesn't put credentials in .Xauthority,
        # so allow root to just connect
        quietRun('xhost +si:localuser:root')
        return display, None
    else:
        # Create a tunnel for the TCP connection
        port = 6000 + int(float(screen))
        connection = r'TCP\:%s\:%s' % (host, port)
        cmd = ["socat", "TCP-LISTEN:%d,fork,reuseaddr" % port,
               "EXEC:'mnexec -a 1 socat STDIO %s'" % connection]
    return 'localhost:' + screen, node.popen(cmd)
5,346,311
def static_docs(file_path):
    """Serve the 'docs' folder static files and redirect folders to index.html.

    :param file_path: File path inside the 'docs' folder.
    :return: Full HTTPResponse for the static file.
    """
    if os.path.isdir(os.path.join(document_root, 'docs', file_path)):
        return redirect('/docs/%s/index.html' % file_path)
    return static_file(file_path, root=os.path.join(document_root, 'docs'))
5,346,312
def get_aws_account_id_file_section_dict() -> collections.OrderedDict:
    """Get the section information from ~/.aws_accounts_for_set_aws_mfa."""
    # Check whether ~/.aws_accounts_for_set_aws_mfa exists; create it if it does not
    prepare_aws_account_id_file()
    # Get the section dictionary of that ini file
    return Config._sections
5,346,313
def profile(request, session_key): """download_audio.html renderer. :param request: rest API request object. :type request: Request :param session_key: string representing the session key for the user :type session_key: str :return: Just another django mambo. :rtype: HttpResponse """ # This may be different from the one provided in the URL. my_session_key = request.session.session_key last_week = datetime.date.today() - datetime.timedelta(days=7) # Get the weekly counts. last_weeks = [datetime.date.today() - datetime.timedelta(days=days) for days in [6, 13, 20, 27, 34]] dates = [] weekly_counts = [] for week in last_weeks: dates.append(week.strftime('%m/%d/%Y')) count = AnnotatedRecording.objects.filter( file__gt='', file__isnull=False, session_id=session_key, timestamp__gt=week, timestamp__lt=week + datetime.timedelta(days=7)).count() weekly_counts.append(count) recording_count = AnnotatedRecording.objects.filter( file__gt='', file__isnull=False).count() # Construct dictionaries of the user's recordings. user_recording_count = AnnotatedRecording.objects.filter( file__gt='', file__isnull=False, session_id=session_key).count() recent_recordings = AnnotatedRecording.objects.filter( file__gt='', file__isnull=False, session_id=session_key, timestamp__gt=last_week) recent_dict = defaultdict(list) [recent_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in recent_recordings] old_recordings = AnnotatedRecording.objects.filter( file__gt='', file__isnull=False, session_id=session_key, timestamp__lt=last_week) old_dict = defaultdict(list) [old_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in old_recordings] recent_lists = _sort_recitations_dict_into_lists(recent_dict) old_lists = _sort_recitations_dict_into_lists(old_dict) return render(request, 'audio/profile.html', {'session_key': my_session_key, 'recent_dict': dict(recent_dict), 'recent_lists': recent_lists, 'old_lists': old_lists, 'dates': dates[::-1], 'weekly_counts': weekly_counts[::-1], 'old_dict': dict(old_dict), 'recording_count': recording_count, 'user_recording_count': user_recording_count})
5,346,314
def genotype_vcf( args, input_vcf: str, input_sim: str, input_real: str, output_file=sys.stdout, remove_z=5.0, samples=[], ): """Write new VCF with NPSV-determined genotypes Args: args (argparse.Namespace): Command arguments input_vcf (str): Path to input VCF file input_sim (str): Path to TSV of NPSV features for simulated data input_real (str): Path to TSV of NPSV features for real data output_file ([type], optional): File object for writing VCF. Defaults to sys.stdout. """ # Load simulation and real data if args.filter_bed is not None: # If provided, filter training data by BED file filter_bed = bed.BedTool(args.filter_bed) sim_bed = bed.BedTool(input_sim) sim_data = sim_bed.intersect(filter_bed, u=True, f=0.5).to_dataframe( header=None, na_values=".", names=get_tsv_columns(input_sim), dtype={"#CHROM": str, "SAMPLE": str, "AC": int}, ) else: sim_data = pd.read_table( input_sim, na_values=".", dtype={"#CHROM": str, "SAMPLE": str, "AC": int} ) if sim_data.shape[0] == 0: # No data is available, copy input to output and exit vcf_reader = vcf.Reader(filename=input_vcf) if samples is not None: overwrite_reader_samples(vcf_reader, samples) vcf.Writer(output_file, vcf_reader) if not vcf_reader._reader.closed: vcf_reader._reader.close() return real_data = pd.read_table( input_real, na_values=".", dtype={"#CHROM": str, "SAMPLE": str} ) # Add derived features add_derived_features(sim_data) add_derived_features(real_data) # Lines to add to VCF header classifier_vcf_metadata = {} if not ({args.DEL_gt_mode, args.INS_gt_mode}).isdisjoint({"single", "hybrid"}): single_pred = np.full(real_data.shape[0], -1, dtype=int) single_prob = np.full((real_data.shape[0], 3), 0, dtype=float) # Construct classifiers for each variant type typed_sim_data = sim_data.groupby(TYPE_COL) typed_real_data = real_data.groupby(TYPE_COL) for variant_type, sim_group in typed_sim_data: if getattr(args, f"{variant_type}_gt_mode", "single") == "single": single_classifier = getattr(args, f"{variant_type}_classifier", "rf") else: single_classifier = getattr(args, f"{variant_type}_hybrid_classifier") real_group = typed_real_data.get_group(variant_type) sim_group = ( sim_group.groupby(VAR_COL + [KLASS_COL]) .sample(args.downsample) .reset_index(drop=True) ) with pd.option_context("mode.use_inf_as_na", True): # Drop any columns that are entirely NA sim_group = sim_group.dropna(axis=1, how="all") real_group = real_group.dropna(axis=1, how="all") # Filter to available features desired_features = CLASSIFIER_FEATURES[single_classifier] single_features = list( set(desired_features) & set(sim_group) & set(real_group) ) # Then drop rows with na, inf, etc. 
from training data sim_group = sim_group.dropna(axis=0, subset=single_features) # Expand here with additional classifiers logging.info( "Building 'single model' %s classifier for %s variants (%d observations) with features: %s", single_classifier, variant_type, sim_group.shape[0], ", ".join(single_features), ) classifier_vcf_metadata[f"npsv_{variant_type}_single_classifier"] = [ f"{single_classifier}({','.join(single_features)})" ] if single_classifier == "svm": pred, prob = svm_classify( sim_group, real_group, features=single_features, param_search=args.param_search, threads=args.threads, gamma=args.svm_gamma, C=args.svm_C, ) elif single_classifier == "rf": pred, prob = rf_classify( sim_group, real_group, features=single_features, param_search=args.param_search, threads=args.threads, n_estimators=args.rf_n_estimators, max_depth=args.rf_max_depth, ) elif single_classifier == "logistic": pred, prob = logistic_classify( sim_group, real_group, features=single_features, param_search=args.param_search, threads=args.threads, penalty=args.lr_penalty, C=args.lr_C, ) elif single_classifier == "xgboost": pred, prob = xgboost_classify( sim_group, real_group, features=single_features, param_search=args.param_search, threads=args.threads, ) # Reconstruct original vectors indices = typed_real_data.indices[variant_type] single_pred[indices] = pred if prob.shape[1] == 3: single_prob[indices] = prob # Report accuracy if actual class is defined if logging.getLogger().isEnabledFor(logging.DEBUG) and KLASS_COL in real_data: logging.debug( "Accuracy compared to reported %s in 'real' data: %f", KLASS_COL, accuracy_score(real_data[KLASS_COL], single_pred), ) bayesian_accuracy = accuracy_score( real_data[KLASS_COL], np.argmax( real_data[["PROB_HOMREF", "PROB_HET", "PROB_HOMALT"]].values, axis=1, ), ) logging.debug( "Accuracy for Bayesian model compared to reported %s in 'real' data: %f", KLASS_COL, bayesian_accuracy, ) if not ({args.DEL_gt_mode, args.INS_gt_mode}).isdisjoint({"variant", "hybrid"}): if args.filter_bed is not None: # Clunky, but for the variant model we need unfiltered training data sim_data = pd.read_table( input_sim, na_values=".", dtype={"#CHROM": str, "SAMPLE": str, "AC": int} ) add_derived_features(sim_data) # Prepare simulated data for per-variant classifier grouped_sim_data = sim_data.groupby(VAR_COL) # Filter to available features for per-variant classifier pervariant_features = {} for kind in ("DEL", "INS"): if getattr(args, f"{kind}_gt_mode") not in ("variant", "hybrid"): continue variant_classifier = getattr(args, f"{kind}_classifier") #desired_features = set(CLASSIFIER_FEATURES[variant_classifier]) - set(("SVLEN",)) desired_features = set(CLASSIFIER_FEATURES[variant_classifier]) pervariant_features[kind] = list( desired_features & set(sim_data) & set(real_data) ) logging.info( "Building 'per-variant' %s classifiers for %s variants based on simulated data with features: %s", variant_classifier, kind, ", ".join(pervariant_features[kind]), ) classifier_vcf_metadata[f"npsv_{kind}_variant_classifier"] = [ f"{variant_classifier}({','.join(pervariant_features[kind])})" ] # Prepare real data to write out per-variant predictions grouped_real_data = real_data.groupby(VAR_COL) # Write new VCF # -------------------------------------- vcf_reader = vcf.Reader(filename=input_vcf) # Original VCF file # Add new fields to the header vcf_reader.metadata.update(classifier_vcf_metadata) vcf_reader.metadata["npsv_dm2"] = [f"mahal({','.join(MAHAL_FEATURES)})"] vcf_reader.formats["GT"] = 
vcf.parser._Format("GT", 1, "String", "Genotype") vcf_reader.formats["DM"] = vcf.parser._Format( "DM", "G", "Float", "Mahalanobis distance for each genotype", ) vcf_reader.formats["PL"] = vcf.parser._Format( "PL", "G", "Integer", "Phred-scaled genotype likelihoods", ) vcf_reader.formats["AD"] = vcf.parser._Format( "AD", "R", "Integer", "Read depth for each allele", ) # If original VCF is sites only... if len(vcf_reader._column_headers) < 9: vcf_reader._column_headers = VCF_COLUMN_HEADERS # Set sample names overwrite_reader_samples( vcf_reader, list(set(real_data[SAMPLE_COL]) | set(samples)) ) # Write new VCF entries, building local classifiers as needed vcf_writer = vcf.Writer(output_file, vcf_reader, lineterminator="") for record in tqdm(vcf_reader, desc="Genotyping variants"): variant = Variant.from_pyvcf(record) # Write sites-only and FORMAT columns (overwriting any original or potentially invalidated values) record.INFO.pop("AC", None) record.INFO.pop("AN", None) record.FORMAT = None record.samples = [] vcf_writer.write_record(record) output_file.write(f"\t{VCF_FORMAT}") # Get prediction (we can't assume that the simulated data is in the same order as the VCF) for sample in vcf_reader.samples: group_vals = record_to_var_col(record, sample) try: real_group = grouped_real_data.get_group(group_vals) indices = grouped_real_data.indices[group_vals] if len(indices) > 1: logging.warn( "Multiple 'real' data rows for %s@%s. Skipping.", sample, variant_descriptor(record), ) output_file.write("\t.") continue except KeyError: logging.warn( "No 'real' data found for %s@%s. Skipping.", sample, variant_descriptor(record), ) output_file.write("\t.") continue assert len(indices) == 1, "Should only be only 'real data' entry" indices = indices[0] # Construct VCF call entry variant_type = record.var_subtype gt_mode = getattr(args, f"{variant_type}_gt_mode", "single") if gt_mode == "single" or (gt_mode == "hybrid" and variant.event_length >= getattr(args, f"{variant_type}_hybrid_threshold", 1000) ): call = pred_to_vcf( real_group, single_pred[indices], single_prob[indices], ad=real_group[AD_COL].to_numpy().squeeze() ) else: # Construct local classifier sim_group = grouped_sim_data.get_group(group_vals) if args.variant_downsample: sim_group = ( sim_group.groupby(VAR_COL + [KLASS_COL]) .head(args.variant_downsample) .reset_index(drop=True) ) with pd.option_context("mode.use_inf_as_na", True), warnings.catch_warnings(): warnings.simplefilter("ignore") # Drop any columns that are entirely NA sim_group = sim_group.dropna(axis=1, how="all") real_group = real_group.dropna(axis=1, how="all") # Update the available features avail_features = list( set(pervariant_features[variant_type]) & set(sim_group) & set(real_group) ) # Then drop rows with na, inf, etc. 
from training data sim_group = sim_group.dropna(axis=0, subset=avail_features) # Remove outliers from training data rows_before = sim_group.shape[0] sim_group = ( sim_group.groupby(KLASS_COL) .apply(filter_by_zscore, avail_features, remove_z) .reset_index(drop=True) ) # TODO: Check that all three classes are still present logging.debug( "Classifying with %d observations after removing %d outliers", sim_group.shape[0], rows_before - sim_group.shape[0], ) mahal_score = None if args.dm2: try: _, _, mahal_score = single_mahalanobis( sim_group, real_group, features=list(set(avail_features) & set(MAHAL_FEATURES)), ) except: pass try: variant_classifier = getattr(args, f"{variant_type}_classifier") if variant_classifier == "svm": pred, prob = svm_classify( sim_group, real_group, features=avail_features, gamma=args.svm_gamma, C=args.svm_C, ) elif variant_classifier == "rf": pred, prob = rf_classify( sim_group, real_group, features=avail_features, n_estimators=args.rf_n_estimators, max_depth=args.rf_max_depth, ) elif variant_classifier == "logistic": pred, prob = logistic_classify( sim_group, real_group, features=avail_features, penalty=args.lr_penalty, C=args.lr_C, ) elif variant_classifier == "xgboost": pred, prob = xgboost_classify( sim_group, real_group, features=avail_features, ) call = pred_to_vcf( real_group, pred.item(0), prob[0,], dm2=mahal_score, ad=real_group[AD_COL].to_numpy().squeeze() ) except ValueError as e: logging.error( "Genotyping error for %s: %s", variant_descriptor(record), e ) call = "./." output_file.write("\t" + call) output_file.write("\n")
5,346,315
def pipFetchLatestVersion(pkg_name: str) -> str:
    """
    Fetches the latest version of a Python package from pypi.org

    :param pkg_name: package to search for
    :return: latest version of the package, or 'not found' if an error was returned
    """
    base_url = "https://pypi.org/pypi"
    request = f"{base_url}/{pkg_name}/json"
    response = requests.get(request)
    if response.status_code == requests.codes.ok:
        json = response.json()
        newest_version = json["info"]["version"]
    else:
        newest_version = NOT_FOUND
    return newest_version
5,346,316
def _GetLastAuthor():
    """Returns a string with the author of the last commit."""
    author = subprocess.check_output(
        ['git', 'log', '-1', '--pretty=format:"%an"']).splitlines()
    return author
5,346,317
def PlotPolygons(Polygons, Map=None, Ax=None, OutlineColour='k', FillColour='w',
                 ColourMap="None", alpha=0.5):
    """
    Function to plot polygons from a shapely Polygon Dictionary

    Modified from PlottingRaster.py code by FJC

    Outline colour can be name, tuple or range of value to shade

    MDH
    """
    # create a figure if one doesn't already exist
    CreatedAxes = False
    if Ax is None:
        print("PlotPolygons: Warning, no axes provided, creating new figure and axes")
        Fig = plt.figure()
        Ax = plt.gca()
        CreatedAxes = True
        plt.axis('equal')
        plt.xlabel('Longitude ($^o$)')
        plt.ylabel('Latitude ($^o$)')

    # convert to map coordinates
    if Map is not None:
        Polygons = ConvertPolygonsLatLong2MapCoords(Polygons, Map)

    # loop through shapes in polygons and plot patches
    for Key, Poly in Polygons.items():
        if Poly.geom_type == 'Polygon':
            Patch = PolygonPatch(Poly, fc=FillColour, ec=OutlineColour, alpha=alpha)
            Ax.add_patch(Patch)
        elif Poly.geom_type == 'MultiPolygon':
            for singlepoly in Poly:
                Patch = PolygonPatch(singlepoly, fc=FillColour, ec=OutlineColour, alpha=alpha)
                Ax.add_patch(Patch)

    # autoscale only when we created the axes above
    if CreatedAxes:
        Ax.autoscale_view()
5,346,318
def mock_datasource_http_oauth2(mock_datasource):
    """Mock DataSource object with http oauth2 credentials"""
    mock_datasource.credentials = b"client_id: FOO\nclient_secret: oldisfjowe84uwosdijf"
    mock_datasource.location = "http://foo.com"
    return mock_datasource
5,346,319
def test_050_person_image_file(): """Test person_image_file method.""" # FORM is subordinate of OBJE dialect = model.Dialect.MYHERITAGE form = model.make_record(2, None, "FORM", "JPG", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file.jpeg", [], 0, dialect, None).freeze() obje = model.make_record(1, None, "OBJE", "", [ file, form], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file.jpeg" # FORM is subordinate of FILE dialect = model.Dialect.MYHERITAGE form = model.make_record(3, None, "FORM", "JPG", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file.jpeg", [form], 0, dialect, None).freeze() obje = model.make_record(1, None, "OBJE", "", [ file], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file.jpeg" # FORM is subordinate of OBJE dialect = model.Dialect.MYHERITAGE form = model.make_record(2, None, "FORM", "WAV", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file.wav", [], 0, dialect, None).freeze() obje = model.make_record(1, None, "OBJE", "", [ file, form], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 0, dialect, None).freeze() assert utils.person_image_file(person) is None # FORM is subordinate of FILE dialect = model.Dialect.MYHERITAGE form = model.make_record(3, None, "FORM", "WAV", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file.wav", [form], 0, dialect, None).freeze() obje = model.make_record(1, None, "OBJE", "", [ file], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 0, dialect, None).freeze() assert utils.person_image_file(person) is None # _PRIM flag is set on one of the two OBJE dialect = model.Dialect.MYHERITAGE form = model.make_record(2, None, "FORM", "JPG", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file.jpg", [], 0, dialect, None).freeze() obje1 = model.make_record(1, None, "OBJE", "", [ file, form], 0, dialect, None).freeze() prim_y = model.make_record( 2, None, "_PRIM", "Y", [], 0, dialect, None).freeze() form = model.make_record(2, None, "FORM", "JPG", [], 0, dialect, None).freeze() file = model.make_record( 2, None, "FILE", "/path/to/file_primary.jpg", [], 0, dialect, None).freeze() obje2 = model.make_record(1, None, "OBJE", "", [ file, form, prim_y], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje1, obje2], 0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file_primary.jpg" person = model.make_record(0, None, "INDI", "", [ obje2, obje1], 0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file_primary.jpg" # multiple FILEs per OBJE, choose JPG over WAV dialect = model.Dialect.MYHERITAGE form = model.make_record(3, None, "FORM", "JPG", [], 0, dialect, None).freeze() file1 = model.make_record( 2, None, "FILE", "/path/to/file.jpeg", [form], 0, dialect, None).freeze() form = model.make_record(3, None, "FORM", "WAV", [], 0, dialect, None).freeze() file2 = model.make_record( 2, None, "FILE", "/path/to/file.wav", [form], 0, dialect, None).freeze() obje = model.make_record(1, None, "OBJE", "", [ file1, file2], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 
0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file.jpeg" obje = model.make_record(1, None, "OBJE", "", [ file2, file1], 0, dialect, None).freeze() person = model.make_record(0, None, "INDI", "", [ obje], 0, dialect, None).freeze() assert utils.person_image_file(person) == "/path/to/file.jpeg"
5,346,320
def os_environ():
    """clear os.environ, and restore it after the test runs"""
    # for use whenever you expect code to edit environment variables
    old_env = os.environ.copy()
    os.environ = {}
    yield
    os.environ = old_env
5,346,321
async def test_if_fires_on_transmitter_event(hass, calls, entry, lcn_connection): """Test for transmitter event triggers firing.""" address = (0, 7, False) device = get_device(hass, entry, address) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { CONF_PLATFORM: "device", CONF_DOMAIN: DOMAIN, CONF_DEVICE_ID: device.id, CONF_TYPE: "transmitter", }, "action": { "service": "test.automation", "data_template": { "test": "test_trigger_transmitter", "code": "{{ trigger.event.data.code }}", "level": "{{ trigger.event.data.level }}", "key": "{{ trigger.event.data.key }}", "action": "{{ trigger.event.data.action }}", }, }, }, ] }, ) inp = ModStatusAccessControl( LcnAddr(*address), periphery=AccessControlPeriphery.TRANSMITTER, code="aabbcc", level=0, key=0, action=KeyAction.HIT, ) await lcn_connection.async_process_input(inp) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data == { "test": "test_trigger_transmitter", "code": "aabbcc", "level": 0, "key": 0, "action": "hit", }
5,346,322
def save_pred_vs_label_4tuple(
    img_rgb: np.ndarray, label_img: np.ndarray, id_to_class_name_map: Mapping[int, str], save_fpath: str
) -> None:
    """7-tuple consists of (1-3) rgb mask 3-sequence for label or predictions (4) color palette

    Args:
        img_rgb
        label_img
        id_to_class_name_map
        save_fpath
    """
    img_h, img_w, _ = img_rgb.shape
    assert label_img.shape == (img_h, img_w)

    if min(img_h, img_w) < MIN_DISCERNABLE_RES_FOR_TEXT:
        save_pred_vs_label_4tuple(
            img_rgb=resize_img_by_short_side(
                img_rgb.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="rgb"
            ),
            label_img=resize_img_by_short_side(
                label_img.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="label"
            ),
            id_to_class_name_map=id_to_class_name_map,
            save_fpath=save_fpath.replace(".png", "_upsample.png"),
        )

    NUM_HSTACKED_IMGS = 3
    hstack_img = form_mask_triple(img_rgb, label_img, save_fpath, save_to_disk=False)

    save_dir = "/".join(save_fpath.split("/")[:-1])

    present_color_ids = np.unique(label_img)
    num_present_colors = len(present_color_ids)
    max_colors_per_col = int(math.ceil(num_present_colors / NUM_HSTACKED_IMGS))

    palette_img = form_contained_classes_color_guide(
        present_color_ids, id_to_class_name_map, "", "",
        save_to_disk=False, max_colors_per_col=max_colors_per_col
    )

    vstack_img2 = vstack_img_with_palette(hstack_img, palette_img)

    save_fpath = save_fpath.replace(".png", "_pred_labels_palette.png")
    cv2.imwrite(save_fpath, vstack_img2[:, :, ::-1])
5,346,323
def find_certificate_name(file_name):
    """Search the CRT for the actual aggregator name."""
    # This loop looks for the collaborator name in the key
    with open(file_name, 'r') as f:
        for line in f:
            if 'Subject: CN=' in line:
                col_name = line.split('=')[-1].strip()
                break
    return col_name
5,346,324
def _find_module(module):
    """Find module using imp.find_module.

    While imp is deprecated, it provides a Python 2/3 compatible
    interface for finding a module. We use the result later to load the
    module with imp.load_module with the '__main__' name, causing it to
    execute.

    The non-deprecated method of using importlib.util.find_spec and
    loader.execute_module is not supported in Python 2.

    The _find_module implementation uses a novel approach to bypass
    imp.find_module's requirement that package directories contain
    __init__.py/__init__.pyc markers. This lets users specify namespace
    packages in main modules, which are not otherwise supported by
    imp.find_module.
    """
    parts = module.split(".")
    module_path = parts[0:-1]
    module_name_part = parts[-1]
    # See function docstring for the rationale of this algorithm.
    for sys_path_item in sys.path:
        cur_path = os.path.join(sys_path_item, *module_path)
        try:
            return imp.find_module(module_name_part, [cur_path])
        except ImportError:
            pass
    raise ImportError("No module named %s" % module)
5,346,325
def update_counts(partno, quantity, batchname, message):
    """
    Updates the given stock entry, creating it if necessary.

    Raises an exception if the part number is not valid or if there is a
    database problem.
    """
    _check_bom(partno)  # validation
    _do_update_counts(partno, quantity, batchname, message)
5,346,326
def drop_object():
    """Drop an object into the scene."""
    global bodies, geom, counter, objcount
    body, geom = create_box(world, space, 1000, 0.2, 0.2, 0.2)
    body.setPosition((random.gauss(0, 0.1), 3.0, random.gauss(0, 0.1)))
    theta = random.uniform(0, 2 * pi)
    ct = cos(theta)
    st = sin(theta)
    body.setRotation([ct, 0., -st, 0., 1., 0., st, 0., ct])
    bodies.append(body)
    geoms.append(geom)
    counter = 0
    objcount += 1
5,346,327
def float2bin(p: float, min_bits: int = 10, max_bits: int = 20, relative_error_tol=1e-02) -> List[bool]:
    """
    Converts probability `p` into binary list `b`.

    Args:
        p: probability such that 0 < p < 1
        min_bits: minimum number of bits before testing relative error.
        max_bits: maximum number of bits for truncation.
        relative_error_tol: relative error tolerance

    Returns:
        b: List[bool]

    Examples:
        Probability 0.5 becomes:

        >>> float2bin(0.5)  # Is 0.1
        [1]

        Moreover 0.125 is:

        >>> float2bin(0.125)  # Is 0.001
        [0, 0, 1]

        Some numbers get truncated. For example, probability 1/3 becomes:

        >>> float2bin(1/3)  # Is 0.0101010101...
        [0, 1, 0, 1, 0, 1, 0, 1, 0]

        You can increase the maximum number of bits to reach float precision, for example:

        >>> 1/3
        0.3333333333333333
        >>> q = float2bin(1/3, 64)
        >>> bin2float(q)
        0.3333333333333333
        >>> 1/3 == bin2float(q)
        True
    """
    assert 1 > p > 0
    b = []
    i = 1
    original_p = 1 - p
    while p != 0 or i > max_bits:
        if i > min_bits:
            if isclose(1 - bin2float(b), original_p, rtol=relative_error_tol, atol=0):
                break
        if p >= 2 ** -i:
            b.append(True)
            p -= 2 ** -i
        else:
            b.append(False)
        i += 1
    return b
5,346,328
def bin_thresh(img: np.ndarray, thresh: Number) -> np.ndarray:
    """
    Performs binary thresholding of an image

    Parameters
    ----------
    img : np.ndarray
        Image to filter.
    thresh : int
        Pixel values >= thresh are set to 1, else 0.

    Returns
    -------
    np.ndarray :
        Binarized image, same shape as input
    """
    res = img >= thresh
    return res
5,346,329
def add_object_align_init(context, operator): """ Return a matrix using the operator settings and view context. :arg context: The context to use. :type context: :class:`bpy.types.Context` :arg operator: The operator, checked for location and rotation properties. :type operator: :class:`bpy.types.Operator` :return: the matrix from the context and settings. :rtype: :class:`mathutils.Matrix` """ from mathutils import Matrix, Vector, Euler properties = operator.properties if operator is not None else None space_data = context.space_data if space_data and space_data.type != 'VIEW_3D': space_data = None # location if operator and properties.is_property_set("location"): location = Matrix.Translation(Vector(properties.location)) else: if space_data: # local view cursor is detected below location = Matrix.Translation(space_data.cursor_location) else: location = Matrix.Translation(context.scene.cursor_location) if operator: properties.location = location.to_translation() # rotation view_align = (context.user_preferences.edit.object_align == 'VIEW') view_align_force = False if operator: if properties.is_property_set("view_align"): view_align = view_align_force = operator.view_align else: if properties.is_property_set("rotation"): # ugh, 'view_align' callback resets value = properties.rotation[:] properties.view_align = view_align properties.rotation = value del value else: properties.view_align = view_align if operator and (properties.is_property_set("rotation") and not view_align_force): rotation = Euler(properties.rotation).to_matrix().to_4x4() else: if view_align and space_data: rotation = space_data.region_3d.view_matrix.to_3x3().inverted() rotation.resize_4x4() else: rotation = Matrix() # set the operator properties if operator: properties.rotation = rotation.to_euler() return location * rotation
5,346,330
def _main() -> None:
    """Script entry point."""
    ut.init_root_logger()
    config = utils.load_config_from_input_args(lambda x: Config(**x))
    if config is None:
        _logger.error("config error.")
        return

    filepath = download(config)
    _logger.info(f"download path: {filepath}")
5,346,331
def load_dataset():
    """
    load dataset

    :return: dataset in numpy style
    """
    data_location = 'data.pk'
    data = pickle.load(open(data_location, 'rb'))
    return data
5,346,332
def video_feed():
    """Return camera live feed."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
5,346,333
def run(flist):
    """
    Iterate over a list of files and yield each line sequentially.

    Parameters
    ----------
    flist : list of file-like objects
        Must handle `.close()` attribute.

    Yields
    ------
    str (line-by-line)
        Lines from the concatenated PDB files.
    """
    for fhandle in flist:
        for line in fhandle:
            yield line
        fhandle.close()
5,346,334
def allocatable_production(dataset):
    """Return generator of production exchanges for a dataset.

    Production exchanges either:

    * Have type ``reference product``, or
    * Have type ``byproduct`` and ``classification`` is ``allocatable product``

    Note that all types of reference products are returned:
    ``allocatable product``, ``waste``, and ``recyclable``!
    """
    for exc in dataset['exchanges']:
        if exc['type'] == 'reference product':
            yield exc
        elif (exc['type'] == 'byproduct' and
              exc['byproduct classification'] == 'allocatable product'):
            yield exc
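A small, hypothetical dataset illustrating which exchanges the generator above yields:

dataset = {
    "exchanges": [
        {"type": "reference product", "name": "steel"},
        {"type": "byproduct", "byproduct classification": "allocatable product", "name": "slag"},
        {"type": "byproduct", "byproduct classification": "waste", "name": "dust"},
        {"type": "technosphere", "name": "electricity"},
    ]
}
print([exc["name"] for exc in allocatable_production(dataset)])  # ['steel', 'slag']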
5,346,335
def test_capture_and_release_default_warning_handler(loguru_logger: Logger):
    """Ensure the module can capture and release the default warnings handler."""
    assert not isinstance(warnings.showwarning, partial)
    assert warnings.showwarning == DEFAULT_SHOWARNING

    assert python_warnings.capture(loguru_logger)
    assert warnings.showwarning != DEFAULT_SHOWARNING
    assert isinstance(warnings.showwarning, partial)
    assert warnings.showwarning.func == python_warnings._warning_handler

    assert python_warnings.release()
    assert not isinstance(warnings.showwarning, partial)
    assert warnings.showwarning == DEFAULT_SHOWARNING
5,346,336
def area_in_squaremeters(geodataframe):
    """Calculates the area sizes of a geo dataframe in square meters.

    Following https://gis.stackexchange.com/a/20056/77760 I am choosing
    equal-area projections to receive a most accurate determination of
    the size of polygons in the geo dataframe. Instead of Gall-Peters, as
    suggested in the answer, I am using EPSG_3035 which is particularly
    useful for Europe.

    Returns a pandas series of area sizes in square meters.
    """
    return geodataframe.to_crs(EPSG_3035_PROJ4).area
5,346,337
def main() -> None: """Compares the Alma records to the current WorldCat holdings. Outputs the following files: - records_with_no_action_needed.csv The OCLC numbers found in both the alma_records_file and the worldcat_records_directory - records_to_set_in_worldcat.csv The OCLC numbers found in the alma_records_file but not the worldcat_records_directory - records_to_unset_in_worldcat.csv The OCLC numbers found in the worldcat_records_directory but not the alma_records_file """ start_time = datetime.now() # Initialize parser and parse command-line args parser = init_argparse() args = parser.parse_args() # Create sets from each input file alma_records_set = set() libraries.handle_file.csv_column_to_set(args.Alma_records_file, alma_records_set, 0, False) # logger.debug(f'{alma_records_set=}') logger.debug(f'{len(alma_records_set)=}\n') worldcat_records_set = set() # Check every file in directory for file in os.listdir(args.Worldcat_records_directory): if not file.endswith('.txt'): logger.debug(f'Not a text (.txt) file: {file}\n') continue logger.debug(f'Started processing file: {file}\n') libraries.handle_file.csv_column_to_set( f'{args.Worldcat_records_directory}/{file}', worldcat_records_set, 0, False) logger.debug(f'Finished processing file: {file}\n') # logger.debug(f'{worldcat_records_set=}') logger.debug(f'{len(worldcat_records_set)=}\n') # Perform set comparisons and add results to appropriate output file with open('csv/records_with_no_action_needed.csv', mode='w', newline='') as records_in_both_sets, \ open('csv/records_to_set_in_worldcat.csv', mode='w', newline='') as records_in_alma_not_worldcat, \ open('csv/records_to_unset_in_worldcat.csv', mode='w', newline='') as records_in_worldcat_not_alma: records_in_both_sets_writer = writer(records_in_both_sets) records_in_alma_not_worldcat_writer = \ writer(records_in_alma_not_worldcat) records_in_worldcat_not_alma_writer = \ writer(records_in_worldcat_not_alma) # Perform intersection of sets alma_worldcat_intersection = alma_records_set & worldcat_records_set libraries.handle_file.set_to_csv(alma_worldcat_intersection, 'records_in_both_sets', records_in_both_sets_writer, 'OCLC Number') # Perform set difference: alma_records_set - worldcat_records_set alma_not_worldcat = alma_records_set - worldcat_records_set libraries.handle_file.set_to_csv(alma_not_worldcat, 'records_in_alma_not_worldcat', records_in_alma_not_worldcat_writer, 'OCLC Number') # Perform set difference: worldcat_records_set - alma_records_set worldcat_not_alma = worldcat_records_set - alma_records_set libraries.handle_file.set_to_csv(worldcat_not_alma, 'records_in_worldcat_not_alma', records_in_worldcat_not_alma_writer, 'OCLC Number') print(f'End of script. Completed in: {datetime.now() - start_time} ' \ f'(hours:minutes:seconds.microseconds)')
5,346,338
def add_eges_grayscale(image):
    """Edge detect. Keep original image grayscale value where no edge."""
    greyscale = rgb2gray(image)
    laplacian = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
    edges = scipy.ndimage.filters.correlate(greyscale, laplacian)
    for index, value in np.ndenumerate(edges):
        edges[index] = 255 - greyscale[index] if value == 0 else 0
    return edges
5,346,339
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict)->dict: """[summary] Creates object for from site chargment request Args: signature (str): signature hash string account_data (dict): merchant_account: str merchant_domain: str data (dict): order + personal data to create charge orderReference (str): timestamp amount (float): order total amount currency (str): 'USD', 'UAH', 'RUB' card (str): user card number expMonth (str): card expires month expYear (str): card expires year cardCvv (str): card cvv cardHolder (str): full name of card holder "Test test" productName (list[str]): product names list productPrice (list[float]): product price list productCount (list[int]): product count list clientFirstName (str): client first name clientLastName (str): client last name clientCountry (str): client country clientEmail (str): client email clientPhone (str): client phone Returns: dict: [description] """ return { "transactionType":"CHARGE", 'merchantAccount': account_data['merchant_account'], "merchantAuthType":"SimpleSignature", 'merchantDomainName': account_data['merchant_domain'], "merchantTransactionType":"AUTH", "merchantTransactionSecureType": "NON3DS", 'merchantSignature': signature, "apiVersion":1, 'orderReference': str(data['orderReference']), 'orderDate': str(data['orderReference']), "amount":data["amount"], 'currency': data['currency'], "card":data['card'], "expMonth":data['expMonth'], "expYear":data['expYear'], "cardCvv":data['cardCvv'], "cardHolder":data['cardHolder'], 'productName': list(map(str, data['productName'])), 'productPrice': list(map(float, data['productPrice'])), 'productCount': list(map(int, data['productCount'])), "clientFirstName":data['clientFirstName'], "clientLastName":data['clientLastName'], "clientCountry":data['clientCountry'], "clientEmail":data['clientEmail'], "clientPhone":data['clientPhone'], }
5,346,340
def deal_weights(node, data=None):
    """Deal with the weights of the custom layer."""
    layer_type = node.layer_type
    weights_func = custom_layers[layer_type]['weights']
    name = node.layer_name
    return weights_func(name, data)
5,346,341
def label_brand_generic(df): """ Correct the formatting of the brand and generic drug names """ df = df.reset_index(drop=True) df = df.drop(['drug_brand_name', 'drug_generic_name'], axis=1) df['generic_compare'] = df['generic_name'].str.replace('-', ' ') df['generic_compare'] = df['generic_compare'].str.replace('with ', '') df['generic_compare'] = df['generic_compare'].str.replace('/', ' ') df['brand_compare'] = df['brand_name'].str.replace('-', ' ') df['brand_compare'] = df['brand_compare'].str.replace('with ', '') df['brand_compare'] = df['brand_compare'].str.replace('/', ' ') df_na = df.fillna(0) #df.dropna().sort_values(by='generic_name') risk_class_list = [] # Find contingency table for each generic # format [[brand_ad_ev, brand_bene], [generic_ad_ev, generic_bene]] for i, val in enumerate(df_na['generic_compare']): if ((df_na.iloc[i]['brand_compare'] == val) | (df_na.iloc[i]['brand_compare'] in val) | (val in df_na.iloc[i]['brand_compare'])): # GENERIC NEG = -1 risk_class_list.append(-1) else: # BRAND POS = 1 risk_class_list.append(1) risk_series = pd.Series(risk_class_list).replace(np.inf, np.nan) risk_series = risk_series.replace(-np.inf, np.nan) df_na['risk_class'] = risk_series df['risk_class'] = risk_series # Drop columns that are redunant from name matching df_na = df_na.drop(['generic_compare', 'brand_compare'], axis = 1) df = df.drop(['generic_compare', 'brand_compare'], axis = 1) df_class_generic_count = pd.pivot_table(df, index = ['generic_name'], values = ['risk_class'], aggfunc = 'count') df_class_generic_count.rename(columns={'risk_class' : 'risk_count'}, inplace=True) df = df.merge(df_class_generic_count, right_index=True, left_on = 'generic_name', how='inner') return df
5,346,342
def RMSRE(
    image_true: np.ndarray,
    image_test: np.ndarray,
    mask: np.ndarray = None,
    epsilon: float = 1e-9,
) -> float:
    """Root mean squared relative error (RMSRE) between two images within the
    specified mask. If no mask is specified the entire image is used.

    Parameters
    ----------
    image_true : np.ndarray
        ground truth image.
    image_test : np.ndarray
        predicted image.
    mask : np.ndarray, optional
        mask to compute the RMSRE in, by default None
    epsilon : float, optional
        epsilon used to stabilize the calculation of the relative error, by default 1e-9

    Returns
    -------
    float
        RMSRE value between the images within the specified mask.
    """
    if mask is None:
        mask = np.ones_like(image_true)
    mask_flat = mask.reshape(-1).astype(bool)  # flatten

    relativeErrorImageFlat = (
        image_test.reshape(-1)[mask_flat] - image_true.reshape(-1)[mask_flat]
    ) / (image_true.reshape(-1)[mask_flat] + epsilon)

    return np.sqrt(
        np.mean(relativeErrorImageFlat) ** 2 + np.std(relativeErrorImageFlat) ** 2
    )
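A short usage sketch with made-up values (relative errors of 0.1, -0.1, and 0.0 inside the mask give an RMSRE of roughly 0.082):

import numpy as np

image_true = np.array([[1.0, 2.0], [4.0, 8.0]])
image_test = np.array([[1.1, 1.8], [4.0, 8.8]])
mask = np.array([[1, 1], [1, 0]])  # exclude the bottom-right pixel
print(RMSRE(image_true, image_test, mask))  # ~0.0816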
5,346,343
def test_ksic10_to_isic4_concordance(code: str, expected: str):
    """Test KSIC10 to ISIC4 sample concordances."""
    assert KSIC10_to_ISIC4.concordant(code) == expected
5,346,344
def getImage(imageData, flag):
    """Returns the PIL image object from imageData based on the flag."""
    image = None
    try:
        if flag == ENHANCED:
            image = PIL.Image.open(imageData.enhancedImage.file)
        elif flag == UNENHANCED:
            image = PIL.Image.open(imageData.unenhancedImage.file)
        elif flag == DISPLAY:
            image = PIL.Image.open(imageData.image.file)
    except:
        logging.error("image cannot be read from the image data")
        return None
    return image
5,346,345
def save_record(record_type, record_source, info, indicator, date=None):
    """
    A convenience function that calls 'create_record' and also saves the resulting record.

    :param record_type: The record type, which should be a value from the RecordType enumeration
    :param record_source: The source for the record, which should be a value from the RecordSource enumeration
    :param info: The actual data to be stored in the record
    :param indicator: The indicator the record is associated with
    :param date: The date to use with this record, or None to use the current date
    :return: The new IndicatorRecord instance
    """
    record = create_record(record_type, record_source, info, indicator, date)
    record.save()
    logger.info("%s (%s) record from %s saved successfully",
                record_type.name, record_type.title, record_source.title)
    return record
5,346,346
def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
    """Adding already existing data is an idempotent operation.

    Either it is idempotent, or it is an error. This is the only sane
    behaviour.
    """
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
    control = learner_type(f, **learner_kwargs)
    if learner_type is Learner1D:
        learner._recompute_losses_factor = 1
        control._recompute_losses_factor = 1
    N = random.randint(10, 30)
    control.ask(N)
    xs, _ = learner.ask(N)
    points = [(x, learner.function(x)) for x in xs]

    for p in points:
        control.tell(*p)
        learner.tell(*p)

    random.shuffle(points)
    for p in points:
        learner.tell(*p)

    M = random.randint(10, 30)
    pls = zip(*learner.ask(M))
    cpls = zip(*control.ask(M))

    if learner_type is SequenceLearner:
        # The SequenceLearner's points might not be hashable
        points, values = zip(*pls)
        indices, points = zip(*points)

        cpoints, cvalues = zip(*cpls)
        cindices, cpoints = zip(*cpoints)
        assert (np.array(points) == np.array(cpoints)).all()
        assert values == cvalues
        assert indices == cindices
    else:
        # Point ordering is not defined, so compare as sets
        assert set(pls) == set(cpls)
5,346,347
def update_product_price(pid: str, new_price: int):
    """
    Update product's price

    Args:
        pid (str): product id
        new_price (int): new price

    Returns:
        dict: status(success, error)
    """
    playload = {'status': ''}
    try:
        connection = create_connection()
        with connection:
            with connection.cursor() as cursor:
                sql = "UPDATE `product` SET `PRICE` = %s WHERE `PID` = %s"
                cursor.execute(sql, (new_price, pid))
            connection.commit()
        playload['status'] = 'success'
        return playload
    except:
        playload['status'] = 'error'
        return playload
5,346,348
def select_n_products(lst, n):
    """Select the top N products (by number of reviews)

    args:
        lst: a list of lists that are (key, value) pairs for (ASIN, N-reviews)
             sorted on the number of reviews in reverse order
        n: a list of three numbers

    returns:
        a list of lists with N products
    """
    top_products = []

    first_third = lst[100:100 + n[0] + 1]
    second_third = lst[1000:1000 + n[1] + 1]
    third_third = lst[50000:50000 + n[2] + 1]

    top_products.extend(first_third)
    top_products.extend(second_third)
    top_products.extend(third_third)

    n_reviews = sum([x[1] for x in top_products])

    print("The number of products is: {} and the number of reviews is: {}".format(
        sum(n), n_reviews))

    return top_products
5,346,349
def load_vanHateren(params): """ Load van Hateren data and format as a Dataset object Inputs: params [obj] containing attributes: data_dir [str] directory to van Hateren data rand_state (optional) [obj] numpy random state object num_images (optional) [int] how many images to extract. Default (None) is all images. image_edge_size (optional) [int] how many pixels on an edge. Default (None) is full-size. """ # Parse params assert hasattr(params, "data_dir"), ("function input must have 'data_dir' kwarg") data_dir = params.data_dir if hasattr(params, "rand_state"): rand_state = params.rand_state else: #assert hasattr(params, "rand_seed"), ("Params must specify a random state or seed") if hasattr(params, "rand_seed"): rand_state = np.random.RandomState(params.rand_seed) else: rand_state = np.random.RandomState(None) print("WARNING: Params did not specify a random state or seed") num_images = int(params.num_images) if hasattr(params, "num_images") else None image_edge_size = int(params.image_edge_size) if hasattr(params, "image_edge_size") else None # Get data img_filename = data_dir+"/img/images_curated.h5" # pre-curated dataset vh_data = vanHateren(img_filename, num_images, rand_state) image_dataset = Dataset(vh_data.images, lbls=None, ignore_lbls=None, rand_state=rand_state) # Resize data if image_edge_size is not None: edge_scale = image_edge_size/image_dataset.shape[1] #vh has square images assert edge_scale <= 1.0, ( "image_edge_size (%g) must be less than or equal to the original size (%g)."%(image_edge_size, image_dataset.shape[1])) scale_factor = [1.0, edge_scale, edge_scale, 1.0] # batch & channel don't get downsampled image_dataset.downsample(scale_factor, order=3) return {"train":image_dataset}
5,346,350
def build_parser() -> argparse.ArgumentParser: """Builds and returns the CLI parser.""" # Help parser help_parser = argparse.ArgumentParser(add_help=False) group = help_parser.add_argument_group('Help and debug') group.add_argument('--debug', help='Enable debug output.', action='store_true') group.add_argument('-h', '--help', help='Show this help message and exit.', action='help') # IO parser io_parser = argparse.ArgumentParser(add_help=False) group = io_parser.add_argument_group('Input/Output') group.add_argument('-i', '--input', help='Input document.', required=True) group.add_argument('-o', '--output', help='Output file path.') # Main parser main_parser = argparse.ArgumentParser(prog=EXE_NAME, description='Detects design patterns in class diagrams.', parents=[help_parser], add_help=False) subparsers = main_parser.add_subparsers(title='Subcommands') # 'patterns' subcommand description = 'List detected design patterns.' patterns_parser = subparsers.add_parser('patterns', description=description, help=description, parents=[help_parser, io_parser], add_help=False) patterns_parser.add_argument('-p', '--pattern', choices=ALL_PATTERNS, nargs='*', help='Patterns to match.') patterns_parser.set_defaults(func=patterns_sub) # 'cycles' subcommand description = 'List detected dependency cycles.' cycles_parser = subparsers.add_parser('cycles', description=description, help=description, parents=[help_parser, io_parser], add_help=False) cycles_parser.set_defaults(func=cycles_sub) # 'metrics' subcommand description = 'Print metrics computed from the class diagram.' metrics_parser = subparsers.add_parser('metrics', description=description, help=description, parents=[help_parser, io_parser], add_help=False) metrics_parser.add_argument('-c', '--config', help='Configuration file.') metrics_parser.set_defaults(func=metrics_sub) return main_parser
5,346,351
def parse_args():
    """Parse command-line args."""
    parser = argparse.ArgumentParser(
        description='Upload (JSON-encoded) conformance resources from FHIR IGPack tar archive.',
        add_help=False)
    parser.add_argument('-h', '--help', action='store_true',
                        help='show this help message and exit')
    parser.add_argument('-i', '--igpack',
                        help='IGPack filename (e.g. us-core-v3.1.1-package.tgz)')
    parser.add_argument('-t', '--target',
                        help='FHIR API base URL for target server (e.g. http://localhost:8080/r4)')
    args = parser.parse_args()

    usage = False
    error = False
    if getattr(args, 'help'):
        usage = True
    else:
        for arg in vars(args):
            if getattr(args, arg) is None:
                print('Error - missing required argument: --{}'.format(arg),
                      file=sys.stderr, flush=True)
                error = True
    if usage or error:
        parser.print_help()
        print()
        print('Additionally, if the ACCESS_TOKEN environment variable is defined,')
        print('its value will be used as an OAuth bearer token for the FHIR API.', flush=True)
        if error:
            raise RuntimeError('Command-line argument error.')
    return args
5,346,352
def wrap_to_pi(inp, mask=None):
    """Wraps to [-pi, pi)"""
    if mask is None:
        mask = torch.ones(1, inp.size(1))
    if mask.dim() == 1:
        mask = mask.unsqueeze(0)
    mask = mask.to(dtype=inp.dtype)
    val = torch.fmod((inp + pi) * mask, 2 * pi)
    neg_mask = (val * mask) < 0
    val = val + 2 * pi * neg_mask.to(val.dtype)
    val = (val - pi)
    inp = (1 - mask) * inp + mask * val
    return inp
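A minimal usage sketch (it assumes `pi` above refers to math.pi, as the function body suggests):

import torch
from math import pi

angles = torch.tensor([[0.0, 3.5, -3.5, 2 * pi]])
print(wrap_to_pi(angles))  # tensor([[ 0.0000, -2.7832,  2.7832,  0.0000]])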
5,346,353
def if_pandas(func):
    """Test decorator that skips test if pandas not installed."""
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import pandas
        except ImportError:
            pytest.skip('Pandas not available.')
        else:
            return func(*args, **kwargs)
    return run_test
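A hedged example of applying the decorator (the test body is illustrative only):

@if_pandas
def test_frame_roundtrip():
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3]})
    assert df["a"].sum() == 6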
5,346,354
def handle_front_pots(pots, next_pots):
    """Handle front, additional pots in pots."""
    if next_pots[2] == PLANT:
        first_pot = pots[0][1]
        pots = [[next_pots[2], first_pot - 1]] + pots
        return pots, next_pots[2:]
    return pots, next_pots[3:]
5,346,355
def run_example_activity_tests():
    """Parses and validates example activity file."""
    fname = os.path.join(
        os.path.dirname(__file__), '../assets/js/activity-examples.js')
    if not os.path.exists(fname):
        raise Exception('Missing file: %s' % fname)

    verifier = Verifier()
    verifier.echo_func = echo
    activity = evaluate_javascript_expression_from_file(
        fname, 'activity', Activity().scope, verifier.echo_func)
    verifier.verify_activity_instance(activity, fname)
5,346,356
def environment(envdata): """ Class decorator that allows to run tests in sandbox against different Qubell environments. Each test method in suite is converted to <test_name>_on_environemnt_<environment_name> :param params: dict """ #assert isinstance(params, dict), "@environment decorator should take 'dict' with environments" def copy(func, name=None): return types.FunctionType(func.func_code, func.func_globals, name=name, argdefs=func.func_defaults, closure=func.func_closure) def wraps_class(clazz): if "environments" in clazz.__dict__: log.warn("Class {0} environment attribute is overridden".format(clazz.__name__)) params = format_as_api(envdata) clazz.environments = params methods = [method for _, method in clazz.__dict__.items() if isinstance(method, types.FunctionType) and method.func_name.startswith("test")] for env in params: if env['name'] != DEFAULT_ENV_NAME(): env['name'] += '_for_%s' % clazz.__name__ # Each test class should have it's own set of envs. for method in methods: delattr(clazz, method.func_name) log.info("Test '{0}' multiplied per environment in {1}".format(method.func_name, clazz.__name__)) for env in params: new_name = method.func_name + "_on_environment_" + env['name'] setattr(clazz, new_name, copy(method, new_name)) return clazz return wraps_class
5,346,357
def convert_vcf(i_filename, sample_info, o_prefix, skip_haploid):
    """Reads a VCF and creates a TPED/TFAM from it.

    :param i_filename: the name of the VCF file (might be gzip).
    :param sample_info: information about the samples.
    :param o_prefix: the prefix of the output files.
    :param skip_haploid: whether to check haploid genotypes or not.

    :type i_filename: string
    :type sample_info: pandas.DataFrame
    :type o_prefix: string
    :type skip_haploid: bool

    """
    # Some regular expressions
    single_point_re = re.compile(r"^\.$")
    allele_split_re = re.compile(r"[/|]")

    # The open function to use
    open_f = open
    if re.search(r"\.gz$", i_filename):
        open_f = gzip.open

    # Reading the file
    with open_f(i_filename, 'rb') as i_file:
        line = i_file.readline().decode()

        # We read until the header
        while re.search("^##", line) is not None:
            line = i_file.readline().decode()

        # This should be the header
        if re.search("^#CHROM", line) is None:
            msg = "{}: no header".format(i_filename)
            raise ProgramError(msg)

        # Creating the header
        row = line.rstrip("\r\n").split("\t")
        header = {name: i for i, name in enumerate(row)}

        # Checking some names
        for name in ["#CHROM", "POS", "ID", "REF", "ALT", "FORMAT"]:
            if name not in header:
                msg = "{}: no column named {}".format(i_filename, name)
                raise ProgramError(msg)

        # Printing the TFAMs
        tfam_names = ["{}.snv.2_alleles.tfam".format(o_prefix),
                      "{}.snv.n_alleles.tfam".format(o_prefix),
                      "{}.indel.2_alleles.tfam".format(o_prefix),
                      "{}.indel.n_alleles.tfam".format(o_prefix)]
        sample_info = print_same_tfams(tfam_names, sample_info,
                                       row[header["FORMAT"]+1:])

        # Positions that have already been seen
        seen_pos = defaultdict(int)

        # The output files
        tped_snv_2 = None
        tped_snv_n = None
        tped_indel_2 = None
        tped_indel_n = None
        snv_ref = None
        try:
            tped_snv_2 = open("{}.snv.2_alleles.tped".format(o_prefix), 'w')
            tped_snv_n = open("{}.snv.n_alleles.tped".format(o_prefix), 'w')
            tped_indel_2 = open(
                "{}.indel.2_alleles.tped".format(o_prefix),
                "w",
            )
            tped_indel_n = open(
                "{}.indel.n_alleles.tped".format(o_prefix),
                "w",
            )
            snv_ref = open("{}.snv.ref".format(o_prefix), "w")
            indel_ref = open("{}.indel.ref".format(o_prefix), "w")
        except IOError:
            msg = "couldn't write output files"
            raise ProgramError(msg)

        # Reading the rest of the data
        for line in i_file:
            row = line.decode().rstrip("\r\n").split("\t")

            # Getting the information
            chrom = encode_chr(row[header["#CHROM"]])
            pos = row[header["POS"]]
            name = row[header["ID"]]
            alleles = [row[header["REF"]]] + row[header["ALT"]].split(",")
            g_format = row[header["FORMAT"]].split(":")
            g_format = {name: i for i, name in enumerate(g_format)}
            genotypes = [
                "." if single_point_re.match(i) else i.split(":")[g_format["GT"]]
                for i in row[header["FORMAT"]+1:]
            ]

            # Getting rid of the "." (so that it becomes "./.")
            genotypes = [single_point_re.sub("./.", i) for i in genotypes]

            # Is this an INDEL?
indel = False for allele in alleles: if len(allele) > 1: indel = True break # The output file to choose from (default is SNV and bi-allelic) o_file = tped_snv_2 o_ref_file = snv_ref if len(alleles) > 2: o_file = tped_snv_n # Constructing the genotype map g_map = {str(i): a for i, a in enumerate(alleles)} if indel: # This is a new genotype map, only for INDELs g_map = {str(i): str(i+1) for i in range(len(alleles))} # These are the required files o_ref_file = indel_ref o_file = tped_indel_2 if len(alleles) > 2: o_file = tped_indel_n # Adding the unknown genotype g_map["."] = "0" # Checking if the position have a name if name == ".": name = "{}:{}".format(chrom, pos) # We increase the number of time we saw this name, and check if we # saw it more than once seen_pos[name] += 1 if seen_pos[name] > 1: name = "{}-{}".format(name, seen_pos[name]) # The first part of the TPED line first_part = [chrom, name, "0", pos] genotypes = [allele_split_re.split(i) for i in genotypes] genotypes = [ recode_genotype(g, g_map, chrom, pos, sample_info.iloc[i, 0], sample_info.iloc[i, 4], skip_haploid) for i, g in enumerate(genotypes) ] print("\t".join(first_part + genotypes), file=o_file) # Saving the alleles print("\t".join([chrom, pos, name, alleles[0], ",".join(alleles[1:])]), file=o_ref_file) # Closing the output files tped_snv_2.close() tped_snv_n.close() tped_indel_2.close() tped_indel_n.close() snv_ref.close() indel_ref.close()
5,346,358
def get_domain_name(url): """ Returns the domain name from a URL """ parsed_uri = urlparse(url) return parsed_uri.netloc
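# Minimal usage sketch (assumes urlparse is imported from urllib.parse, as the
# function above requires):
print(get_domain_name("https://docs.python.org/3/library/urllib.parse.html"))
# -> docs.python.org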
5,346,359
def get_answer_str(answers: list, scale: str):
    """
    :param answers: the answer strings (span, multi-span, arithmetic or count answers)
    :param scale: "", thousand, million, billion, percent
    :return: a single-element list with the normalized, scale-adjusted answer string
    """
    sorted_ans = sorted(answers)
    ans_temp = []
    for ans in sorted_ans:
        ans_str = str(ans)
        if is_number(ans_str):
            ans_num = to_number(ans_str)
            if ans_num is None:
                if scale:
                    ans_str = ans_str + " " + str(scale)
            else:
                if '%' in ans_str:  # the answer itself is already a percentage
                    ans_str = '%.4f' % ans_num
                else:
                    ans_str = '%.4f' % (round(ans_num, 2) * scale_to_num(scale))
        else:
            if scale:
                ans_str = ans_str + " " + str(scale)
        ans_temp.append(ans_str)
    return [" ".join(ans_temp)]
5,346,360
def user_0post(users): """ Fixture that returns a test user with 0 posts. """ return users['user2']
5,346,361
def initialize():
    """ Initialize some parameters, such as API key """
    api_key = os.environ.get("api_key")  # None if the variable is not set
    if api_key and len(api_key) == 64:  # a valid key should be 64 characters long
        return api_key
    print("Please set a valid api_key in the environment variables.")
    exit()
5,346,362
def plot_tuning_curve_evo(data, epochs=None, ax=None, cmap='inferno_r',
                          linewidth=0.3, ylim='auto', include_true=True,
                          xlabel='Bandwidths', ylabel='Average Firing Rate'):
    """
    Plot evolution of TC averaged over noise (zs).

    .. WARNING:: It has not been used for a long time.

    .. TODO:: Make `plot_tuning_curve_evo` accept `.GANRecords`.

    Parameters
    ----------
    data : `.GANData`

    """
    if ax is None:
        _, ax = pyplot.subplots()

    if epochs is None:
        epochs = range(len(data.tuning))
    elif isinstance(epochs, int):
        epochs = range(epochs)

    cmap = matplotlib.cm.get_cmap(cmap)
    norm = matplotlib.colors.Normalize(min(epochs), max(epochs))
    mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
    mappable.set_array([])

    fig = ax.get_figure()
    cb = fig.colorbar(mappable, ax=ax)
    cb.set_label('epochs')

    bandwidths = data.bandwidths
    for i in epochs:
        ax.plot(bandwidths, data.model_tuning[i],
                color=cmap(norm(i)),
                linewidth=linewidth)
    if include_true:
        ax.plot(bandwidths, data.true_tuning[0], linewidth=3, linestyle='--')

    if ylim == 'auto':
        y = data.model_tuning[epochs]
        q3 = np.percentile(y, 75)
        q1 = np.percentile(y, 25)
        iqr = q3 - q1
        yamp = y[y < q3 + 1.5 * iqr].max()
        ax.set_ylim(- yamp * 0.05, yamp * 1.2)
    elif ylim:
        ax.set_ylim(ylim)

    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    return ax
5,346,363
def transportinfo_decoder(obj): """Decode programme object from json.""" transportinfo = json.loads(obj) if "__type__" in transportinfo and transportinfo["__type__"] == "__transportinfo__": return TransportInfo(**transportinfo["attributes"]) return transportinfo
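# Hedged usage sketch: TransportInfo is assumed to be a class defined elsewhere in the
# module that accepts the attribute dict as keyword arguments; the field names below
# are purely illustrative.
import json

payload = json.dumps({
    "__type__": "__transportinfo__",
    "attributes": {"line": "S1", "direction": "Airport"},
})
info = transportinfo_decoder(payload)  # -> TransportInfo(line="S1", direction="Airport")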
5,346,364
def group_events_data(events): """ Group events according to the date. """ # e.timestamp is a datetime.datetime in UTC # change from UTC timezone to current seahub timezone def utc_to_local(dt): tz = timezone.get_default_timezone() utc = dt.replace(tzinfo=timezone.utc) local = timezone.make_naive(utc, tz) return local event_groups = [] for e in events: e.time = utc_to_local(e.timestamp) e.date = e.time.strftime("%Y-%m-%d") if e.etype == 'repo-update': e.author = e.commit.creator_name elif e.etype == 'repo-create': e.author = e.creator else: e.author = e.repo_owner if len(event_groups) == 0 or \ len(event_groups) > 0 and e.date != event_groups[-1]['date']: event_group = {} event_group['date'] = e.date event_group['events'] = [e] event_groups.append(event_group) else: event_groups[-1]['events'].append(e) return event_groups
5,346,365
def onesetup(places, numtwts, numtest, balance):
    """ This setup considers the tweets from the places in the list
        and selects some number of tweets from those places as testing
        tweets; each query is just one tweet.

        @arg places the list of place_ids to consider
        @arg numtwts the list of tweet-model sizes to evaluate
        @arg numtest the number of testing tweets per place
        @arg balance the weight balancing the tweet and web models
        @return a plot of the evaluation results
    """
    lsts = linestyles()
    # prepare for data
    twtmodel = dict()
    webmodel = dict()
    twttest = Dataset()
    for pid in places:
        twtp = loadrows(GEOTWEET, ('place_id', 'text'),
                        ('place_id=\'{0}\''.format(pid),), 'sample',
                        'order by rand() limit {0}'.format(max(numtwts) + numtest))
        webmodel[pid] = loadrows(GEOTWEET, ('place_id', 'web'),
                                 ('place_id=\'{0}\''.format(pid),), 'web',
                                 'order by rand() limit 30')['web']
        twtmodel[pid] = twtp['text'][:max(numtwts)]
        for idx in range(max(numtwts) + 1, twtp.size()):
            twttest.append(twtp.item(idx))

    # ranking by twt and twt+web
    for numtwt in numtwts:
        lmtwt = dict()
        lmweb = dict()
        for pid in twtmodel.iterkeys():
            lmtwt[pid] = lmfromtext(twtmodel[pid][:numtwt])
            lmweb[pid] = lmfromtext(webmodel[pid])

        jointranks = list()
        for item in twttest:
            jointranks.append(joint_ranking(lmfromtext([item['text'],]),
                                            lmtwt, lmweb, balance))
        twtranks = list()
        for item in twttest:
            twtranks.append(kl_ranking(lmtwt, lmfromtext([item['text'],])))

        gjoint = batcheval(twttest['place_id'], len(places), jointranks)
        gtwt = batcheval(twttest['place_id'], len(places), twtranks)
        plt.plot(gjoint['pos'], gjoint['rate'],
                 marker='^',
                 label='JOINT($t={0}$)'.format(numtwt),
                 linestyle=lsts.next())
        plt.plot(gtwt['pos'], gtwt['rate'],
                 marker='o',
                 label='TWEET($t={0}$)'.format(numtwt),
                 linestyle=lsts.next())

    webranks = list()
    for item in twttest:
        webranks.append(kl_ranking(lmweb, lmfromtext([item['text'],])))
    gweb = batcheval(twttest['place_id'], len(places), webranks)
    plt.plot(gweb['pos'], gweb['rate'], label='WEB', linestyle='dotted')

    plt.plot(gweb['pos'],
             [float(r) / max(gweb['pos']) for r in gweb['pos']],
             ls='-.',
             marker='s',
             label='Random Baseline')
    plt.xlabel('First $n$ Places')
    plt.ylabel('Probability')
    plt.legend(loc='lower right')
    plt.show()
5,346,366
def create_matrix(PBC=None): """ Used for calculating distances in lattices with periodic boundary conditions. When multiplied with a set of points, generates additional points in cells adjacent to and diagonal to the original cell Args: PBC: an axis which does not have periodic boundary condition. Ex: PBC=1 cancels periodic boundary conditions along the x axis Returns: A numpy array of matrices which can be multiplied by a set of coordinates """ matrix = [] i_list = [-1, 0, 1] j_list = [-1, 0, 1] k_list = [-1, 0, 1] if PBC == 1: i_list = [0] elif PBC == 2: j_list = [0] elif PBC == 3: k_list = [0] for i in i_list: for j in j_list: for k in k_list: matrix.append([i,j,k]) return np.array(matrix, dtype=float)
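# Usage sketch: the offset vectors can be added to a fractional coordinate to
# enumerate its periodic images (27 offsets with full PBC, 9 when one axis is
# excluded). Assumes numpy is imported as np, as in the function above.
import numpy as np

offsets = create_matrix()              # shape (27, 3)
point = np.array([0.1, 0.2, 0.3])
images = point + offsets               # the point plus its 26 periodic images
print(offsets.shape, create_matrix(PBC=1).shape)  # (27, 3) (9, 3)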
5,346,367
def get_additive_seasonality_linear_trend() -> pd.Series: """Get example data for additive seasonality tutorial""" dates = pd.date_range(start="2017-06-01", end="2021-06-01", freq="MS") T = len(dates) base_trend = 2 state = np.random.get_state() np.random.seed(13) observations = base_trend * np.arange(T) + np.random.normal(loc=4, size=T) np.random.set_state(state) seasonality = 12 time = np.arange(0, T / seasonality, 1 / seasonality) amplitude = 10 sin_cos_wave = amplitude * np.cos(2 * np.pi * time) + amplitude * np.sin( 2 * np.pi * time ) observations += sin_cos_wave output = pd.Series(observations, index=dates) return output
5,346,368
async def test_creating_entry_tries_discover(hass): """Test setting up does discovery.""" with MOCK_PYHS100, patch( "homeassistant.components.tplink.async_setup_entry", return_value=mock_coro(True), ) as mock_setup, patch( "pyHS100.Discover.discover", return_value={"host": 1234} ): result = await hass.config_entries.flow.async_init( tplink.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure( result["flow_id"], {} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1
5,346,369
def is_client_trafic_trace(conf_list, text):
    """Determine if text is client traffic that should be included."""
    for index in range(len(conf_list)):
        if text.find(conf_list[index].ident_text) != -1:
            return True
    return False
5,346,370
def convert_range(g, op, block): """Operator converter for range.""" start = g.get_node(op.input("Start")[0]) stop = g.get_node(op.input("End")[0]) step = g.get_node(op.input("Step")[0]) dtype = infer_type(start).checked_type.dtype params = [] for param in (start, stop, step): param = _infer_value(param, g.get_params()) if isinstance(param, list): param = param[0] if isinstance(param, _expr.Expr): param = _op.squeeze(param) else: param = _op.const(param, dtype=dtype) params.append(param) out = _op.transform.arange(params[0], params[1], params[2], dtype=dtype) g.add_node(op.output("Out")[0], out)
5,346,371
def isinf(x): """ For an ``mpf`` *x*, determines whether *x* is infinite:: >>> from sympy.mpmath import * >>> isinf(inf), isinf(-inf), isinf(3) (True, True, False) """ if not isinstance(x, mpf): return False return x._mpf_ in (finf, fninf)
5,346,372
def do_run_comparison(source, config_input, suppress_warnings_for=[]):
    """
    Run rsmcompare experiment automatically.

    Use the given experiment configuration file located in the given
    source directory.

    Parameters
    ----------
    source : str
        Path to where the test experiment is located on disk.
    config_input : str or Configuration or dict
        Path to the experiment configuration file, or a
        ``configuration_parser.Configuration`` object, or a dictionary with
        keys corresponding to fields in the configuration file
    suppress_warnings_for : list, optional
        Categories for which warnings should be suppressed when running
        the experiments. Note that ``RuntimeWarning``s are always suppressed.
        Defaults to ``[]``.
    """
    source_output_dir = 'test_outputs'
    experiment_dir = join(source_output_dir, source)

    with warnings.catch_warnings():

        # always suppress runtime warnings
        warnings.filterwarnings('ignore', category=RuntimeWarning)

        # suppress additional warning types if specified
        for warning_type in suppress_warnings_for:
            warnings.filterwarnings('ignore', category=warning_type)

        run_comparison(config_input, experiment_dir)
5,346,373
def formalize_switches(switches): """ Create all entries for the switches in the topology.json """ switches_formal=dict() for s, switch in enumerate(switches): switches_formal["s_"+switch]=formalize_switch(switch, s) return switches_formal
5,346,374
def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): """Checks that X has coordinates named as specified by x_lat_dim, x_lon_dim, x_sample_dim, and x_feature_dim""" assert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X' assert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X' assert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X' assert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'
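# Hedged usage sketch (assumes xarray and numpy are available; the dimension names
# are illustrative, not mandated by the function):
import numpy as np
import xarray as xr

X = xr.DataArray(
    np.zeros((2, 3, 4, 1)),
    dims=("lat", "lon", "sample", "feature"),
    coords={"lat": [0, 1], "lon": [0, 1, 2], "sample": range(4), "feature": ["t2m"]},
)
check_coords(X, "lat", "lon", "sample", "feature")  # passes silently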
5,346,375
def arp_scores(run): """ This function computes the Average Retrieval Performance (ARP) scores according to the following paper: Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff. How to Measure the Reproducibility of System-oriented IR Experiments. Proceedings of SIGIR, pages 349-358, 2020. The ARP score is defined by the mean across the different topic scores of a run. For all measures outputted by trec_eval, the ARP scores will be determined. @param run: The run to be evaluated. @return: Dictionary containing the ARP scores for every measure outputted by trec_eval. """ return dict(_arp_scores(run))
5,346,376
def init_testlink(): """Test link initialization""" if not TLINK.enabled: return # connect to test link TLINK.rpc = testlink.TestlinkAPIClient(server_url=TLINK.conf['xmlrpc_url'], devKey=TLINK.conf['api_key']) # assert test project exists _test_project = TLINK.rpc.getTestProjectByName(TLINK.conf['project']) if not _test_project: TLINK.disable_or_exit( 'Invalid tl_project name. Unable to find project') return # type convert from list for older testlink instances _test_project = _test_project[0] \ if isinstance(_test_project, list) else _test_project # get project id and prefix TLINK.project_id = _test_project['id'] TLINK.project_prefix = _test_project['prefix'] # create test plan if required plan_name = [tp for tp in TLINK.rpc.getProjectTestPlans(TLINK.project_id) if tp['name'] == TLINK.conf['test_plan']] if not plan_name: # pylint: disable=E1121 TLINK.rpc.createTestPlan(TLINK.conf['test_plan'], TLINK.conf['project']) plan_name = [tp for tp in TLINK.rpc.getProjectTestPlans(TLINK.project_id) if tp['name'] == TLINK.conf['test_plan']] TLINK.test_plan_id = plan_name[0]['id'] TLINK.test_build_id = None TLINK.test_platform = None TLINK.tc_pattern = re.compile(r'%s\d+' % TLINK.conf['pytest_tc_prefix'], re.I)
5,346,377
def bayesian_proportion_test(
        x:Tuple[int,int],
        n:Tuple[int,int],
        prior:Tuple[float,float]=(0.5,0.5),
        prior2:Optional[Tuple[float,float]]=None,
        num_samples:int=1000,
        seed:int=8675309) -> Tuple[float,float,float]:
    r""" Perform a Bayesian test to identify significantly different proportions.

    This test is based on a beta-binomial conjugate model. It uses Monte Carlo
    simulations to estimate the posterior of the difference between the
    proportions, as well as the likelihood that :math:`\pi_1 > \pi_2` (where
    :math:`\pi_i` is the likelihood of success in sample :math:`i`).

    Parameters
    ----------
    x : typing.Tuple[int,int]
        The number of successes in each sample

    n : typing.Tuple[int,int]
        The number of trials in each sample

    prior : typing.Tuple[float,float]
        The parameters of the beta distribution used as the prior in the conjugate
        model for the first sample.

    prior2 : typing.Optional[typing.Tuple[float,float]]
        The parameters of the beta distribution used as the prior in the conjugate
        model for the second sample. If this is not specified, then `prior` is used.

    num_samples : int
        The number of simulations

    seed : int
        The seed for the random number generator

    Returns
    -------
    difference_{mean,var} : float
        The posterior mean and variance of the difference in the likelihood of success
        in the two samples. A negative mean indicates that the likelihood in sample 2
        is higher.

    p_pi_1_greater : float
        The probability that :math:`\pi_1 > \pi_2`
    """
    # copy over the prior if not specified for sample 2
    if prior2 is None:
        prior2 = prior

    # check the bounds
    if len(x) != 2:
        msg = "[bayesian_proportion_test]: please ensure x has exactly two elements"
        raise ValueError(msg)
    if len(n) != 2:
        msg = "[bayesian_proportion_test]: please ensure n has exactly two elements"
        raise ValueError(msg)
    if len(prior) != 2:
        msg = "[bayesian_proportion_test]: please ensure prior has exactly two elements"
        raise ValueError(msg)
    if len(prior2) != 2:
        msg = "[bayesian_proportion_test]: please ensure prior2 has exactly two elements"
        raise ValueError(msg)

    # set the seed
    if seed is not None:
        np.random.seed(seed)

    # perform the test: the beta posterior is Beta(alpha + successes, beta + failures)
    a = prior[0]+x[0]
    b = prior[1]+n[0]-x[0]
    s1_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples)

    a = prior2[0]+x[1]
    b = prior2[1]+n[1]-x[1]
    s2_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples)

    diff_posterior_samples = s1_posterior_samples - s2_posterior_samples
    diff_posterior_mean = np.mean(diff_posterior_samples)
    diff_posterior_var = np.var(diff_posterior_samples)

    p_pi_1_greater = sum(s1_posterior_samples > s2_posterior_samples) / num_samples

    return diff_posterior_mean, diff_posterior_var, p_pi_1_greater
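# Hedged usage sketch (assumes numpy as np and scipy.stats are imported, as the
# function above requires): compare 30/100 successes against 50/100 successes.
mean_diff, var_diff, p_greater = bayesian_proportion_test(
    x=(30, 50), n=(100, 100), num_samples=5000
)
print(mean_diff, var_diff, p_greater)  # mean_diff should be clearly negative here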
5,346,378
def _create_triangular_filterbank( all_freqs: Tensor, f_pts: Tensor, ) -> Tensor: """Create a triangular filter bank. Args: all_freqs (Tensor): STFT freq points of size (`n_freqs`). f_pts (Tensor): Filter mid points of size (`n_filter`). Returns: fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`). """ # Adopted from Librosa # calculate the difference between each filter mid point and each stft freq point in hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2) # create overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter) up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) return fb
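# Usage sketch (assumes torch is imported): a bank of 3 triangular filters needs
# 5 boundary points; the result has one column per filter.
import torch

all_freqs = torch.linspace(0, 8000, steps=201)   # STFT bin centre frequencies
f_pts = torch.linspace(0, 8000, steps=5)         # 3 filters -> 5 boundary points
fb = _create_triangular_filterbank(all_freqs, f_pts)
print(fb.shape)  # torch.Size([201, 3])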
5,346,379
def convert_millis(track_dur_lst):
    """ Convert milliseconds to 00:00:00 format """
    converted_track_times = []
    for track_dur in track_dur_lst:
        seconds = (int(track_dur)/1000)%60
        minutes = int(int(track_dur)/60000)%60
        hours = int(int(track_dur)/(60000*60))
        converted_time = '%02d:%02d:%02d' % (hours, minutes, seconds)
        converted_track_times.append(converted_time)
    return converted_track_times
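# Usage sketch: durations may be given as ints or numeric strings of milliseconds.
print(convert_millis([61000, "3723000"]))  # ['00:01:01', '01:02:03']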
5,346,380
def start_server(use_sendfile, keep_sending=False): """A simple test server which sends a file once a client connects. use_sendfile decides whether using sendfile() or plain send(). If keep_sending is True restart sending file when EOF is reached. """ sock = socket.socket() sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((HOST, PORT)) sock.listen(1) conn, addr = sock.accept() sock.close() file = open(BIGFILE, 'rb') def on_exit(signum, fram): file.close() conn.close() sys.exit(0) signal.signal(signal.SIGTERM, on_exit) signal.signal(signal.SIGINT, on_exit) if not use_sendfile: while 1: chunk = file.read(BUFFER_SIZE) if not chunk: # EOF if keep_sending: file.seek(0) continue else: break conn.sendall(chunk) else: offset = 0 sockno = conn.fileno() fileno = file.fileno() while 1: try: sent = sendfile(sockno, fileno, offset, BUFFER_SIZE) except OSError: err = sys.exc_info()[1] if err.errno in (errno.EAGAIN, errno.EBUSY): continue raise else: if not sent: # EOF if keep_sending: offset = 0 continue else: break else: offset += sent
5,346,381
def sync_xlims(*axes):
    """Synchronize the x-axis data limits for multiple axes.

    Uses the maximum upper limit and minimum lower limit across
    all given axes.

    Parameters
    ----------
    *axes : axis objects
        List of matplotlib axis objects to format

    Returns
    -------
    out : xmin, xmax
        The computed bounds
    """
    xmins, xmaxs = zip(*[ax.get_xlim() for ax in axes])
    xmin = min(xmins)
    xmax = max(xmaxs)

    for ax in axes:
        ax.set_xlim(xmin, xmax)
    return xmin, xmax
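# Usage sketch with matplotlib: after the call both axes share the widest range.
import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot([0, 1, 2], [1, 4, 9])
ax2.plot([5, 10, 15], [2, 3, 4])
print(sync_xlims(ax1, ax2))  # roughly (-0.1, 15.5), depending on axis margins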
5,346,382
def algo_config_to_class(algo_config): """ Maps algo config to the IRIS algo class to instantiate, along with additional algo kwargs. Args: algo_config (Config instance): algo config Returns: algo_class: subclass of Algo algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm """ pol_cls, _ = algo_name_to_factory_func("bc")(algo_config.actor) plan_cls, _ = algo_name_to_factory_func("gl")(algo_config.value_planner.planner) value_cls, _ = algo_name_to_factory_func("bcq")(algo_config.value_planner.value) return IRIS, dict(policy_algo_class=pol_cls, planner_algo_class=plan_cls, value_algo_class=value_cls)
5,346,383
def group_by_key(dirnames, key): """Group a set of output directories according to a model parameter. Parameters ---------- dirnames: list[str] Output directories key: various A field of a :class:`Model` instance. Returns ------- groups: dict[various: list[str]] For each value of `key` that is found at least once in the models, a list of the output directories where `key` is that value. """ groups = defaultdict(lambda: []) for dirname in dirnames: m = get_recent_model(dirname) groups[m.__dict__[key]].append(dirname) return dict(groups)
5,346,384
def test_transform(): """2D and 3D""" WGS84_crs = {'init': 'EPSG:4326'} WGS84_points = ([12.492269], [41.890169], [48.]) ECEF_crs = {'init': 'EPSG:4978'} ECEF_points = ([4642610.], [1028584.], [4236562.]) ECEF_result = transform(WGS84_crs, ECEF_crs, *WGS84_points) assert numpy.allclose(numpy.array(ECEF_result), numpy.array(ECEF_points)) UTM33_crs = {'init': 'EPSG:32633'} UTM33_points = ([291952], [4640623]) UTM33_result = transform(WGS84_crs, UTM33_crs, *WGS84_points[:2]) assert numpy.allclose(numpy.array(UTM33_result), numpy.array(UTM33_points))
5,346,385
def redistrict_grouped(df, kind, group_cols, district_col=None, value_cols=None, **kwargs): """Redistrict dataframe by groups Args: df (pandas.DataFrame): input dataframe kind (string): identifier of redistrict info (e.g. de/kreise) group_cols (list): List of column names to group by district_col (string): Name of district column value_cols (list): List of column names with values to operate on **kwargs: see redistrict function Returns: pandas.Dataframe: Redistricted dataframe """ return pd.concat(redistrict_grouped_dataframe(df, kind, group_cols, district_col=district_col, value_cols=value_cols, **kwargs))
5,346,386
def count_wraps_rand(
    nr_parties: int, shape: Tuple[int]
) -> Tuple[List[ShareTensor], List[ShareTensor]]:
    """Count wraps random.

    The Trusted Third Party (TTP) or Crypto provider should generate:
    - a set of shares for a random number
    - a set of shares for the number of wraparounds for that number

    Those shares are used when doing a public division, such that the
    end result would be the correct one.

    Args:
        nr_parties (int): Number of parties
        shape (Tuple[int]): The shape for the random value

    Returns:
        List[List[List[ShareTensor]]]: a list of instances with the shares
        for a random integer value and shares for the number of wraparounds
        that are done when reconstructing the random value
    """
    rand_val = torch.empty(size=shape, dtype=torch.long).random_(
        generator=ttp_generator
    )

    r_shares = MPCTensor.generate_shares(
        secret=rand_val,
        nr_parties=nr_parties,
        tensor_type=torch.long,
        encoder_precision=0,
    )
    wraps = count_wraps([share.tensor for share in r_shares])

    theta_r_shares = MPCTensor.generate_shares(
        secret=wraps, nr_parties=nr_parties, tensor_type=torch.long, encoder_precision=0
    )

    # We are always creating only one instance
    primitives_sequential = [(r_shares, theta_r_shares)]

    primitives = list(
        map(list, zip(*map(lambda x: map(list, zip(*x)), primitives_sequential)))
    )

    return primitives
5,346,387
def update_processing_with_collection_contents(updated_processing, new_processing=None, updated_collection=None, updated_files=None, new_files=None, coll_msg_content=None, file_msg_content=None, transform_updates=None, message_bulk_size=1000, session=None): """ Update processing with collection, contents, file messages and collection messages. :param updated_processing: dict with processing id and parameters. :param updated_collection: dict with collection id and parameters. :param updated_files: list of content files. :param coll_msg_content: message with collection info. :param file_msg_content: message with files info. """ if updated_files: orm_contents.update_contents(updated_files, session=session) if new_files: orm_contents.add_contents(contents=new_files, session=session) if file_msg_content: if not type(file_msg_content) in [list, tuple]: file_msg_content = [file_msg_content] for file_msg_con in file_msg_content: orm_messages.add_message(msg_type=file_msg_con['msg_type'], status=file_msg_con['status'], source=file_msg_con['source'], transform_id=file_msg_con['transform_id'], num_contents=file_msg_con['num_contents'], msg_content=file_msg_con['msg_content'], bulk_size=message_bulk_size, session=session) if updated_collection: orm_collections.update_collection(coll_id=updated_collection['coll_id'], parameters=updated_collection['parameters'], session=session) if coll_msg_content: orm_messages.add_message(msg_type=coll_msg_content['msg_type'], status=coll_msg_content['status'], source=coll_msg_content['source'], transform_id=coll_msg_content['transform_id'], num_contents=coll_msg_content['num_contents'], msg_content=coll_msg_content['msg_content'], session=session) if updated_processing: orm_processings.update_processing(processing_id=updated_processing['processing_id'], parameters=updated_processing['parameters'], session=session) if new_processing: orm_processings.add_processing(**new_processing, session=session) if transform_updates: orm_transforms.update_transform(transform_id=transform_updates['transform_id'], parameters=transform_updates['parameters'], session=session)
5,346,388
def text_sim( sc1: Sequence, sc2: Sequence, ) -> float: """Returns the Text_Sim similarity measure between two pitch class sets. """ sc1 = prime_form(sc1) sc2 = prime_form(sc2) corpus = [text_set_class(x) for x in sorted(allClasses)] vectorizer = TfidfVectorizer() trsfm = vectorizer.fit_transform(corpus) text_similarity = cosine_similarity(trsfm) names = [str(x) for x in sorted(allClasses)] df = pd.DataFrame(text_similarity.round(3), columns=names, index=names) return df[str(sc1)][str(sc2)]
5,346,389
def _feature_properties(feature, layer_definition, whitelist=None, skip_empty_fields=False): """ Returns a dictionary of feature properties for a feature in a layer. Third argument is an optional list or dictionary of properties to whitelist by case-sensitive name - leave it None to include everything. A dictionary will cause property names to be re-mapped. OGR property types: OFTInteger (0), OFTIntegerList (1), OFTReal (2), OFTRealList (3), OFTString (4), OFTStringList (5), OFTWideString (6), OFTWideStringList (7), OFTBinary (8), OFTDate (9), OFTTime (10), OFTDateTime (11). Extra OGR types for GDAL 2.x: OFTInteger64 (12), OFTInteger64List (13) """ properties = {} okay_types = [ogr.OFTInteger, ogr.OFTReal, ogr.OFTString, ogr.OFTWideString, ogr.OFTDate, ogr.OFTTime, ogr.OFTDateTime] if hasattr(ogr, 'OFTInteger64'): okay_types.extend([ogr.OFTInteger64, ogr.OFTInteger64List]) for index in range(layer_definition.GetFieldCount()): field_definition = layer_definition.GetFieldDefn(index) field_type = field_definition.GetType() name = field_definition.GetNameRef() if type(whitelist) in (list, dict) and name not in whitelist: continue if field_type not in okay_types: try: name = [oft for oft in dir(ogr) if oft.startswith('OFT') and getattr(ogr, oft) == field_type][0] except IndexError: raise KnownUnknown("Found an OGR field type I've never even seen: %d" % field_type) else: raise KnownUnknown("Found an OGR field type I don't know what to do with: ogr.%s" % name) if not skip_empty_fields or feature.IsFieldSet(name): property = type(whitelist) is dict and whitelist[name] or name properties[property] = feature.GetField(name) return properties
5,346,390
def reverse_search(view, what, start=0, end=-1, flags=0):
    """Do binary search to find `what` walking backwards in the buffer.
    """
    if end == -1:
        end = view.size()
    end = find_eol(view, view.line(end).a)

    last_match = None

    lo, hi = start, end
    while True:
        middle = (lo + hi) // 2
        line = view.line(middle)
        middle, eol = find_bol(view, line.a), find_eol(view, line.a)

        if search_in_range(view, what, middle, hi, flags):
            lo = middle
        elif search_in_range(view, what, lo, middle - 1, flags):
            hi = middle - 1
        else:
            return calculate_relative_ref(view, '.')

        # Don't keep searching the same line forever.
        if last_match and line.contains(last_match):
            match = find_last_match(view, what, lo, hi, flags=flags)
            return view.rowcol(match.begin())[0] + 1

        last_match = sublime.Region(line.begin(), line.end())
5,346,391
def formatLookupLigatureSubstitution(lookup, lookupList, makeName=makeName): """ GSUB LookupType 4 """ # substitute <glyph sequence> by <glyph>; # <glyph sequence> must contain two or more of <glyph|glyphclass>. For example: # substitute [one one.oldstyle] [slash fraction] [two two.oldstyle] by onehalf; lines = list(filter(None, [ formatLookupflag(lookup, makeName=makeName) ])) \ + ['sub {0} {1} by {2};'.format(first, ' '.join(lig.Component), lig.LigGlyph) for sub in lookup.SubTable for first, ligatures in sub.ligatures.items() for lig in ligatures] return (True, lines)
5,346,392
def convert_raw2nc(path2rawfolder = '/nfs/grad/gradobs/raw/mlo/2020/', path2netcdf = '/mnt/telg/data/baseline/mlo/2020/', # database = None, start_date = '2020-02-06', pattern = '*sp02.*', sernos = [1032, 1046], site = 'mlo', overwrite = False, verbose = False, raise_error = True, test = False): """ Parameters ---------- path2rawfolder : TYPE, optional DESCRIPTION. The default is '/nfs/grad/gradobs/raw/mlo/2020/'. path2netcdf : TYPE, optional DESCRIPTION. The default is '/mnt/telg/data/baseline/mlo/2020/'. # database : TYPE, optional DESCRIPTION. The default is None. start_date : TYPE, optional DESCRIPTION. The default is '2020-02-06'. pattern : str, optional Only files with this pattern are considered. In newer raw data versions this would be '*sp02.*'. In older ones: 'MLOD*' sernos : TYPE, optional DESCRIPTION. The default is [1032, 1046]. overwrite : TYPE, optional DESCRIPTION. The default is False. verbose : TYPE, optional DESCRIPTION. The default is False. test : TYPE, optional If True only one file is processed. The default is False. Returns ------- None. """ # lines = get_lines_from_station_header() path2rawfolder = pathlib.Path(path2rawfolder) path2netcdf = pathlib.Path(path2netcdf) try: path2netcdf.mkdir(exist_ok=True) except FileNotFoundError: path2netcdf.parent.mkdir() path2netcdf.mkdir() file_list = list(path2rawfolder.glob(pattern)) # print(len(file_list)) # file_contents = [] # return file_list df_in = pd.DataFrame(file_list, columns=['path_in']) # test what format, old or new. p2f = file_list[0] nl = p2f.name.split('.') if len(nl) == 2: # old format like /nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113 # get year from path def path2date(path2file): year = path2file.parent.parent.name jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0]))) date = pd.to_datetime(year) + pd.to_timedelta(jul-1, 'd') return date # df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(year) + pd.to_timedelta((int(''.join(filter(str.isdigit, x.name.split('.')[0]))))-1, 'd')) else: # new format: gradobs.mlo-sp02.20200126.raw.dat # df_in.index = df_in.path_in.apply(lambda x: pd.to_datetime(x.name.split('.')[2])) path2date = lambda x: pd.to_datetime(x.name.split('.')[2]) # set index based on format df_in.index = df_in.path_in.apply(path2date) df_in.sort_index(inplace=True) df_in = df_in.truncate(before=start_date) df_out = pd.DataFrame(columns=['path_out']) # generate output path for sn in sernos: for idx, row in df_in.iterrows(): # fnnc = row.path_in.name.replace('.dat','.nc') # fnnc = fnnc.replace('-sp02', '.sp02') # fnns = fnnc.split('.') # fnns = fnns[:3] + [f'sn{str(sn)}'] + fnns[3:] # fnnc = '.'.join(fnns) # path2netcdf_file = path2netcdf.joinpath(fnnc) date = idx fnnc = f'gradobs.mlo.sp02.sn{sn}.{date.year}{date.month:02d}{date.day:02d}.raw.nc' path2netcdf_file = path2netcdf.joinpath(fnnc) df_add = pd.DataFrame({'path_in': row.path_in, 'path_out':path2netcdf_file}, index = [idx] # ignore_index=True ) df_out = df_out.append(df_add) # check if file exists. 
Process only those that do not exist df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file()) df_work = df_out[~df_out.exists] # return df_work ### bsts work_array = df_work.path_in.unique() print(f'No of files that need to be processed: {len(work_array)}') # exists = 0 # new = 0 for e, file in enumerate(work_array): # if e == 3: break # ds = read_file(file, lines) df_sel = df_work[df_work.path_in == file] try: dslist = read_file(file, database = database, site = site) except IndexError: if raise_error: raise else: print('Instrument not installed ... skip', end = '...') if test: return {'file': file, 'database': database} else: continue ### generate output file name # processing for ds in dslist: # fnnc = file.name.replace('.dat','.nc') # fnnc = fnnc.replace('-sp02', '.sp02') # fnns = fnnc.split('.') # fnns = fnns[:3] + [f'sn{str(ds.serial_no.values)}'] + fnns[3:] # fnnc = '.'.join(fnns) # path2netcdf_file = path2netcdf.joinpath(fnnc) # check which of the output files is the right ... still, i am not convinced this is the most elegant way to do this.... add the lineno in the work table? sn = str(ds.serial_no.values) try: path2netcdf_file = [p2fo for p2fo in df_sel.path_out.values if sn in p2fo.name][0] except IndexError: assert(False), 'This Error is usually caused because one of the netcdf files (for a serial number) is deleted, but not the other.' # save to file ds.to_netcdf(path2netcdf_file) if test: break # out = dict(processed = new, # skipped = exists, # last_ds_list = dslist) if not test: df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file()) df_work = df_out[~df_out.exists] work_array = df_work.path_in.unique() assert(df_work.shape[0] == 0), f'df_work should be empty at the end. Still has {df_work.shape[0]} entries.' return
5,346,393
def zenith_simple_env(_shared_simple_env: ZenithEnv) -> Iterator[ZenithEnv]:
    """
    Simple Zenith environment, with no authentication and no safekeepers.

    If TEST_SHARED_FIXTURES environment variable is set, we reuse the same
    environment for all tests that use 'zenith_simple_env', keeping the
    page server and safekeepers running. Any compute nodes are stopped after
    each test, however.
    """
    yield _shared_simple_env

    _shared_simple_env.postgres.stop_all()
5,346,394
def get_curricula(course_url, year): """Encodes the available curricula for a given course in a given year in a vaguely sane format Dictionary fields: - constant.CODEFLD: curriculum code as used in JSON requests - constant.NAMEFLD: human-readable curriculum name""" curricula = [] curricula_req_url = constant.CURRICULAURLFORMAT[get_course_lang(course_url)].format(course_url, year) for curr in requests.get(curricula_req_url).json(): curricula.append({constant.CODEFLD: curr[constant.CURRVAL], constant.NAMEFLD: curr[constant.CURRNAME]}) return curricula
5,346,395
def conv3x3(in_planes, out_planes, stride=1, groups=1): """3x3 conv with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
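# Usage sketch (assumes torch and torch.nn as nn are imported): padding=1 keeps the
# spatial size for stride 1 and halves it for stride 2.
import torch

layer = conv3x3(in_planes=16, out_planes=32, stride=2)
x = torch.randn(1, 16, 64, 64)
print(layer(x).shape)  # torch.Size([1, 32, 32, 32])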
5,346,396
def extract_facility_data(inventory_dict): """ Returns df of facilities from each inventory in inventory_dict, including FIPS code :param inventory_dict: a dictionary of inventory types and years (e.g., {'NEI':'2017', 'TRI':'2017'}) :return: df """ import stewi facility_mapping = pd.DataFrame() # load facility data from stewi output directory, keeping only the facility IDs, # and geographic information inventory_list = list(inventory_dict.keys()) for i in range(len(inventory_dict)): # define inventory name as inventory type + inventory year (e.g., NEI_2017) database = inventory_list[i] year = list(inventory_dict.values())[i] inventory_name = database + '_' + year facilities = stewi.getInventoryFacilities(database, year) facilities = facilities[['FacilityID', 'State', 'County', 'NAICS']] if len(facilities[facilities.duplicated(subset='FacilityID', keep=False)]) > 0: log.debug('Duplicate facilities in %s - keeping first listed', inventory_name) facilities.drop_duplicates(subset='FacilityID', keep='first', inplace=True) facility_mapping = facility_mapping.append(facilities) # Apply FIPS to facility locations facility_mapping = apply_county_FIPS(facility_mapping) return facility_mapping
5,346,397
def test_log_interp_units(): """Test interpolating with log x-scale with units.""" x_log = np.array([1e3, 1e4, 1e5, 1e6]) * units.hPa y_log = (np.log(x_log.m) * 2 + 3) * units.degC x_interp = np.array([5e5, 5e6, 5e7]) * units.Pa y_interp_truth = np.array([20.0343863828, 24.6395565688, 29.2447267548]) * units.degC y_interp = log_interp(x_interp, x_log, y_log) assert_array_almost_equal(y_interp, y_interp_truth, 7)
5,346,398
def post_five_days_weather_data(message, option, city_name):
    """
    Show the 5-day weather forecast for the specified city.

    Command: "tenki [-5 cityname|--five cityname]"
    """
    post_message = tenkibot_service.make_5_days_weather_message(city_name)
    message.send('{}'.format(post_message))
5,346,399